268 files changed, 26502 insertions, 7178 deletions
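The bulk of the documentation changes below describe the new builders setting, which replaces the NIX_BUILD_HOOK / NIX_REMOTE_SYSTEMS mechanism for remote builds. As a quick orientation before the patch itself — a minimal sketch only, reusing the placeholder host names and key path from the rewritten manual text rather than real machines — the corresponding configuration looks roughly like this:

    # /etc/nix/nix.conf
    # Comments start with '#'; read the machine list from a separate file (the default).
    builders = @/etc/nix/machines

and /etc/nix/machines then lists one machine per line, in the field order documented in the distributed-builds chapter (store URI, platform types, SSH identity file, maximum parallel builds, speed factor, supported features, mandatory features; only the URI is required):

    ssh://mac x86_64-darwin
    ssh://nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm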
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000000..3372b1f03f7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ +<!-- + +# Filing a Nix issue + +*WAIT* Are you sure you're filing your issue in the right repository? + +We appreciate you taking the time to tell us about issues you encounter, but routing the issue to the right place will get you help sooner and save everyone time. + +This is the Nix repository, and issues here should be about Nix the build and package management *_tool_*. + +If you have a problem with a specific package on NixOS or when using Nix, you probably want to file an issue with _nixpkgs_, whose issue tracker is over at https://github.com/NixOS/nixpkgs/issues. + +Examples of _Nix_ issues: + +- Nix segfaults when I run `nix-build -A blahblah` +- The Nix language needs a new builtin: `builtins.foobar` +- Regression in the behavior of `nix-env` in Nix 2.0 + +Examples of _nixpkgs_ issues: + +- glibc is b0rked on aarch64 +- chromium in NixOS doesn't support U2F but google-chrome does! +- The OpenJDK package on macOS is missing a key component + +Chances are if you're a newcomer to the Nix world, you'll probably want the [nixpkgs tracker](https://github.com/NixOS/nixpkgs/issues). It also gets a lot more eyeball traffic so you'll probably get a response a lot more quickly. + +--> diff --git a/.gitignore b/.gitignore index ce22fa007dc7..0f2f3ddeec14 100644 --- a/.gitignore +++ b/.gitignore @@ -13,9 +13,6 @@ perl/Makefile.config /corepkgs/config.nix -# /corepkgs/buildenv/ -/corepkgs/buildenv/builder.pl - # /corepkgs/channels/ /corepkgs/channels/unpack.sh @@ -38,6 +35,7 @@ perl/Makefile.config /scripts/nix-copy-closure /scripts/nix-reduce-build /scripts/nix-http-export.cgi +/scripts/nix-profile-daemon.sh # /src/libexpr/ /src/libexpr/lexer-tab.cc @@ -71,9 +69,6 @@ perl/Makefile.config # /src/nix-channel/ /src/nix-channel/nix-channel -# /src/buildenv/ -/src/buildenv/buildenv - # /src/nix-build/ /src/nix-build/nix-build diff --git a/Makefile b/Makefile index 5d8e990cc5c0..834f84b286bf 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ makefiles = \ local.mk \ - src/boost/format/local.mk \ src/libutil/local.mk \ src/libstore/local.mk \ src/libmain/local.mk \ @@ -13,7 +12,6 @@ makefiles = \ src/nix-collect-garbage/local.mk \ src/nix-copy-closure/local.mk \ src/nix-prefetch-url/local.mk \ - src/buildenv/local.mk \ src/resolve-system-dependencies/local.mk \ src/nix-channel/local.mk \ src/nix-build/local.mk \ @@ -24,9 +22,10 @@ makefiles = \ misc/launchd/local.mk \ misc/upstart/local.mk \ doc/manual/local.mk \ - tests/local.mk + tests/local.mk \ + tests/plugins/local.mk -GLOBAL_CXXFLAGS += -std=c++14 -g -Wall -include config.h +GLOBAL_CXXFLAGS += -g -Wall -include config.h -include Makefile.config diff --git a/Makefile.config.in b/Makefile.config.in index 45a70cd6dd1a..a9785dc73955 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -6,6 +6,8 @@ CXXFLAGS = @CXXFLAGS@ ENABLE_S3 = @ENABLE_S3@ HAVE_SODIUM = @HAVE_SODIUM@ HAVE_READLINE = @HAVE_READLINE@ +HAVE_BROTLI = @HAVE_BROTLI@ +HAVE_SECCOMP = @HAVE_SECCOMP@ LIBCURL_LIBS = @LIBCURL_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@ PACKAGE_NAME = @PACKAGE_NAME@ @@ -13,9 +15,10 @@ PACKAGE_VERSION = @PACKAGE_VERSION@ SODIUM_LIBS = @SODIUM_LIBS@ LIBLZMA_LIBS = @LIBLZMA_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ +LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ bash = @bash@ bindir = @bindir@ -bro = @bro@ +brotli = @brotli@ lsof = @lsof@ datadir = @datadir@ datarootdir = @datarootdir@ diff --git 
a/configure.ac b/configure.ac index 9d8a81d0427b..c41a83c97646 100644 --- a/configure.ac +++ b/configure.ac @@ -61,7 +61,8 @@ CFLAGS= CXXFLAGS= AC_PROG_CC AC_PROG_CXX -AX_CXX_COMPILE_STDCXX_11 +AC_PROG_CPP +AX_CXX_COMPILE_STDCXX_14 # Use 64-bit file system calls so that we can support files > 2 GiB. @@ -127,7 +128,7 @@ NEED_PROG(gzip, gzip) NEED_PROG(xz, xz) AC_PATH_PROG(dot, dot) AC_PATH_PROG(pv, pv, pv) -AC_PATH_PROG(bro, bro, bro) +AC_PATH_PROGS(brotli, brotli bro, bro) AC_PATH_PROG(lsof, lsof, lsof) @@ -174,23 +175,51 @@ AC_SUBST(HAVE_SODIUM, [$have_sodium]) # Look for liblzma, a required dependency. PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"]) +AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt], + [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])]) +# Look for libbrotli{enc,dec}, optional dependencies +PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], + [AC_DEFINE([HAVE_BROTLI], [1], [Whether to use libbrotli.]) + CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"] + have_brotli=1], [have_brotli=]) +AC_SUBST(HAVE_BROTLI, [$have_brotli]) + # Look for libseccomp, required for Linux sandboxing. if test "$sys_name" = linux; then - PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], - [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + AC_ARG_ENABLE([seccomp-sandboxing], + AC_HELP_STRING([--disable-seccomp-sandboxing], + [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)] + )) + if test "x$enable_seccomp_sandboxing" != "xno"; then + PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], + [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + have_seccomp=1 + AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.]) + else + have_seccomp= + fi +else + have_seccomp= fi +AC_SUBST(HAVE_SECCOMP, [$have_seccomp]) # Look for aws-cpp-sdk-s3. AC_LANG_PUSH(C++) AC_CHECK_HEADERS([aws/s3/S3Client.h], - [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-cpp-sdk-s3.]) + [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1], [enable_s3=]) AC_SUBST(ENABLE_S3, [$enable_s3]) AC_LANG_POP(C++) +if test -n "$enable_s3"; then + declare -a aws_version_tokens=($(printf '#include <aws/core/VersionConfig.h>\nAWS_SDK_VERSION_STRING' | $CPP $CPPFLAGS - | grep -v '^#.*' | sed 's/"//g' | tr '.' ' ')) + AC_DEFINE_UNQUOTED([AWS_VERSION_MAJOR], ${aws_version_tokens@<:@0@:>@}, [Major version of aws-sdk-cpp.]) + AC_DEFINE_UNQUOTED([AWS_VERSION_MINOR], ${aws_version_tokens@<:@1@:>@}, [Minor version of aws-sdk-cpp.]) +fi + # Whether to use the Boehm garbage collector. AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc], diff --git a/corepkgs/buildenv.nix b/corepkgs/buildenv.nix index 5e7b40eaa0cb..0bac4c44b48a 100644 --- a/corepkgs/buildenv.nix +++ b/corepkgs/buildenv.nix @@ -1,11 +1,9 @@ -with import <nix/config.nix>; - { derivations, manifest }: derivation { name = "user-environment"; - system = builtins.currentSystem; - builder = nixLibexecDir + "/nix/buildenv"; + system = "builtin"; + builder = "builtin:buildenv"; inherit manifest; @@ -24,21 +22,4 @@ derivation { # Also don't bother substituting. 
allowSubstitutes = false; - - __sandboxProfile = '' - (allow sysctl-read) - (allow file-read* - (literal "/usr/lib/libSystem.dylib") - (literal "/usr/lib/libSystem.B.dylib") - (literal "/usr/lib/libobjc.A.dylib") - (literal "/usr/lib/libobjc.dylib") - (literal "/usr/lib/libauto.dylib") - (literal "/usr/lib/libc++abi.dylib") - (literal "/usr/lib/libc++.1.dylib") - (literal "/usr/lib/libDiagnosticMessagesClient.dylib") - (subpath "/usr/lib/system") - (subpath "/dev")) - ''; - - inherit chrootDeps; } diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix index e135b947fdbb..0ce1bab112f3 100644 --- a/corepkgs/fetchurl.nix +++ b/corepkgs/fetchurl.nix @@ -1,4 +1,4 @@ -{ system ? builtins.currentSystem +{ system ? "" # obsolete , url , md5 ? "", sha1 ? "", sha256 ? "", sha512 ? "" , outputHash ? @@ -17,7 +17,9 @@ derivation { inherit outputHashAlgo outputHash; outputHashMode = if unpack || executable then "recursive" else "flat"; - inherit name system url executable unpack; + inherit name url executable unpack; + + system = "builtin"; # No need to double the amount of network traffic preferLocalBuild = true; diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml index 1957e1105e68..20fd6a0cfb0d 100644 --- a/doc/manual/advanced-topics/distributed-builds.xml +++ b/doc/manual/advanced-topics/distributed-builds.xml @@ -4,71 +4,109 @@ version="5.0" xml:id='chap-distributed-builds'> -<title>Distributed Builds</title> - -<para>Nix supports distributed builds, where a local Nix installation can -forward Nix builds to other machines over the network. This allows -multiple builds to be performed in parallel (thus improving -performance) and allows Nix to perform multi-platform builds in a -semi-transparent way. For instance, if you perform a build for a -<literal>x86_64-darwin</literal> on an <literal>i686-linux</literal> -machine, Nix can automatically forward the build to a -<literal>x86_64-darwin</literal> machine, if available.</para> - -<para>You can enable distributed builds by setting the environment -variable <envar>NIX_BUILD_HOOK</envar> to point to a program that Nix -will call whenever it wants to build a derivation. The build hook -(typically a shell or Perl script) can decline the build, in which Nix -will perform it in the usual way if possible, or it can accept it, in -which case it is responsible for somehow getting the inputs of the -build to another machine, doing the build there, and getting the -results back.</para> - -<example xml:id='ex-remote-systems'><title>Remote machine configuration: -<filename>remote-systems.conf</filename></title> -<programlisting> -nix@mcflurry.labs.cs.uu.nl x86_64-darwin /home/nix/.ssh/id_quarterpounder_auto 2 -nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm -nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 -nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 kvm perf -</programlisting> -</example> - -<para>Nix ships with a build hook that should be suitable for most -purposes. It uses <command>ssh</command> and -<command>nix-copy-closure</command> to copy the build inputs and -outputs and perform the remote build. To use it, you should set -<envar>NIX_BUILD_HOOK</envar> to -<filename><replaceable>prefix</replaceable>/libexec/nix/build-remote</filename>. -You should also define a list of available build machines and point -the environment variable <envar>NIX_REMOTE_SYSTEMS</envar> to -it. 
<envar>NIX_REMOTE_SYSTEMS</envar> must be an absolute path. An -example configuration is shown in <xref linkend='ex-remote-systems' -/>. Each line in the file specifies a machine, with the following -bits of information: +<title>Remote Builds</title> + +<para>Nix supports remote builds, where a local Nix installation can +forward Nix builds to other machines. This allows multiple builds to +be performed in parallel and allows Nix to perform multi-platform +builds in a semi-transparent way. For instance, if you perform a +build for a <literal>x86_64-darwin</literal> on an +<literal>i686-linux</literal> machine, Nix can automatically forward +the build to a <literal>x86_64-darwin</literal> machine, if +available.</para> + +<para>To forward a build to a remote machine, it’s required that the +remote machine is accessible via SSH and that it has Nix +installed. You can test whether connecting to the remote Nix instance +works, e.g. + +<screen> +$ nix ping-store --store ssh://mac +</screen> + +will try to connect to the machine named <literal>mac</literal>. It is +possible to specify an SSH identity file as part of the remote store +URI, e.g. + +<screen> +$ nix ping-store --store ssh://mac?ssh-key=/home/alice/my-key +</screen> + +Since builds should be non-interactive, the key should not have a +passphrase. Alternatively, you can load identities ahead of time into +<command>ssh-agent</command> or <command>gpg-agent</command>.</para> + +<para>If you get the error + +<screen> +bash: nix-store: command not found +error: cannot connect to 'mac' +</screen> + +then you need to ensure that the <envar>PATH</envar> of +non-interactive login shells contains Nix.</para> + +<warning><para>If you are building via the Nix daemon, it is the Nix +daemon user account (that is, <literal>root</literal>) that should +have SSH access to the remote machine. If you can’t or don’t want to +configure <literal>root</literal> to be able to access the remote +machine, you can use a private Nix store instead by passing +e.g. <literal>--store ~/my-nix</literal>.</para></warning> + +<para>The list of remote machines can be specified on the command line +or in the Nix configuration file. The former is convenient for +testing. For example, the following command allows you to build a +derivation for <literal>x86_64-darwin</literal> on a Linux machine: + +<screen> +$ uname +Linux + +$ nix build \ + '(with import <nixpkgs> { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ + --builders 'ssh://mac x86_64-darwin' +[1/0/1 built, 0.0 MiB DL] building foo on ssh://mac + +$ cat ./result +Darwin +</screen> + +It is possible to specify multiple builders separated by a semicolon +or a newline, e.g. + +<screen> + --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' +</screen> +</para> + +<para>Each machine specification consists of the following elements, +separated by spaces. Only the first element is required. <orderedlist> <listitem><para>The name of the remote machine, with optionally the - user under which the remote build should be performed. This is - actually passed as an argument to <command>ssh</command>, so it can - be an alias defined in your + <listitem><para>The URI of the remote store in the format + <literal>ssh://[<replaceable>username</replaceable>@]<replaceable>hostname</replaceable></literal>, + e.g. <literal>ssh://nix@mac</literal> or + <literal>ssh://mac</literal>. For backward compatibility, + <literal>ssh://</literal> may be omitted.
The hostname may be an + alias defined in your <filename>~/.ssh/config</filename>.</para></listitem> <listitem><para>A comma-separated list of Nix platform type identifiers, such as <literal>x86_64-darwin</literal>. It is possible for a machine to support multiple platform types, e.g., - <literal>i686-linux,x86_64-linux</literal>.</para></listitem> + <literal>i686-linux,x86_64-linux</literal>. If omitted, this + defaults to the local platform type.</para></listitem> - <listitem><para>The SSH private key to be used to log in to the - remote machine. Since builds should be non-interactive, this key - should not have a passphrase!</para></listitem> + <listitem><para>The SSH identity file to be used to log in to the + remote machine. If omitted, SSH will use its regular + identities.</para></listitem> - <listitem><para>The maximum number of builds that - <filename>build-remote</filename> will execute in parallel on the - machine. Typically this should be equal to the number of CPU cores. - For instance, the machine <literal>itchy</literal> in the example - will execute up to 8 builds in parallel.</para></listitem> + <listitem><para>The maximum number of builds that Nix will execute + in parallel on the machine. Typically this should be equal to the + number of CPU cores. For instance, the machine + <literal>itchy</literal> in the example will execute up to 8 builds + in parallel.</para></listitem> <listitem><para>The “speed factor”, indicating the relative speed of the machine. If there are multiple machines of the right type, Nix @@ -76,30 +114,69 @@ bits of information: <listitem><para>A comma-separated list of <emphasis>supported features</emphasis>. If a derivation has the - <varname>requiredSystemFeatures</varname> attribute, then - <filename>build-remote</filename> will only perform the - derivation on a machine that has the specified features. For - instance, the attribute + <varname>requiredSystemFeatures</varname> attribute, then Nix will + only perform the derivation on a machine that has the specified + features. For instance, the attribute <programlisting> requiredSystemFeatures = [ "kvm" ]; </programlisting> will cause the build to be performed on a machine that has the - <literal>kvm</literal> feature (i.e., <literal>scratchy</literal> in - the example above).</para></listitem> + <literal>kvm</literal> feature.</para></listitem> <listitem><para>A comma-separated list of <emphasis>mandatory features</emphasis>. A machine will only be used to build a derivation if all of the machine’s mandatory features appear in the - derivation’s <varname>requiredSystemFeatures</varname> attribute. - Thus, in the example, the machine <literal>poochie</literal> will - only do derivations that have - <varname>requiredSystemFeatures</varname> set to <literal>["kvm" - "perf"]</literal> or <literal>["perf"]</literal>.</para></listitem> + derivation’s <varname>requiredSystemFeatures</varname> + attribute..</para></listitem> </orderedlist> -</para> +For example, the machine specification + +<programlisting> +nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm +nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2 +nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 1 2 kvm benchmark +</programlisting> + +specifies several machines that can perform +<literal>i686-linux</literal> builds. 
However, +<literal>poochie</literal> will only do builds that have the attribute + +<programlisting> +requiredSystemFeatures = [ "benchmark" ]; +</programlisting> + +or + +<programlisting> +requiredSystemFeatures = [ "benchmark" "kvm" ]; +</programlisting> + +<literal>itchy</literal> cannot do builds that require +<literal>kvm</literal>, but <literal>scratchy</literal> does support +such builds. For regular builds, <literal>itchy</literal> will be +preferred over <literal>scratchy</literal> because it has a higher +speed factor.</para> + +<para>Remote builders can also be configured in +<filename>nix.conf</filename>, e.g. + +<programlisting> +builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd +</programlisting> + +Finally, remote builders can be configured in a separate configuration +file included in <option>builders</option> via the syntax +<literal>@<replaceable>file</replaceable></literal>. For example, + +<programlisting> +builders = @/etc/nix/machines +</programlisting> + +causes the list of machines in <filename>/etc/nix/machines</filename> +to be included. (This is the default.)</para> </chapter> diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml index fb4d8cefc4d2..1865bb37c860 100644 --- a/doc/manual/command-ref/conf-file.xml +++ b/doc/manual/command-ref/conf-file.xml @@ -40,7 +40,12 @@ <para>The configuration files consist of <literal><replaceable>name</replaceable> = -<replaceable>value</replaceable></literal> pairs, one per line. +<replaceable>value</replaceable></literal> pairs, one per line. Other +files can be included with a line like <literal>include +<replaceable>path</replaceable></literal>, where +<replaceable>path</replaceable> is interpreted relative to the current +conf file and a missing file is an error unless +<literal>!include</literal> is used instead. Comments start with a <literal>#</literal> character. Here is an example configuration file:</para> @@ -58,147 +63,99 @@ false</literal>.</para> <variablelist> - <varlistentry xml:id="conf-keep-outputs"><term><literal>keep-outputs</literal></term> - - <listitem><para>If <literal>true</literal>, the garbage collector - will keep the outputs of non-garbage derivations. If - <literal>false</literal> (default), outputs will be deleted unless - they are GC roots themselves (or reachable from other roots).</para> - - <para>In general, outputs must be registered as roots separately. - However, even if the output of a derivation is registered as a - root, the collector will still delete store paths that are used - only at build time (e.g., the C compiler, or source tarballs - downloaded from the network). To prevent it from doing so, set - this option to <literal>true</literal>.</para></listitem> - - </varlistentry> - - - <varlistentry xml:id="conf-keep-derivations"><term><literal>keep-derivations</literal></term> - - <listitem><para>If <literal>true</literal> (default), the garbage - collector will keep the derivations from which non-garbage store - paths were built. If <literal>false</literal>, they will be - deleted unless explicitly registered as a root (or reachable from - other roots).</para> - - <para>Keeping derivation around is useful for querying and - traceability (e.g., it allows you to ask with what dependencies or - options a store path was built), so by default this option is on. 
- Turn it off to save a bit of disk space (or a lot if - <literal>keep-outputs</literal> is also turned on).</para></listitem> - - </varlistentry> - - - <varlistentry><term><literal>keep-env-derivations</literal></term> + <varlistentry xml:id="conf-allowed-uris"><term><literal>allowed-uris</literal></term> - <listitem><para>If <literal>false</literal> (default), derivations - are not stored in Nix user environments. That is, the derivation - any build-time-only dependencies may be garbage-collected.</para> + <listitem> - <para>If <literal>true</literal>, when you add a Nix derivation to - a user environment, the path of the derivation is stored in the - user environment. Thus, the derivation will not be - garbage-collected until the user environment generation is deleted - (<command>nix-env --delete-generations</command>). To prevent - build-time-only dependencies from being collected, you should also - turn on <literal>keep-outputs</literal>.</para> + <para>A list of URI prefixes to which access is allowed in + restricted evaluation mode. For example, when set to + <literal>https://github.com/NixOS</literal>, builtin functions + such as <function>fetchGit</function> are allowed to access + <literal>https://github.com/NixOS/patchelf.git</literal>.</para> - <para>The difference between this option and - <literal>keep-derivations</literal> is that this one is - “sticky”: it applies to any user environment created while this - option was enabled, while <literal>keep-derivations</literal> - only applies at the moment the garbage collector is - run.</para></listitem> + </listitem> </varlistentry> - <varlistentry xml:id="conf-max-jobs"><term><literal>max-jobs</literal></term> + <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term> - <listitem><para>This option defines the maximum number of jobs - that Nix will try to build in parallel. The default is - <literal>1</literal>. The special value <literal>auto</literal> - causes Nix to use the number of CPUs in your system. It can be - overridden using the <option - linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>) - command line switch.</para></listitem> + <listitem><para>By default, Nix allows you to <function>import</function> from a derivation, + allowing building at evaluation time. With this option set to false, Nix will throw an error + when evaluating an expression that uses this feature, allowing users to ensure their evaluation + will not require any builds to take place.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-cores"><term><literal>cores</literal></term> + <varlistentry xml:id="conf-allow-new-privileges"><term><literal>allow-new-privileges</literal></term> - <listitem><para>Sets the value of the - <envar>NIX_BUILD_CORES</envar> environment variable in the - invocation of builders. Builders can use this variable at their - discretion to control the maximum amount of parallelism. For - instance, in Nixpkgs, if the derivation attribute - <varname>enableParallelBuilding</varname> is set to - <literal>true</literal>, the builder passes the - <option>-j<replaceable>N</replaceable></option> flag to GNU Make. - It can be overridden using the <option - linkend='opt-cores'>--cores</option> command line switch and - defaults to <literal>1</literal>. The value <literal>0</literal> - means that the builder should use all available CPU cores in the - system.</para></listitem> + <listitem><para>(Linux-specific.) 
By default, builders on Linux + cannot acquire new privileges by calling setuid/setgid programs or + programs that have file capabilities. For example, programs such + as <command>sudo</command> or <command>ping</command> will + fail. (Note that in sandbox builds, no such programs are available + unless you bind-mount them into the sandbox via the + <option>sandbox-paths</option> option.) You can allow the + use of such programs by enabling this option. This is impure and + usually undesirable, but may be useful in certain scenarios + (e.g. to spin up containers or set up userspace network interfaces + in tests).</para></listitem> </varlistentry> - <varlistentry xml:id="conf-max-silent-time"><term><literal>max-silent-time</literal></term> + <varlistentry xml:id="conf-allowed-users"><term><literal>allowed-users</literal></term> <listitem> - <para>This option defines the maximum number of seconds that a - builder can go without producing any data on standard output or - standard error. This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite - loop, or to catch remote builds that are hanging due to network - problems. It can be overridden using the <option - linkend="opt-max-silent-time">--max-silent-time</option> command - line switch.</para> + <para>A list of names of users (separated by whitespace) that + are allowed to connect to the Nix daemon. As with the + <option>trusted-users</option> option, you can specify groups by + prefixing them with <literal>@</literal>. Also, you can allow + all users by specifying <literal>*</literal>. The default is + <literal>*</literal>.</para> - <para>The value <literal>0</literal> means that there is no - timeout. This is also the default.</para> + <para>Note that trusted users are always allowed to connect.</para> </listitem> </varlistentry> - <varlistentry xml:id="conf-timeout"><term><literal>timeout</literal></term> + <varlistentry xml:id="conf-auto-optimise-store"><term><literal>auto-optimise-store</literal></term> - <listitem> + <listitem><para>If set to <literal>true</literal>, Nix + automatically detects files in the store that have identical + contents, and replaces them with hard links to a single copy. + This saves disk space. If set to <literal>false</literal> (the + default), you can still run <command>nix-store + --optimise</command> to get rid of duplicate + files.</para></listitem> - <para>This option defines the maximum number of seconds that a - builder can run. This is useful (for instance in an automated - build system) to catch builds that are stuck in an infinite loop - but keep writing to their standard output or standard error. It - can be overridden using the <option - linkend="opt-timeout">--timeout</option> command line - switch.</para> + </varlistentry> - <para>The value <literal>0</literal> means that there is no - timeout. This is also the default.</para> + <varlistentry xml:id="conf-builders"> + <term><literal>builders</literal></term> + <listitem> + <para>A list of machines on which to perform builds. <phrase + condition="manual">See <xref linkend="chap-distributed-builds" + /> for details.</phrase></para> </listitem> - </varlistentry> - <varlistentry xml:id="conf-max-build-log-size"><term><literal>max-build-log-size</literal></term> + <varlistentry xml:id="conf-builders-use-substitutes"><term><literal>builders-use-substitutes</literal></term> - <listitem> - - <para>This option defines the maximum number of bytes that a - builder can write to its stdout/stderr. 
If the builder exceeds - this limit, it’s killed. A value of <literal>0</literal> (the - default) means that there is no limit.</para> - - </listitem> + <listitem><para>If set to <literal>true</literal>, Nix will instruct + remote build machines to use their own binary substitutes if available. In + practical terms, this means that remote hosts will fetch as many build + dependencies as possible from their own substitutes (e.g, from + <literal>cache.nixos.org</literal>), instead of waiting for this host to + upload them all. This can drastically reduce build times if the network + connection between this computer and the remote build host is slow. Defaults + to <literal>false</literal>.</para></listitem> </varlistentry> @@ -244,66 +201,51 @@ false</literal>.</para> </varlistentry> - <varlistentry><term><literal>sandbox</literal></term> + <varlistentry xml:id="conf-compress-build-log"><term><literal>compress-build-log</literal></term> - <listitem><para>If set to <literal>true</literal>, builds will be - performed in a <emphasis>sandboxed environment</emphasis>, i.e., - they’re isolated from the normal file system hierarchy and will - only see their dependencies in the Nix store, the temporary build - directory, private versions of <filename>/proc</filename>, - <filename>/dev</filename>, <filename>/dev/shm</filename> and - <filename>/dev/pts</filename> (on Linux), and the paths configured with the - <link linkend='conf-sandbox-paths'><literal>sandbox-paths</literal> - option</link>. This is useful to prevent undeclared dependencies - on files in directories such as <filename>/usr/bin</filename>. In - addition, on Linux, builds run in private PID, mount, network, IPC - and UTS namespaces to isolate them from other processes in the - system (except that fixed-output derivations do not run in private - network namespace to ensure they can access the network).</para> + <listitem><para>If set to <literal>true</literal> (the default), + build logs written to <filename>/nix/var/log/nix/drvs</filename> + will be compressed on the fly using bzip2. Otherwise, they will + not be compressed.</para></listitem> - <para>Currently, sandboxing only work on Linux and macOS. The use - of a sandbox requires that Nix is run as root (so you should use - the <link linkend='conf-build-users-group'>“build users” - feature</link> to perform the actual builds under different users - than root).</para> + </varlistentry> - <para>If this option is set to <literal>relaxed</literal>, then - fixed-output derivations and derivations that have the - <varname>__noChroot</varname> attribute set to - <literal>true</literal> do not run in sandboxes.</para> - <para>The default is <literal>false</literal>.</para> + <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term> + + <listitem> + + <para>The timeout (in seconds) for establishing connections in + the binary cache substituter. It corresponds to + <command>curl</command>’s <option>--connect-timeout</option> + option.</para> </listitem> </varlistentry> - <varlistentry xml:id="conf-sandbox-paths"> - <term><literal>sandbox-paths</literal></term> - - <listitem><para>A list of paths bind-mounted into Nix sandbox - environments. You can use the syntax - <literal><replaceable>target</replaceable>=<replaceable>source</replaceable></literal> - to mount a path in a different location in the sandbox; for - instance, <literal>/bin=/nix-bin</literal> will mount the path - <literal>/nix-bin</literal> as <literal>/bin</literal> inside the - sandbox. 
If <replaceable>source</replaceable> is followed by - <literal>?</literal>, then it is not an error if - <replaceable>source</replaceable> does not exist; for example, - <literal>/dev/nvidiactl?</literal> specifies that - <filename>/dev/nvidiactl</filename> will only be mounted in the - sandbox if it exists in the host filesystem.</para> + <varlistentry xml:id="conf-cores"><term><literal>cores</literal></term> - <para>Depending on how Nix was built, the default value for this option - may be empty or provide <filename>/bin/sh</filename> as a - bind-mount of <command>bash</command>.</para></listitem> + <listitem><para>Sets the value of the + <envar>NIX_BUILD_CORES</envar> environment variable in the + invocation of builders. Builders can use this variable at their + discretion to control the maximum amount of parallelism. For + instance, in Nixpkgs, if the derivation attribute + <varname>enableParallelBuilding</varname> is set to + <literal>true</literal>, the builder passes the + <option>-j<replaceable>N</replaceable></option> flag to GNU Make. + It can be overridden using the <option + linkend='opt-cores'>--cores</option> command line switch and + defaults to <literal>1</literal>. The value <literal>0</literal> + means that the builder should use all available CPU cores in the + system.</para></listitem> </varlistentry> <varlistentry xml:id="conf-extra-sandbox-paths"> - <term><literal>build-extra-sandbox-paths</literal></term> + <term><literal>extra-sandbox-paths</literal></term> <listitem><para>A list of additional paths appended to <option>sandbox-paths</option>. Useful if you want to extend @@ -312,16 +254,37 @@ false</literal>.</para> </varlistentry> - <varlistentry><term><literal>use-substitutes</literal></term> + <varlistentry xml:id="conf-extra-platforms"><term><literal>extra-platforms</literal></term> - <listitem><para>If set to <literal>true</literal> (default), Nix - will use binary substitutes if available. This option can be - disabled to force building from source.</para></listitem> + <listitem><para>Platforms other than the native one which + this machine is capable of building for. This can be useful for + supporting additional architectures on compatible machines: + i686-linux can be built on x86_64-linux machines (and the default + for this setting reflects this); armv7 is backwards-compatible with + armv6 and armv5tel; some aarch64 machines can also natively run + 32-bit ARM code; and qemu-user may be used to support non-native + platforms (though this may be slow and buggy). Most values for this + are not enabled by default because build systems will often + misdetect the target platform and generate incompatible code, so you + may wish to cross-check the results of using this option against + proper natively-built versions of your + derivations.</para></listitem> </varlistentry> - <varlistentry><term><literal>fallback</literal></term> + <varlistentry xml:id="conf-extra-substituters"><term><literal>extra-substituters</literal></term> + + <listitem><para>Additional binary caches appended to those + specified in <option>substituters</option>. When used by + unprivileged users, untrusted substituters (i.e. those not listed + in <option>trusted-substituters</option>) are silently + ignored.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-fallback"><term><literal>fallback</literal></term> <listitem><para>If set to <literal>true</literal>, Nix will fall back to building from source if a binary substitute fails. 
This - @@ -331,262 +294,272 @@ false</literal>.</para> </varlistentry> - <varlistentry xml:id="conf-fsync-metadata"><term><literal>fsync-metadata</literal></term> - <listitem><para>If set to <literal>true</literal> (the default), - Nix will write the build log of a derivation (i.e. the standard - output and error of its builder) to the directory - <filename>/nix/var/log/nix/drvs</filename>. The build log can be - retrieved using the command <command>nix-store -l - <replaceable>path</replaceable></command>.</para></listitem> + <listitem><para>If set to <literal>true</literal>, changes to the + Nix store metadata (in <filename>/nix/var/nix/db</filename>) are + synchronously flushed to disk. This improves robustness in case + of system crashes, but reduces performance. The default is + <literal>true</literal>.</para></listitem> </varlistentry> - <varlistentry><term><literal>compress-build-log</literal></term> - - <listitem><para>If set to <literal>true</literal> (the default), - build logs written to <filename>/nix/var/log/nix/drvs</filename> - will be compressed on the fly using bzip2. Otherwise, they will - not be compressed.</para></listitem> - - </varlistentry> + <varlistentry xml:id="conf-hashed-mirrors"><term><literal>hashed-mirrors</literal></term> + <listitem><para>A list of web servers used by + <function>builtins.fetchurl</function> to obtain files by + hash. The default is + <literal>http://tarballs.nixos.org/</literal>. Given a hash type + <replaceable>ht</replaceable> and a base-16 hash + <replaceable>h</replaceable>, Nix will try to download the file + from + <literal>hashed-mirror/<replaceable>ht</replaceable>/<replaceable>h</replaceable></literal>. + This allows files to be downloaded even if they have disappeared + from their original URI. For example, given the default mirror + <literal>http://tarballs.nixos.org/</literal>, when building the derivation - <varlistentry><term><literal>substituters</literal></term> +<programlisting> +builtins.fetchurl { + url = https://example.org/foo-1.2.3.tar.xz; + sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; +} +</programlisting> - <listitem><para>A list of URLs of substituters, separated by - whitespace. The default is - <literal>https://cache.nixos.org</literal>.</para></listitem> + Nix will attempt to download this file from + <literal>http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae</literal> + first. If it is not available there, it will try the original URI.</para></listitem> </varlistentry> - <!-- - <varlistentry><term><literal>binary-caches-files</literal></term> + <varlistentry xml:id="conf-http-connections"><term><literal>http-connections</literal></term> - <listitem><para>A list of names of files that will be read to - obtain additional binary cache URLs. The default is - <literal>/nix/var/nix/profiles/per-user/<replaceable>username</replaceable>/channels/binary-caches/*</literal>. - Note that when you’re using the Nix daemon, - <replaceable>username</replaceable> is always equal to - <literal>root</literal>, so Nix will only use the binary caches - provided by the channels installed by root. Do not set this - option to read files created by untrusted users!</para></listitem> + <listitem><para>The maximum number of parallel TCP connections + used to fetch files from binary caches and by other downloads. It + defaults to 25.
0 means no limit.</para></listitem> </varlistentry> - --> - <varlistentry><term><literal>trusted-substituters</literal></term> + <varlistentry xml:id="conf-keep-build-log"><term><literal>keep-build-log</literal></term> - <listitem><para>A list of URLs of substituters, separated by - whitespace. These are not used by default, but can be enabled by - users of the Nix daemon by specifying <literal>--option - substituters <replaceable>urls</replaceable></literal> on the - command line. Unprivileged users are only allowed to pass a - subset of the URLs listed in <literal>substituters</literal> and - <literal>trusted-substituters</literal>.</para></listitem> + <listitem><para>If set to <literal>true</literal> (the default), + Nix will write the build log of a derivation (i.e. the standard + output and error of its builder) to the directory + <filename>/nix/var/log/nix/drvs</filename>. The build log can be + retrieved using the command <command>nix-store -l + <replaceable>path</replaceable></command>.</para></listitem> </varlistentry> - <varlistentry><term><literal>extra-substituters</literal></term> - - <listitem><para>Additional binary caches appended to those - specified in <option>substituters</option>. When used by - unprivileged users, untrusted substituters (i.e. those not listed - in <option>trusted-substituters</option>) are silently - ignored.</para></listitem> + <varlistentry xml:id="conf-keep-derivations"><term><literal>keep-derivations</literal></term> - </varlistentry> + <listitem><para>If <literal>true</literal> (default), the garbage + collector will keep the derivations from which non-garbage store + paths were built. If <literal>false</literal>, they will be + deleted unless explicitly registered as a root (or reachable from + other roots).</para> + <para>Keeping derivations around is useful for querying and + traceability (e.g., it allows you to ask with what dependencies or + options a store path was built), so by default this option is on. + Turn it off to save a bit of disk space (or a lot if + <literal>keep-outputs</literal> is also turned on).</para></listitem> - <varlistentry><term><literal>signed-binary-caches</literal></term> + </varlistentry> - <listitem><para>If set to <literal>*</literal> (the default), Nix - will only download binaries if they are signed using one of the - keys listed in <option>binary-cache-public-keys</option>. Set to - the empty string to disable signature checking.</para></listitem> - </varlistentry> + <varlistentry xml:id="conf-keep-env-derivations"><term><literal>keep-env-derivations</literal></term> + <listitem><para>If <literal>false</literal> (default), derivations + are not stored in Nix user environments. That is, the derivation + and any build-time-only dependencies may be garbage-collected.</para> - <varlistentry><term><literal>binary-cache-public-keys</literal></term> + <para>If <literal>true</literal>, when you add a Nix derivation to + a user environment, the path of the derivation is stored in the + user environment. Thus, the derivation will not be + garbage-collected until the user environment generation is deleted + (<command>nix-env --delete-generations</command>). To prevent + build-time-only dependencies from being collected, you should also + turn on <literal>keep-outputs</literal>.</para> - <listitem><para>A whitespace-separated list of public keys - corresponding to the secret keys trusted to sign binary - caches.
For example: - <literal>cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= - hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=</literal>.</para></listitem> + <para>The difference between this option and + <literal>keep-derivations</literal> is that this one is + “sticky”: it applies to any user environment created while this + option was enabled, while <literal>keep-derivations</literal> + only applies at the moment the garbage collector is + run.</para></listitem> </varlistentry> - <varlistentry><term><literal>http-connections</literal></term> + <varlistentry xml:id="conf-keep-outputs"><term><literal>keep-outputs</literal></term> - <listitem><para>The maximum number of parallel TCP connections - used to fetch files from binary caches and by other downloads. It - defaults to 25. 0 means no limit.</para></listitem> + <listitem><para>If <literal>true</literal>, the garbage collector + will keep the outputs of non-garbage derivations. If + <literal>false</literal> (default), outputs will be deleted unless + they are GC roots themselves (or reachable from other roots).</para> - </varlistentry> + <para>In general, outputs must be registered as roots separately. + However, even if the output of a derivation is registered as a + root, the collector will still delete store paths that are used + only at build time (e.g., the C compiler, or source tarballs + downloaded from the network). To prevent it from doing so, set + this option to <literal>true</literal>.</para></listitem> + </varlistentry> - <varlistentry><term><literal>netrc-file</literal></term> - <listitem><para>If set to an absolute path to a <filename>netrc</filename> - file, Nix will use the HTTP authentication credentials in this file when - trying to download from a remote host through HTTP or HTTPS. Defaults to - <filename>$NIX_CONF_DIR/netrc</filename>.</para> + <varlistentry xml:id="conf-max-build-log-size"><term><literal>max-build-log-size</literal></term> - <para>The <filename>netrc</filename> file consists of a list of - accounts in the following format: + <listitem> -<screen> -machine <replaceable>my-machine</replaceable> -login <replaceable>my-username</replaceable> -password <replaceable>my-password</replaceable> -</screen> + <para>This option defines the maximum number of bytes that a + builder can write to its stdout/stderr. If the builder exceeds + this limit, it’s killed. A value of <literal>0</literal> (the + default) means that there is no limit.</para> - For the exact syntax, see <link - xlink:href="https://ec.haxx.se/usingcurl-netrc.html">the - <literal>curl</literal> documentation.</link></para></listitem> + </listitem> </varlistentry> + <varlistentry xml:id="conf-max-free"><term><literal>max-free</literal></term> - <varlistentry><term><literal>system</literal></term> - - <listitem><para>This option specifies the canonical Nix system - name of the current installation, such as - <literal>i686-linux</literal> or - <literal>x86_64-darwin</literal>. Nix can only build derivations - whose <literal>system</literal> attribute equals the value - specified here. In general, it never makes sense to modify this - value from its default, since you can use it to ‘lie’ about the - platform you are building on (e.g., perform a Mac OS build on a - Linux machine; the result would obviously be wrong). 
It only - makes sense if the Nix binaries can run on multiple platforms, - e.g., ‘universal binaries’ that run on <literal>x86_64-linux</literal> and - <literal>i686-linux</literal>.</para> - - <para>It defaults to the canonical Nix system name detected by - <filename>configure</filename> at build time.</para></listitem> + <listitem><para>This option defines after how many free bytes to stop collecting + garbage once the <literal>min-free</literal> condition gets triggered.</para></listitem> </varlistentry> + <varlistentry xml:id="conf-max-jobs"><term><literal>max-jobs</literal></term> - <varlistentry><term><literal>fsync-metadata</literal></term> - - <listitem><para>If set to <literal>true</literal>, changes to the - Nix store metadata (in <filename>/nix/var/nix/db</filename>) are - synchronously flushed to disk. This improves robustness in case - of system crashes, but reduces performance. The default is - <literal>true</literal>.</para></listitem> + <listitem><para>This option defines the maximum number of jobs + that Nix will try to build in parallel. The default is + <literal>1</literal>. The special value <literal>auto</literal> + causes Nix to use the number of CPUs in your system. It can be + overridden using the <option + linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>) + command line switch.</para></listitem> </varlistentry> - <varlistentry><term><literal>auto-optimise-store</literal></term> + <varlistentry xml:id="conf-max-silent-time"><term><literal>max-silent-time</literal></term> - <listitem><para>If set to <literal>true</literal>, Nix - automatically detects files in the store that have identical - contents, and replaces them with hard links to a single copy. - This saves disk space. If set to <literal>false</literal> (the - default), you can still run <command>nix-store - --optimise</command> to get rid of duplicate - files.</para></listitem> + <listitem> - </varlistentry> + <para>This option defines the maximum number of seconds that a + builder can go without producing any data on standard output or + standard error. This is useful (for instance in an automated + build system) to catch builds that are stuck in an infinite + loop, or to catch remote builds that are hanging due to network + problems. It can be overridden using the <option + linkend="opt-max-silent-time">--max-silent-time</option> command + line switch.</para> + <para>The value <literal>0</literal> means that there is no + timeout. This is also the default.</para> - <varlistentry xml:id="conf-connect-timeout"><term><literal>connect-timeout</literal></term> + </listitem> - <listitem> + </varlistentry> - <para>The timeout (in seconds) for establishing connections in - the binary cache substituter. It corresponds to - <command>curl</command>’s <option>--connect-timeout</option> - option.</para> + <varlistentry xml:id="conf-min-free"><term><literal>min-free</literal></term> + <listitem> + <para>When the disk reaches <literal>min-free</literal> bytes of free disk space during a build, Nix + will start garbage collection until <literal>max-free</literal> bytes are available on the disk.
+ A value of <literal>0</literal> (the default) means that this feature is disabled.</para> </listitem> </varlistentry> - - <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term> + <varlistentry xml:id="conf-narinfo-cache-negative-ttl"><term><literal>narinfo-cache-negative-ttl</literal></term> <listitem> - <para>A list of names of users (separated by whitespace) that - have additional rights when connecting to the Nix daemon, such - as the ability to specify additional binary caches, or to import - unsigned NARs. You can also specify groups by prefixing them - with <literal>@</literal>; for instance, - <literal>@wheel</literal> means all users in the - <literal>wheel</literal> group. The default is - <literal>root</literal>.</para> - - <warning><para>The users listed here have the ability to - compromise the security of a multi-user Nix store. For instance, - they could install Trojan horses subsequently executed by other - users. So you should consider carefully whether to add users to - this list.</para></warning> + <para>The TTL in seconds for negative lookups. If a store path is + queried from a substituter but was not found, there will be a + negative lookup cached in the local disk cache database for the + specified duration.</para> </listitem> </varlistentry> - - <varlistentry xml:id="conf-allowed-users"><term><literal>allowed-users</literal></term> + <varlistentry xml:id="conf-narinfo-cache-positive-ttl"><term><literal>narinfo-cache-positive-ttl</literal></term> <listitem> - <para>A list of names of users (separated by whitespace) that - are allowed to connect to the Nix daemon. As with the - <option>trusted-users</option> option, you can specify groups by - prefixing them with <literal>@</literal>. Also, you can allow - all users by specifying <literal>*</literal>. The default is - <literal>*</literal>.</para> - - <para>Note that trusted users are always allowed to connect.</para> + <para>The TTL in seconds for positive lookups. If a store path is + queried from a substituter, the result of the query will be cached + in the local disk cache database including some of the NAR + metadata. The default TTL is a month, setting a shorter TTL for + positive lookups can be useful for binary caches that have + frequent garbage collection, in which case having a more frequent + cache invalidation would prevent trying to pull the path again and + failing with a hash mismatch if the build isn't reproducible. + </para> </listitem> </varlistentry> + <varlistentry xml:id="conf-netrc-file"><term><literal>netrc-file</literal></term> - <varlistentry xml:id="conf-restrict-eval"><term><literal>restrict-eval</literal></term> + <listitem><para>If set to an absolute path to a <filename>netrc</filename> + file, Nix will use the HTTP authentication credentials in this file when + trying to download from a remote host through HTTP or HTTPS. Defaults to + <filename>$NIX_CONF_DIR/netrc</filename>.</para> - <listitem> + <para>The <filename>netrc</filename> file consists of a list of + accounts in the following format: - <para>If set to <literal>true</literal>, the Nix evaluator will - not allow access to any files outside of the Nix search path (as - set via the <envar>NIX_PATH</envar> environment variable or the - <option>-I</option> option), or to URIs outside of - <option>allowed-uri</option>. 
The default is - <literal>false</literal>.</para> +<screen> +machine <replaceable>my-machine</replaceable> +login <replaceable>my-username</replaceable> +password <replaceable>my-password</replaceable> +</screen> - </listitem> + For the exact syntax, see <link + xlink:href="https://ec.haxx.se/usingcurl-netrc.html">the + <literal>curl</literal> documentation.</link></para></listitem> </varlistentry> - <varlistentry xml:id="conf-allowed-uris"><term><literal>allowed-uris</literal></term> - + <varlistentry xml:id="conf-plugin-files"> + <term><literal>plugin-files</literal></term> <listitem> - - <para>A list of URI prefixes to which access is allowed in - restricted evaluation mode. For example, when set to - <literal>https://github.com/NixOS</literal>, builtin functions - such as <function>fetchGit</function> are allowed to access - <literal>https://github.com/NixOS/patchelf.git</literal>.</para> - + <para> + A list of plugin files to be loaded by Nix. Each of these + files will be dlopened by Nix, allowing them to affect + execution through static initialization. In particular, these + plugins may construct static instances of RegisterPrimOp to + add new primops or constants to the expression language, + RegisterStoreImplementation to add new store implementations, + RegisterCommand to add new subcommands to the + <literal>nix</literal> command, and RegisterSetting to add new + nix config settings. See the constructors for those types for + more details. + </para> + <para> + Since these files are loaded into the same address space as + Nix itself, they must be DSOs compatible with the instance of + Nix running at the time (i.e. compiled against the same + headers, not linked to any incompatible libraries). They + should not be linked to any Nix libs directly, as those will + be available already at load time. + </para> + <para> + If an entry in the list is a directory, all files in the + directory are loaded as plugins (non-recursively). + </para> </listitem> </varlistentry> - <varlistentry xml:id="conf-pre-build-hook"><term><literal>pre-build-hook</literal></term> <listitem> @@ -635,6 +608,70 @@ password <replaceable>my-password</replaceable> </varlistentry> + <varlistentry xml:id="conf-require-sigs"><term><literal>require-sigs</literal></term> + + <listitem><para>If set to <literal>true</literal> (the default), + any non-content-addressed path added or copied to the Nix store + (e.g. when substituting from a binary cache) must have a valid + signature, that is, be signed using one of the keys listed in + <option>trusted-public-keys</option> or + <option>secret-key-files</option>. Set to <literal>false</literal> + to disable signature checking.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-restrict-eval"><term><literal>restrict-eval</literal></term> + + <listitem> + + <para>If set to <literal>true</literal>, the Nix evaluator will + not allow access to any files outside of the Nix search path (as + set via the <envar>NIX_PATH</envar> environment variable or the + <option>-I</option> option), or to URIs outside of + <option>allowed-uri</option>. 
The default is + <literal>false</literal>.</para> + + </listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-sandbox"><term><literal>sandbox</literal></term> + + <listitem><para>If set to <literal>true</literal>, builds will be + performed in a <emphasis>sandboxed environment</emphasis>, i.e., + they’re isolated from the normal file system hierarchy and will + only see their dependencies in the Nix store, the temporary build + directory, private versions of <filename>/proc</filename>, + <filename>/dev</filename>, <filename>/dev/shm</filename> and + <filename>/dev/pts</filename> (on Linux), and the paths configured with the + <link linkend='conf-sandbox-paths'><literal>sandbox-paths</literal> + option</link>. This is useful to prevent undeclared dependencies + on files in directories such as <filename>/usr/bin</filename>. In + addition, on Linux, builds run in private PID, mount, network, IPC + and UTS namespaces to isolate them from other processes in the + system (except that fixed-output derivations do not run in private + network namespace to ensure they can access the network).</para> + + <para>Currently, sandboxing only works on Linux and macOS. The use + of a sandbox requires that Nix is run as root (so you should use + the <link linkend='conf-build-users-group'>“build users” + feature</link> to perform the actual builds under different users + than root).</para> + + <para>If this option is set to <literal>relaxed</literal>, then + fixed-output derivations and derivations that have the + <varname>__noChroot</varname> attribute set to + <literal>true</literal> do not run in sandboxes.</para> + + <para>The default is <literal>false</literal>.</para> + + </listitem> + + </varlistentry> + + <varlistentry xml:id="conf-sandbox-dev-shm-size"><term><literal>sandbox-dev-shm-size</literal></term> <listitem><para>This option determines the maximum size of the @@ -648,61 +685,157 @@ password <replaceable>my-password</replaceable> </varlistentry> - <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term> + <varlistentry xml:id="conf-sandbox-paths"> + <term><literal>sandbox-paths</literal></term> - <listitem><para>By default, Nix allows you to <function>import</function> from a derivation, - allowing building at evaluation time. With this option set to false, Nix will throw an error - when evaluating an expression that uses this feature, allowing users to ensure their evaluation - will not require any builds to take place.</para></listitem> + <listitem><para>A list of paths bind-mounted into Nix sandbox + environments. You can use the syntax + <literal><replaceable>target</replaceable>=<replaceable>source</replaceable></literal> + to mount a path in a different location in the sandbox; for + instance, <literal>/bin=/nix-bin</literal> will mount the path + <literal>/nix-bin</literal> as <literal>/bin</literal> inside the + sandbox.
If <replaceable>source</replaceable> is followed by + <literal>?</literal>, then it is not an error if + <replaceable>source</replaceable> does not exist; for example, + <literal>/dev/nvidiactl?</literal> specifies that + <filename>/dev/nvidiactl</filename> will only be mounted in the + sandbox if it exists in the host filesystem.</para> + + <para>Depending on how Nix was built, the default value for this option + may be empty or provide <filename>/bin/sh</filename> as a + bind-mount of <command>bash</command>.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-allow-new-privileges"><term><literal>allow-new-privileges</literal></term> + <varlistentry xml:id="conf-secret-key-files"><term><literal>secret-key-files</literal></term> - <listitem><para>(Linux-specific.) By default, builders on Linux - cannot acquire new privileges by calling setuid/setgid programs or - programs that have file capabilities. For example, programs such - as <command>sudo</command> or <command>ping</command> will - fail. (Note that in sandbox builds, no such programs are available - unless you bind-mount them into the sandbox via the - <option>sandbox-paths</option> option.) You can allow the - use of such programs by enabling this option. This is impure and - usually undesirable, but may be useful in certain scenarios - (e.g. to spin up containers or set up userspace network interfaces - in tests).</para></listitem> + <listitem><para>A whitespace-separated list of files containing + secret (private) keys. These are used to sign locally-built + paths. They can be generated using <command>nix-store + --generate-binary-cache-key</command>. The corresponding public + key can be distributed to other users, who can add it to + <option>trusted-public-keys</option> in their + <filename>nix.conf</filename>.</para></listitem> </varlistentry> - <varlistentry xml:id="conf-hashed-mirrors"><term><literal>hashed-mirrors</literal></term> + <varlistentry xml:id="conf-show-trace"><term><literal>show-trace</literal></term> - <listitem><para>A list of web servers used by - <function>builtins.fetchurl</function> to obtain files by - hash. The default is - <literal>http://tarballs.nixos.org/</literal>. Given a hash type - <replaceable>ht</replaceable> and a base-16 hash - <replaceable>h</replaceable>, Nix will try to download the file - from - <literal>hashed-mirror/<replaceable>ht</replaceable>/<replaceable>h</replaceable></literal>. - This allows files to be downloaded even if they have disappeared - from their original URI. For example, given the default mirror - <literal>http://tarballs.nixos.org/</literal>, when building the derivation + <listitem><para>Causes Nix to print out a stack trace in case of Nix + expression evaluation errors.</para></listitem> -<programlisting> -builtins.fetchurl { - url = https://example.org/foo-1.2.3.tar.xz; - sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; -} -</programlisting> + </varlistentry> - Nix will attempt to download this file from - <literal>http://tarballs.nixos.org/sha256/2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae</literal> - first. If it is not available there, if will try the original URI.</para></listitem> + + <varlistentry xml:id="conf-substitute"><term><literal>substitute</literal></term> + + <listitem><para>If set to <literal>true</literal> (default), Nix + will use binary substitutes if available. 
This option can be + disabled to force building from source.</para></listitem> </varlistentry> + <varlistentry xml:id="conf-substituters"><term><literal>substituters</literal></term> + + <listitem><para>A list of URLs of substituters, separated by + whitespace. The default is + <literal>https://cache.nixos.org</literal>.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-system"><term><literal>system</literal></term> + + <listitem><para>This option specifies the canonical Nix system + name of the current installation, such as + <literal>i686-linux</literal> or + <literal>x86_64-darwin</literal>. Nix can only build derivations + whose <literal>system</literal> attribute equals the value + specified here. In general, it never makes sense to modify this + value from its default, since you can use it to ‘lie’ about the + platform you are building on (e.g., perform a Mac OS build on a + Linux machine; the result would obviously be wrong). It only + makes sense if the Nix binaries can run on multiple platforms, + e.g., ‘universal binaries’ that run on <literal>x86_64-linux</literal> and + <literal>i686-linux</literal>.</para> + + <para>It defaults to the canonical Nix system name detected by + <filename>configure</filename> at build time.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-timeout"><term><literal>timeout</literal></term> + + <listitem> + + <para>This option defines the maximum number of seconds that a + builder can run. This is useful (for instance in an automated + build system) to catch builds that are stuck in an infinite loop + but keep writing to their standard output or standard error. It + can be overridden using the <option + linkend="opt-timeout">--timeout</option> command line + switch.</para> + + <para>The value <literal>0</literal> means that there is no + timeout. This is also the default.</para> + + </listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-trusted-public-keys"><term><literal>trusted-public-keys</literal></term> + + <listitem><para>A whitespace-separated list of public keys. When + paths are copied from another Nix store (such as a binary cache), + they must be signed with one of these keys. For example: + <literal>cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= + hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=</literal>.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-trusted-substituters"><term><literal>trusted-substituters</literal></term> + + <listitem><para>A list of URLs of substituters, separated by + whitespace. These are not used by default, but can be enabled by + users of the Nix daemon by specifying <literal>--option + substituters <replaceable>urls</replaceable></literal> on the + command line. Unprivileged users are only allowed to pass a + subset of the URLs listed in <literal>substituters</literal> and + <literal>trusted-substituters</literal>.</para></listitem> + + </varlistentry> + + + <varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term> + + <listitem> + + <para>A list of names of users (separated by whitespace) that + have additional rights when connecting to the Nix daemon, such + as the ability to specify additional binary caches, or to import + unsigned NARs. You can also specify groups by prefixing them + with <literal>@</literal>; for instance, + <literal>@wheel</literal> means all users in the + <literal>wheel</literal> group. 
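For instance (a sketch, not part of this patch), a <filename>nix.conf</filename> line granting these rights to <literal>root</literal> and to every member of the <literal>wheel</literal> group could read:
<screen>
# hypothetical nix.conf fragment
trusted-users = root @wheel
</screen>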
The default is + <literal>root</literal>.</para> + + <warning><para>Adding a user to <option>trusted-users</option> + is essentially equivalent to giving that user root access to the + system. For example, the user can set + <option>sandbox-paths</option> and thereby obtain read access to + directories that are otherwise inaccessible to + them.</para></warning> + + </listitem> + + </varlistentry> + </variablelist> </para> diff --git a/doc/manual/command-ref/env-common.xml b/doc/manual/command-ref/env-common.xml index a83aeaf2e575..361d3e2b0330 100644 --- a/doc/manual/command-ref/env-common.xml +++ b/doc/manual/command-ref/env-common.xml @@ -154,6 +154,8 @@ $ mount -o bind /mnt/otherdisk/nix /nix</screen> <literal>daemon</literal> if you want to use the Nix daemon to execute Nix operations. This is necessary in <link linkend="ssec-multi-user">multi-user Nix installations</link>. + If the Nix daemon's Unix socket is at some non-standard path, + this variable should be set to <literal>unix://path/to/socket</literal>. Otherwise, it should be left unset.</para></listitem> </varlistentry> diff --git a/doc/manual/command-ref/nix-build.xml b/doc/manual/command-ref/nix-build.xml index d6b2e5e5adb7..40fe7a43f10c 100644 --- a/doc/manual/command-ref/nix-build.xml +++ b/doc/manual/command-ref/nix-build.xml @@ -29,8 +29,6 @@ </group> <replaceable>attrPath</replaceable> </arg> - <arg><option>--drv-link</option> <replaceable>drvlink</replaceable></arg> - <arg><option>--add-drv-link</option></arg> <arg><option>--no-out-link</option></arg> <arg> <group choice='req'> @@ -91,25 +89,6 @@ also <xref linkend="sec-common-options" />.</phrase></para> <variablelist> - <varlistentry><term><option>--drv-link</option> <replaceable>drvlink</replaceable></term> - - <listitem><para>Add a symlink named - <replaceable>drvlink</replaceable> to the store derivation - produced by <command>nix-instantiate</command>. The derivation is - a root of the garbage collector until the symlink is deleted or - renamed. If there are multiple derivations, numbers are suffixed - to <replaceable>drvlink</replaceable> to distinguish between - them.</para></listitem> - - </varlistentry> - - <varlistentry><term><option>--add-drv-link</option></term> - - <listitem><para>Shorthand for <option>--drv-link</option> - <filename>./derivation</filename>.</para></listitem> - - </varlistentry> - <varlistentry><term><option>--no-out-link</option></term> <listitem><para>Do not create a symlink to the output path. Note diff --git a/doc/manual/command-ref/nix-channel.xml b/doc/manual/command-ref/nix-channel.xml index 9acf44e52984..ff4021a765e0 100644 --- a/doc/manual/command-ref/nix-channel.xml +++ b/doc/manual/command-ref/nix-channel.xml @@ -31,7 +31,7 @@ <refsection><title>Description</title> -<para>A Nix channel is mechanism that allows you to automatically stay +<para>A Nix channel is a mechanism that allows you to automatically stay up-to-date with a set of pre-built Nix expressions. A Nix channel is just a URL that points to a place containing both a set of Nix expressions and a pointer to a binary cache. <phrase @@ -165,8 +165,8 @@ following files:</para> <varlistentry><term><filename>nixexprs.tar.xz</filename></term> <listitem><para>A tarball containing Nix expressions and files - referenced by them (such as build scripts and patches). At - top-level, the tarball should contain a single directory. That + referenced by them (such as build scripts and patches). At the + top level, the tarball should contain a single directory.
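For example (a sketch only; the directory and file names other than <filename>default.nix</filename> are arbitrary), listing such a tarball could show:
<screen>
$ tar tf nixexprs.tar.xz
nixexprs/
nixexprs/default.nix
nixexprs/pkgs/hello.nix
</screen>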
That directory must contain a file <filename>default.nix</filename> that serves as the channel’s “entry point”.</para></listitem> @@ -175,7 +175,7 @@ following files:</para> <varlistentry><term><filename>binary-cache-url</filename></term> <listitem><para>A file containing the URL to a binary cache (such - as <uri>https://cache.nixos.org</uri>. Nix will automatically + as <uri>https://cache.nixos.org</uri>). Nix will automatically check this cache for pre-built binaries, if the user has sufficient rights to add binary caches. For instance, in a multi-user Nix setup, the binary caches provided by the channels diff --git a/doc/manual/command-ref/nix-collect-garbage.xml b/doc/manual/command-ref/nix-collect-garbage.xml index 35a78c5b2015..43e06879691c 100644 --- a/doc/manual/command-ref/nix-collect-garbage.xml +++ b/doc/manual/command-ref/nix-collect-garbage.xml @@ -22,12 +22,6 @@ <arg><option>--delete-old</option></arg> <arg><option>-d</option></arg> <arg><option>--delete-older-than</option> <replaceable>period</replaceable></arg> - <group choice='opt'> - <arg choice='plain'><option>--print-roots</option></arg> - <arg choice='plain'><option>--print-live</option></arg> - <arg choice='plain'><option>--print-dead</option></arg> - <arg choice='plain'><option>--delete</option></arg> - </group> <arg><option>--max-freed</option> <replaceable>bytes</replaceable></arg> <arg><option>--dry-run</option></arg> </cmdsynopsis> diff --git a/doc/manual/command-ref/nix-env.xml b/doc/manual/command-ref/nix-env.xml index d4563ac47551..eac7739558be 100644 --- a/doc/manual/command-ref/nix-env.xml +++ b/doc/manual/command-ref/nix-env.xml @@ -456,7 +456,7 @@ $ nix-env -f ~/foo.nix -i '.*'</screen> from another profile: <screen> -$ nix-env -i --from-profile /nix/var/nix/profiles/foo -i gcc</screen> +$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc</screen> </para> diff --git a/doc/manual/command-ref/nix-hash.xml b/doc/manual/command-ref/nix-hash.xml index b4b509773d33..80263e18e339 100644 --- a/doc/manual/command-ref/nix-hash.xml +++ b/doc/manual/command-ref/nix-hash.xml @@ -44,7 +44,9 @@ cryptographic hash of the contents of each <replaceable>path</replaceable> and prints it on standard output. By default, it computes an MD5 hash, but other hash algorithms are -available as well. The hash is printed in hexadecimal.</para> +available as well. The hash is printed in hexadecimal. To generate +the same hash as <command>nix-prefetch-url</command> you have to +specify multiple arguments, see below for an example.</para> <para>The hash is computed over a <emphasis>serialisation</emphasis> of each path: a dump of the file system tree rooted at the path. 
This @@ -122,6 +124,15 @@ cryptographic hash as <literal>nix-store --dump <refsection><title>Examples</title> +<para>Computing the same hash as <command>nix-prefetch-url</command>: +<screen> +$ nix-prefetch-url file://<(echo test) +1lkgqb6fclns49861dwk9rzb6xnfkxbpws74mxnx01z9qyv1pjpj +$ nix-hash --type sha256 --flat --base32 <(echo test) +1lkgqb6fclns49861dwk9rzb6xnfkxbpws74mxnx01z9qyv1pjpj +</screen> +</para> + <para>Computing hashes: <screen> diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml index a5f615b0c268..5fff64a18f93 100644 --- a/doc/manual/command-ref/nix-store.xml +++ b/doc/manual/command-ref/nix-store.xml @@ -204,7 +204,7 @@ printed.)</para> with <option>-K</option>, if an output path is not identical to the corresponding output from the previous build, the new output path is left in - <filename>/nix/store/<replaceable>name</replaceable>-check.</filename></para> + <filename>/nix/store/<replaceable>name</replaceable>.check.</filename></para> <para>See also the <option>build-repeat</option> configuration option, which repeats a derivation a number of times and prevents @@ -501,10 +501,11 @@ error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' <arg choice='plain'><option>--referrers</option></arg> <arg choice='plain'><option>--referrers-closure</option></arg> <arg choice='plain'><option>--deriver</option></arg> - <arg choice='plain'><option>--deriver</option></arg> + <arg choice='plain'><option>-d</option></arg> <arg choice='plain'><option>--graph</option></arg> <arg choice='plain'><option>--tree</option></arg> <arg choice='plain'><option>--binding</option> <replaceable>name</replaceable></arg> + <arg choice='plain'><option>-b</option> <replaceable>name</replaceable></arg> <arg choice='plain'><option>--hash</option></arg> <arg choice='plain'><option>--size</option></arg> <arg choice='plain'><option>--roots</option></arg> @@ -642,6 +643,7 @@ query is applied to the target of the symlink.</para> </varlistentry> <varlistentry><term><option>--deriver</option></term> + <term><option>-d</option></term> <listitem><para>Prints the <link linkend="gloss-deriver">deriver</link> of the store paths @@ -678,6 +680,7 @@ query is applied to the target of the symlink.</para> </varlistentry> <varlistentry><term><option>--binding</option> <replaceable>name</replaceable></term> + <term><option>-b</option> <replaceable>name</replaceable></term> <listitem><para>Prints the value of the attribute <replaceable>name</replaceable> (i.e., environment variable) of diff --git a/doc/manual/command-ref/opt-common-syn.xml b/doc/manual/command-ref/opt-common-syn.xml index 3aff4e1b6357..b610b54b9620 100644 --- a/doc/manual/command-ref/opt-common-syn.xml +++ b/doc/manual/command-ref/opt-common-syn.xml @@ -9,6 +9,9 @@ </group> </arg> <arg> + <arg choice='plain'><option>--quiet</option></arg> +</arg> +<arg> <group choice='plain'> <arg choice='plain'><option>--no-build-output</option></arg> <arg choice='plain'><option>-Q</option></arg> @@ -47,7 +50,6 @@ </arg> <arg><option>--fallback</option></arg> <arg><option>--readonly-mode</option></arg> -<arg><option>--show-trace</option></arg> <arg> <option>-I</option> <replaceable>path</replaceable> diff --git a/doc/manual/command-ref/opt-common.xml b/doc/manual/command-ref/opt-common.xml index 32d53c753a22..4c572e129445 100644 --- a/doc/manual/command-ref/opt-common.xml +++ b/doc/manual/command-ref/opt-common.xml @@ -75,6 +75,23 @@ </varlistentry> +<varlistentry><term><option>--quiet</option></term> + + 
<listitem> + + <para>Decreases the level of verbosity of diagnostic messages + printed on standard error. This is the inverse option to + <option>-v</option> / <option>--verbose</option>. + </para> + + <para>This option may be specified repeatedly. See the previous + verbosity levels list.</para> + + </listitem> + +</varlistentry> + + <varlistentry><term><option>--no-build-output</option> / <option>-Q</option></term> <listitem><para>By default, output written by builders to standard @@ -301,13 +318,6 @@ </varlistentry> -<varlistentry><term><option>--show-trace</option></term> - - <listitem><para>Causes Nix to print out a stack trace in case of Nix - expression evaluation errors.</para></listitem> - -</varlistentry> - <varlistentry xml:id="opt-I"><term><option>-I</option> <replaceable>path</replaceable></term> diff --git a/doc/manual/expressions/advanced-attributes.xml b/doc/manual/expressions/advanced-attributes.xml index f3cf98371302..dfd013b5cf31 100644 --- a/doc/manual/expressions/advanced-attributes.xml +++ b/doc/manual/expressions/advanced-attributes.xml @@ -112,7 +112,13 @@ impureEnvVars = [ "http_proxy" "https_proxy" <replaceable>...</replaceable> ]; linkend="fixed-output-drvs">fixed-output derivations</link>, where impurities such as these are okay since (the hash of) the output is known in advance. It is ignored for all other - derivations.</para></listitem> + derivations.</para> + + <warning><para>The <varname>impureEnvVars</varname> implementation takes + environment variables from the current builder process. When the daemon is + performing the build, the daemon's environment variables are used. Without the + daemon, the environment variables come from the environment of the + <command>nix-build</command> invocation.</para></warning></listitem> </varlistentry> diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml index 5e88b88561fc..5489fab0026a 100644 --- a/doc/manual/expressions/builtins.xml +++ b/doc/manual/expressions/builtins.xml @@ -92,6 +92,36 @@ available as <function>builtins.derivation</function>.</para> </varlistentry> + <varlistentry><term><function>builtins.bitAnd</function> + <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> + + <listitem><para>Return the bitwise AND of the integers + <replaceable>e1</replaceable> and + <replaceable>e2</replaceable>.</para></listitem> + + </varlistentry> + + + <varlistentry><term><function>builtins.bitOr</function> + <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> + + <listitem><para>Return the bitwise OR of the integers + <replaceable>e1</replaceable> and + <replaceable>e2</replaceable>.</para></listitem> + + </varlistentry> + + + <varlistentry><term><function>builtins.bitXor</function> + <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> + + <listitem><para>Return the bitwise XOR of the integers + <replaceable>e1</replaceable> and + <replaceable>e2</replaceable>.</para></listitem> + + </varlistentry> + + <varlistentry><term><varname>builtins</varname></term> <listitem><para>The set <varname>builtins</varname> contains all @@ -126,6 +156,17 @@ if builtins ?
getEnv then builtins.getEnv "PATH" else ""</programlisting> </varlistentry> + <varlistentry><term><function>builtins.splitVersion</function> + <replaceable>s</replaceable></term> + + <listitem><para>Split a string representing a version into its + components, by the same version splitting logic underlying the + version comparison in <link linkend="ssec-version-comparisons"> + <command>nix-env -u</command></link>.</para></listitem> + + </varlistentry> + + <varlistentry><term><function>builtins.concatLists</function> <replaceable>lists</replaceable></term> @@ -134,6 +175,14 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting> </varlistentry> + <varlistentry><term><function>builtins.concatStringsSep</function> + <replaceable>separator</replaceable> <replaceable>list</replaceable></term> + + <listitem><para>Concatenate a list of strings with a separator + between each element, e.g. <literal>concatStringsSep "/" + ["usr" "local" "bin"] == "usr/local/bin"</literal></para></listitem> + + </varlistentry> <varlistentry xml:id='builtin-currentSystem'><term><varname>builtins.currentSystem</varname></term> @@ -288,6 +337,61 @@ stdenv.mkDerivation { … } </varlistentry> + <varlistentry> + <term> + <function>builtins.fetchGit</function> + <replaceable>args</replaceable> + </term> + + <listitem> + <para> + Fetch a path from git. <replaceable>args</replaceable> can be + a URL, in which case the HEAD of the repo at that URL is + fetched. Otherwise, it can be an attribute with the following + attributes (all except <varname>url</varname> optional): + </para> + + <variablelist> + <varlistentry> + <term>url</term> + <listitem> + <para> + The URL of the repo. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>name</term> + <listitem> + <para> + The name of the directory the repo should be exported to + in the store. Defaults to the basename of the URL. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>rev</term> + <listitem> + <para> + The git revision to fetch. Defaults to the tip of + <varname>ref</varname>. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>ref</term> + <listitem> + <para> + The git ref to look for the requested revision under. + This is often a branch or tag name. Defaults to + <literal>HEAD</literal>. + </para> + </listitem> + </varlistentry> + </variablelist> + </listitem> + </varlistentry> + <varlistentry><term><function>builtins.filter</function> <replaceable>f</replaceable> <replaceable>xs</replaceable></term> @@ -300,8 +404,9 @@ stdenv.mkDerivation { … } </varlistentry> - <varlistentry><term><function>builtins.filterSource</function> - <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> + <varlistentry xml:id='builtin-filterSource'> + <term><function>builtins.filterSource</function> + <replaceable>e1</replaceable> <replaceable>e2</replaceable></term> <listitem> @@ -760,6 +865,75 @@ Evaluates to <literal>[ "foo" ]</literal>. </varlistentry> + <varlistentry> + <term> + <function>builtins.path</function> + <replaceable>args</replaceable> + </term> + + <listitem> + <para> + An enrichment of the built-in path type, based on the attributes + present in <replaceable>args</replaceable>. All are optional + except <varname>path</varname>: + </para> + + <variablelist> + <varlistentry> + <term>path</term> + <listitem> + <para>The underlying path.</para> + </listitem> + </varlistentry> + <varlistentry> + <term>name</term> + <listitem> + <para> + The name of the path when added to the store. 
This can + used to reference paths that have nix-illegal characters + in their names, like <literal>@</literal>. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>filter</term> + <listitem> + <para> + A function of the type expected by + <link linkend="builtin-filterSource">builtins.filterSource</link>, + with the same semantics. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>recursive</term> + <listitem> + <para> + When <literal>false</literal>, when + <varname>path</varname> is added to the store it is with a + flat hash, rather than a hash of the NAR serialization of + the file. Thus, <varname>path</varname> must refer to a + regular file, not a directory. This allows similar + behavior to <literal>fetchurl</literal>. Defaults to + <literal>true</literal>. + </para> + </listitem> + </varlistentry> + <varlistentry> + <term>sha256</term> + <listitem> + <para> + When provided, this is the expected hash of the file at + the path. Evaluation will fail if the hash is incorrect, + and providing a hash allows + <literal>builtins.path</literal> to be used even when the + <literal>pure-eval</literal> nix config option is on. + </para> + </listitem> + </varlistentry> + </variablelist> + </listitem> + </varlistentry> <varlistentry><term><function>builtins.pathExists</function> <replaceable>path</replaceable></term> @@ -1059,7 +1233,10 @@ in foo</programlisting> This is not allowed because it would cause a cyclic dependency in the computation of the cryptographic hashes for - <varname>foo</varname> and <varname>bar</varname>.</para></listitem> + <varname>foo</varname> and <varname>bar</varname>.</para> + <para>It is also not possible to reference the result of a derivation. + If you are using Nixpkgs, the <literal>writeTextFile</literal> function is able to + do that.</para></listitem> </varlistentry> diff --git a/doc/manual/expressions/debug-build.xml b/doc/manual/expressions/debug-build.xml deleted file mode 100644 index 0c1f4e6719b2..000000000000 --- a/doc/manual/expressions/debug-build.xml +++ /dev/null @@ -1,34 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-debug-build"> - -<title>Debugging Build Failures</title> - -<para>At the beginning of each phase of the build (such as unpacking, -building or installing), the set of all shell variables is written to -the file <filename>env-vars</filename> at the top-level build -directory. This is useful for debugging: it allows you to recreate -the environment in which a build was performed. For instance, if a -build fails, then assuming you used the <option>-K</option> flag, you -can go to the output directory and <quote>switch</quote> to the -environment of the builder: - -<screen> -$ nix-build -K ./foo.nix -... fails, keeping build directory `/tmp/nix-1234-0' - -$ cd /tmp/nix-1234-0 - -$ source env-vars - -<lineannotation>(edit some files...)</lineannotation> - -$ make - -<lineannotation>(execution continues with the same GCC, make, etc.)</lineannotation></screen> - -</para> - -</section> diff --git a/doc/manual/expressions/language-constructs.xml b/doc/manual/expressions/language-constructs.xml index 2f0027d479cd..47d95f8a13e3 100644 --- a/doc/manual/expressions/language-constructs.xml +++ b/doc/manual/expressions/language-constructs.xml @@ -61,7 +61,7 @@ evaluates to <literal>"foobar"</literal>. 
<simplesect><title>Inheriting attributes</title> -<para>When defining a set it is often convenient to copy variables +<para>When defining a set or in a let-expression it is often convenient to copy variables from the surrounding lexical scope (e.g., when you want to propagate attributes). This can be shortened using the <literal>inherit</literal> keyword. For instance, @@ -72,7 +72,15 @@ let x = 123; in y = 456; }</programlisting> -evaluates to <literal>{ x = 123; y = 456; }</literal>. (Note that +is equivalent to + +<programlisting> +let x = 123; in +{ x = x; + y = 456; +}</programlisting> + +and both evaluate to <literal>{ x = 123; y = 456; }</literal>. (Note that this works because <varname>x</varname> is added to the lexical scope by the <literal>let</literal> construct.) It is also possible to inherit attributes from another set. For instance, in this fragment @@ -101,6 +109,26 @@ variables from the surrounding scope (<varname>fetchurl</varname> <varname>libXaw</varname> (the X Athena Widgets) from the <varname>xlibs</varname> (X11 client-side libraries) set.</para> +<para> +Summarizing the fragment + +<programlisting> +... +inherit x y z; +inherit (src-set) a b c; +...</programlisting> + +is equivalent to + +<programlisting> +... +x = x; y = y; z = z; +a = src-set.a; b = src-set.b; c = src-set.c; +...</programlisting> + +when used while defining local variables in a let-expression or +while defining a set.</para> + </simplesect> diff --git a/doc/manual/expressions/simple-building-testing.xml b/doc/manual/expressions/simple-building-testing.xml index bd3901a13351..0348c082b205 100644 --- a/doc/manual/expressions/simple-building-testing.xml +++ b/doc/manual/expressions/simple-building-testing.xml @@ -81,6 +81,4 @@ Just pass the option <link linkend='opt-max-jobs'><option>-j in parallel, or set. Typically this should be the number of CPUs.</para> -<xi:include href="debug-build.xml" /> - </section> diff --git a/doc/manual/glossary/glossary.xml b/doc/manual/glossary/glossary.xml index e0636044cc25..4977825578f1 100644 --- a/doc/manual/glossary/glossary.xml +++ b/doc/manual/glossary/glossary.xml @@ -85,29 +85,48 @@ <glossentry xml:id="gloss-reference"><glossterm>reference</glossterm> - <glossdef><para>A store path <varname>P</varname> is said to have a - reference to a store path <varname>Q</varname> if the store object - at <varname>P</varname> contains the path <varname>Q</varname> - somewhere. This implies than an execution involving - <varname>P</varname> potentially needs <varname>Q</varname> to be - present. The <emphasis>references</emphasis> of a store path are - the set of store paths to which it has a reference.</para></glossdef> + <glossdef> + <para>A store path <varname>P</varname> is said to have a + reference to a store path <varname>Q</varname> if the store object + at <varname>P</varname> contains the path <varname>Q</varname> + somewhere. The <emphasis>references</emphasis> of a store path are + the set of store paths to which it has a reference. + </para> + <para>A derivation can reference other derivations and sources + (but not output paths), whereas an output path only references other + output paths. + </para> + </glossdef> </glossentry> +<glossentry xml:id="gloss-reachable"><glossterm>reachable</glossterm> + + <glossdef><para>A store path <varname>Q</varname> is reachable from + another store path <varname>P</varname> if <varname>Q</varname> is in the + <link linkend="gloss-closure">closure</link> of the + <link linkend="gloss-reference">references</link> relation. 
+ </para></glossdef> +</glossentry> <glossentry xml:id="gloss-closure"><glossterm>closure</glossterm> <glossdef><para>The closure of a store path is the set of store paths that are directly or indirectly “reachable” from that store path; that is, it’s the closure of the path under the <link - linkend="gloss-reference">references</link> relation. For instance, - if the store object at path <varname>P</varname> contains a - reference to path <varname>Q</varname>, then <varname>Q</varname> is - in the closure of <varname>P</varname>. For correct deployment it - is necessary to deploy whole closures, since otherwise at runtime - files could be missing. The command <command>nix-store - -qR</command> prints out closures of store paths.</para></glossdef> + linkend="gloss-reference">references</link> relation. For a package, the + closure of its derivation is equivalent to the build-time + dependencies, while the closure of its output path is equivalent to its + runtime dependencies. For correct deployment it is necessary to deploy whole + closures, since otherwise at runtime files could be missing. The command + <command>nix-store -qR</command> prints out closures of store paths. + </para> + <para>As an example, if the store object at path <varname>P</varname> contains + a reference to path <varname>Q</varname>, then <varname>Q</varname> is + in the closure of <varname>P</varname>. Further, if <varname>Q</varname> + references <varname>R</varname> then <varname>R</varname> is also in + the closure of <varname>P</varname>. + </para></glossdef> </glossentry> @@ -147,7 +166,7 @@ linkend="sec-profiles" />.</para> </glossdef> - + </glossentry> diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml index 24e76eafeb18..7e8dfb0db3d4 100644 --- a/doc/manual/installation/installing-binary.xml +++ b/doc/manual/installation/installing-binary.xml @@ -79,16 +79,6 @@ alice$ ./install </para> -<para>Nix can be uninstalled using <command>rpm -e nix</command> or -<command>dpkg -r nix</command> on RPM- and Dpkg-based systems, -respectively. After this you should manually remove the Nix store and -other auxiliary data, if desired: - -<screen> -$ rm -rf /nix</screen> - -</para> - <para>You can uninstall Nix simply by running: <screen> diff --git a/doc/manual/installation/multi-user.xml b/doc/manual/installation/multi-user.xml index a13e3c89be78..69ae1ef27041 100644 --- a/doc/manual/installation/multi-user.xml +++ b/doc/manual/installation/multi-user.xml @@ -52,34 +52,6 @@ This creates 10 build users. There can never be more concurrent builds than the number of build users, so you may want to increase this if you expect to do many builds at the same time.</para> -<para>On macOS, you can create the required group and users by -running the following script: - -<programlisting> -#! /bin/bash -e - -dseditgroup -o create nixbld -q - -gid=$(dscl . -read /Groups/nixbld | awk '($1 == "PrimaryGroupID:") {print $2 }') - -echo "created nixbld group with gid $gid" - -for i in $(seq 1 10); do - user=/Users/nixbld$i - uid="$((30000 + $i))" - dscl . create $user - dscl . create $user RealName "Nix build user $i" - dscl . create $user PrimaryGroupID "$gid" - dscl . create $user UserShell /usr/bin/false - dscl . create $user NFSHomeDirectory /var/empty - dscl . 
create $user UniqueID "$uid" - dseditgroup -o edit -a nixbld$i -t user nixbld - echo "created nixbld$i user with uid $uid" -done -</programlisting> - -</para> - </simplesect> diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml index 49660c36e397..01e9688d635f 100644 --- a/doc/manual/installation/prerequisites-source.xml +++ b/doc/manual/installation/prerequisites-source.xml @@ -9,6 +9,9 @@ <itemizedlist> <listitem><para>GNU Make.</para></listitem> + + <listitem><para>Bash Shell. The <literal>./configure</literal> script + relies on bashisms, so Bash is required.</para></listitem> <listitem><para>A version of GCC or Clang that supports C++14.</para></listitem> @@ -28,6 +31,14 @@ distribution does not provide these, you can obtain bzip2 from <link xlink:href="http://www.bzip.org/"/>.</para></listitem> + <listitem><para><literal>liblzma</literal>, which is provided by + XZ Utils. If your distribution does not provide this, you can + get it from <link xlink:href="https://tukaani.org/xz/"/>.</para></listitem> + + <listitem><para>cURL and its library. If your distribution does not + provide it, you can get it from <link + xlink:href="https://curl.haxx.se/"/>.</para></listitem> + <listitem><para>The SQLite embedded database library, version 3.6.19 or higher. If your distribution does not provide it, please install it from <link xlink:href="http://www.sqlite.org/" />.</para></listitem> diff --git a/doc/manual/installation/supported-platforms.xml b/doc/manual/installation/supported-platforms.xml index a468a5640637..6858573ff407 100644 --- a/doc/manual/installation/supported-platforms.xml +++ b/doc/manual/installation/supported-platforms.xml @@ -33,7 +33,4 @@ </para> -<para>Nix is fairly portable, so it should work on most platforms that -support POSIX threads and have a C++11 compiler.</para> - </chapter> diff --git a/doc/manual/introduction/about-nix.xml b/doc/manual/introduction/about-nix.xml index 7cb124033f3e..e8c0a29753a1 100644 --- a/doc/manual/introduction/about-nix.xml +++ b/doc/manual/introduction/about-nix.xml @@ -60,7 +60,8 @@ This is because tools such as compilers don’t search in per-packages directories such as <filename>/nix/store/5lbfaxb722zp…-openssl-0.9.8d/include</filename>, so if a package builds correctly on your system, this is because you -specified the dependency explicitly.</para> +specified the dependency explicitly. 
This takes care of the build-time +dependencies.</para> <para>Once a package is built, runtime dependencies are found by scanning binaries for the hash parts of Nix store paths (such as @@ -262,7 +263,7 @@ xlink:href="http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html">GNU LGPLv2.1 or (at your option) any later version</link>.</para> <para>Nix uses the <link -xlink:href="https://github.com/antirez/linenoise">linenoise +xlink:href="https://github.com/arangodb/linenoise-ng">linenoise-ng library</link>, which has the following license:</para> <programlisting><xi:include href="../../../src/linenoise/LICENSE" parse="text" /></programlisting> diff --git a/doc/manual/introduction/quick-start.xml b/doc/manual/introduction/quick-start.xml index aa239b7538b4..1ce6c8d50a1b 100644 --- a/doc/manual/introduction/quick-start.xml +++ b/doc/manual/introduction/quick-start.xml @@ -15,7 +15,7 @@ to subsequent chapters.</para> <step><para>Install single-user Nix by running the following: <screen> -$ curl https://nixos.org/nix/install | sh +$ bash <(curl https://nixos.org/nix/install) </screen> This will install Nix in <filename>/nix</filename>. The install script diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml index 61205d916993..b408b6817727 100644 --- a/doc/manual/manual.xml +++ b/doc/manual/manual.xml @@ -12,19 +12,14 @@ <firstname>Eelco</firstname> <surname>Dolstra</surname> </personname> - <affiliation> - <orgname>LogicBlox</orgname> - </affiliation> <contrib>Author</contrib> </author> <copyright> - <year>2004-2014</year> + <year>2004-2018</year> <holder>Eelco Dolstra</holder> </copyright> - <date>November 2014</date> - </info> <!-- @@ -41,7 +36,6 @@ <xi:include href="expressions/writing-nix-expressions.xml" /> <xi:include href="advanced-topics/advanced-topics.xml" /> <xi:include href="command-ref/command-ref.xml" /> - <xi:include href="troubleshooting/troubleshooting.xml" /> <xi:include href="glossary/glossary.xml" /> <xi:include href="hacking.xml" /> <xi:include href="release-notes/release-notes.xml" /> diff --git a/doc/manual/packages/garbage-collection.xml b/doc/manual/packages/garbage-collection.xml index 03b8e4c976c1..a1b0ef22a11e 100644 --- a/doc/manual/packages/garbage-collection.xml +++ b/doc/manual/packages/garbage-collection.xml @@ -52,6 +52,14 @@ garbage collector as follows: <screen> $ nix-store --gc</screen> +The behaviour of the garbage collector is affected by the + <literal>keep-derivations</literal> (default: true) and <literal>keep-outputs</literal> +(default: false) options in the Nix configuration file. The defaults will ensure +that all derivations that are not build-time dependencies of garbage collector roots +will be collected, and that all output paths that are not runtime dependencies +will be collected. (This is usually what you want, but while you are developing +it may make sense to keep outputs to ensure that rebuild times are quick.)
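On a development machine, a corresponding configuration (a sketch using the option names mentioned above) might be:
<screen>
# hypothetical nix.conf fragment for development machines
# keep build outputs and derivations of live paths around
keep-outputs = true
keep-derivations = true
</screen>
With these settings, the garbage collector still deletes dead paths but keeps the outputs and derivations it would otherwise discard, so rebuilds stay quick.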
+ If you are feeling uncertain, you can also first view what files would be deleted: diff --git a/doc/manual/packages/ssh-substituter.xml b/doc/manual/packages/ssh-substituter.xml index f24f354c4c39..8db3f96625d3 100644 --- a/doc/manual/packages/ssh-substituter.xml +++ b/doc/manual/packages/ssh-substituter.xml @@ -12,7 +12,7 @@ automatically fetching any store paths in Firefox’s closure if they are available on the server <literal>avalon</literal>: <screen> -$ nix-env -i firefox --option ssh-substituter-hosts alice@avalon +$ nix-env -i firefox --substituters ssh://alice@avalon </screen> This works similar to the binary cache substituter that Nix usually @@ -31,7 +31,7 @@ an SSH passphrase interactively. Therefore, you should use installing it into your profile, e.g. <screen> -$ nix-store -r /nix/store/m85bxg…-firefox-34.0.5 --option ssh-substituter-hosts alice@avalon +$ nix-store -r /nix/store/m85bxg…-firefox-34.0.5 --substituters ssh://alice@avalon </screen> This is essentially equivalent to doing diff --git a/doc/manual/release-notes/release-notes.xml b/doc/manual/release-notes/release-notes.xml index c4b14bc5499e..b8392a647af9 100644 --- a/doc/manual/release-notes/release-notes.xml +++ b/doc/manual/release-notes/release-notes.xml @@ -12,7 +12,7 @@ </partintro> --> -<xi:include href="rl-1.12.xml" /> +<xi:include href="rl-2.0.xml" /> <xi:include href="rl-1.11.10.xml" /> <xi:include href="rl-1.11.xml" /> <xi:include href="rl-1.10.xml" /> diff --git a/doc/manual/release-notes/rl-1.12.xml b/doc/manual/release-notes/rl-1.12.xml deleted file mode 100644 index 29943e3e6e97..000000000000 --- a/doc/manual/release-notes/rl-1.12.xml +++ /dev/null @@ -1,426 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="ssec-relnotes-1.12"> - -<title>Release 1.12 (TBA)</title> - -<para>This release has the following new features:</para> - -<itemizedlist> - - <listitem> - <para>Start of new <command>nix</command> command line - interface. 
This is a work in progress and the interface is subject - to change.</para> - - <itemizedlist> - - <listitem><para>Self-documenting: <option>--help</option> shows - all available command-line arguments.</para></listitem> - - <listitem><para><option>--help-config</option> shows all - configuration options.</para></listitem> - - <listitem><para><command>nix build</command>: Replacement for - <command>nix-build</command>.</para></listitem> - - <listitem><para><command>nix ls-store</command> and <command>nix - ls-nar</command> allow listing the contents of a store path or - NAR file.</para></listitem> - - <listitem><para><command>nix cat-store</command> and - <command>nix cat-nar</command> allow extracting a file from a - store path or NAR file.</para></listitem> - - <listitem><para><command>nix verify</command> checks whether a - store path is unmodified and/or is trusted.</para></listitem> - - <listitem><para><command>nix copy-sigs</command> copies - signatures from one store to another.</para></listitem> - - <listitem><para><command>nix sign-paths</command> signs store - paths.</para></listitem> - - <listitem><para><command>nix copy</command> copies paths between - arbitrary Nix stores, generalising - <command>nix-copy-closure</command> and - <command>nix-push</command>.</para></listitem> - - <listitem><para><command>nix path-info</command> shows - information about store paths.</para></listitem> - - <listitem><para><command>nix run</command> starts a shell in - which the specified packages are available.</para></listitem> - - <listitem><para><command>nix log</command> shows the build log - of a package or path. If the build log is not available locally, - it will try to obtain it from a binary cache.</para></listitem> - - <listitem><para><command>nix eval</command> replaces - <command>nix-instantiate --eval</command>.</para></listitem> - - <listitem><para><command>nix dump-path</command> to get a NAR - from a store path.</para></listitem> - - <listitem><para><command>nix edit</command> opens the source - code of a package in an editor.</para></listitem> - - <listitem><para><command>nix search</command> replaces - <command>nix-env -qa</command>. It searches the available - packages for occurences of a search string in the attribute - name, package name or description. It caches available packages - to speed up searches.</para></listitem> - - <listitem><para><command>nix why-depends</command> (d41c5eb13f4f3a37d80dbc6d3888644170c3b44a).</para></listitem> - - <listitem><para><command>nix show-derivation</command> (e8d6ee7c1b90a2fe6d824f1a875acc56799ae6e2).</para></listitem> - - <listitem><para><command>nix add-to-store</command> (970366266b8df712f5f9cedb45af183ef5a8357f).</para></listitem> - - <listitem><para>Progress indicator.</para></listitem> - - <listitem><para>All options are available as flags now - (b8283773bd64d7da6859ed520ee19867742a03ba).</para></listitem> - - </itemizedlist> - - </listitem> - - <listitem> - <para>The external program <command>nix-repl</command> has been - integrated into Nix as <command>nix repl</command>.</para> - </listitem> - - <listitem> - <para>New build mode <command>nix-build --hash</command> that - builds a derivation, computes the hash of the output, and moves - the output to the store path corresponding to what a fixed-output - derivation with that hash would produce. 
- (Add docs and examples; see d367b8e7875161e655deaa96bf8a5dd0bcf8229e)</para> - </listitem> - - <listitem> - <para>It is no longer necessary to set the - <envar>NIX_REMOTE</envar> environment variable if you need to use - the Nix daemon. Nix will use the daemon automatically if you don’t - have write access to the Nix database.</para> - </listitem> - - <listitem> - <para>The Nix language now supports floating point numbers. They are - based on regular C++ <literal>float</literal> and compatible with - existing integers and number-related operations. Export and import to and - from JSON and XML works, too.</para> - </listitem> - - <listitem> - <para><command>nix-shell</command> now sets the - <varname>IN_NIX_SHELL</varname> environment variable during - evaluation and in the shell itself. This can be used to perform - different actions depending on whether you’re in a Nix shell or in - a regular build. Nixpkgs provides - <varname>lib.inNixShell</varname> to check this variable during - evaluation. (bb36a1a3cf3fbe6bc9d0afcc5fa0f928bed03170)</para> - </listitem> - - <listitem> - <para>Internal: all <classname>Store</classname> classes are now - thread-safe. <classname>RemoteStore</classname> supports multiple - concurrent connections to the daemon. This is primarily useful in - multi-threaded programs such as - <command>hydra-queue-runner</command>.</para> - </listitem> - - <listitem> - <para>The dependency on Perl has been removed. As a result, some - (obsolete) programs have been removed: <command>nix-push</command> - (replaced by <command>nix copy</command>), - <command>nix-pull</command> (obsoleted by binary caches), - <command>nix-generate-patches</command>, - <command>bsdiff</command>, <command>bspatch</command>.</para> - </listitem> - - <listitem> - <para>Improved store abstraction. Substituters - eliminated. BinaryCacheStore, LocalBinaryCacheStore, - HttpBinaryCacheStore, S3BinaryCacheStore (compile-time - optional), SSHStore. Add docs + examples? - </para> - </listitem> - - <listitem> - <para>Nix now stores signatures for local store - paths. Locally-built paths are now signed automatically using the - secret keys specified by the <option>secret-key-files</option> - store option.</para> - - <para>In addition, store paths that have been built locally are - marked as “ultimately trusted”, and content-addressable store - paths carry a “content-addressability assertion” that allow them - to be trusted without any signatures.</para> - </listitem> - - <listitem> - <para><envar>NIX_PATH</envar> is now lazy, so URIs in the path are - only downloaded if they are needed for evaluation.</para> - </listitem> - - <listitem> - <para>You can now use - <uri>channel:<replaceable>channel-name</replaceable></uri> as a - short-hand for - <uri>https://nixos.org/channels/<replaceable>channel-name</replaceable>/nixexprs.tar.xz</uri>. For - example, <literal>nix-build channel:nixos-15.09 -A hello</literal> - will build the GNU Hello package from the - <literal>nixos-15.09</literal> channel.</para> - </listitem> - - <listitem> - <para>When <option>--no-build-output</option> is given, the last - 10 lines of the build log will be shown if a build - fails.</para> - </listitem> - - <listitem> - <para><function>builtins.fetchGit</function>. - (38539b943a060d9cdfc24d6e5d997c0885b8aa2f)</para> - </listitem> - - <listitem> - <para><literal><nix/fetchurl.nix></literal> now uses the - content-addressable tarball cache at - <uri>http://tarballs.nixos.org/</uri>, just like - <function>fetchurl</function> in - Nixpkgs. 
(f2682e6e18a76ecbfb8a12c17e3a0ca15c084197)</para> - </listitem> - - <listitem> - <para>Chroot Nix stores: allow the “physical” location of the Nix - store (e.g. <filename>/home/alice/nix/store</filename>) to differ - from its “logical” location (typically - <filename>/nix/store</filename>). This allows non-root users to - use Nix while still getting the benefits from prebuilt binaries - from - <uri>cache.nixos.org</uri>. (4494000e04122f24558e1436e66d20d89028b4bd, - 3eb621750848e0e6b30e5a79f76afbb096bb6c8a)</para> - </listitem> - - <listitem> - <para>On Linux, builds are now executed in a user - namespace with uid 1000 and gid 100.</para> - </listitem> - - <listitem> - <para><function>builtins.fetchurl</function> and - <function>builtins.fetchTarball</function> now support - <varname>sha256</varname> and <varname>name</varname> - attributes.</para> - </listitem> - - <listitem> - <para><literal>HttpBinaryCacheStore</literal> (the replacement of - <command>download-from-binary-cache</command>) now retries - automatically on certain HTTP error codes.</para> - </listitem> - - <listitem> - <para>Derivation attributes can now reference the outputs of the - derivation using the <function>placeholder</function> builtin - function. For example, the attribute - -<programlisting> -configureFlags = "--prefix=${placeholder "out"} --includedir=${placeholder "dev"}"; -</programlisting> - - will cause the <envar>configureFlags</envar> environment variable - to contain the actual store paths corresponding to the - <literal>out</literal> and <literal>dev</literal> outputs. TODO: - add docs.</para> - </listitem> - - <listitem> - <para>Support for HTTP/2. This makes binary cache lookups much - more efficient. (90ad02bf626b885a5dd8967894e2eafc953bdf92)</para> - </listitem> - - <listitem> - <para>The <option>build-sandbox-paths</option> configuration - option can now specify optional paths by appending a - <literal>?</literal>, e.g. <literal>/dev/nvidiactl?</literal> will - bind-mount <varname>/dev/nvidiactl</varname> only if it - exists.</para> - </listitem> - - <listitem> - <para>More support for testing build reproducibility: when - <option>enforce-determinism</option> is set to - <literal>false</literal>, it’s no longer a fatal error build - rounds produce different output - (8bdf83f936adae6f2c907a6d2541e80d4120f051); add a hook to run - diffoscope when build rounds produce different output - (9a313469a4bdea2d1e8df24d16289dc2a172a169w).</para> - </listitem> - - <listitem> - <para>Kill builds as soon as stdout/stderr is closed. This fixes a - bug that allowed builds to hang Nix indefinitely (regardless of - timeouts). (21948deed99a3295e4d5666e027a6ca42dc00b40)</para> - </listitem> - - <listitem> - <para>Add support for passing structured data to builders. TODO: - document. (6de33a9c675b187437a2e1abbcb290981a89ecb1)</para> - </listitem> - - <listitem> - <para><varname>exportReferencesGraph</varname>: Export more - complete info in JSON - format. (c2b0d8749f7e77afc1c4b3e8dd36b7ee9720af4a)</para> - </listitem> - - <listitem> - <para>Support for - netrc. (e6e74f987f0fa284d220432d426eb965269a97d6, - 302386f775eea309679654e5ea7c972fb6e7b9af)</para> - </listitem> - - <listitem> - <para>Support <uri>s3://</uri> URIs in all places where Nix allows - URIs. (9ff9c3f2f80ba4108e9c945bbfda2c64735f987b)</para> - </listitem> - - <listitem> - <para>The <option>build-max-jobs</option> option can be set to - <literal>auto</literal> to use the number of CPUs in the - system. 
(7251d048fa812d2551b7003bc9f13a8f5d4c95a5)</para> - </listitem> - - <listitem> - <para>Add support for Brotli compression. - <uri>cache.nixos.org</uri> compresses build logs using - Brotli.</para> - </listitem> - - <listitem> - <para>Substitutions from binary caches now require signatures by - default. This was already the case on - NixOS. (ecbc3fedd3d5bdc5a0e1a0a51b29062f2874ac8b)</para> - </listitem> - - <listitem> - <para><command>nix-env</command> now ignores packages with bad - derivation names (in particular those starting with a digit or - containing a - dot). (b0cb11722626e906a73f10dd9a0c9eea29faf43a)</para> - </listitem> - - <listitem> - <para>Renamed various configuration options. (TODO: in progress)</para> - </listitem> - - <listitem> - <para>Remote machines can now be specified on the command - line. TODO: - document. (1a68710d4dff609bbaf61db3e17a2573f0aadf17)</para> - </listitem> - - <listitem> - <para>In Linux sandbox builds, we now use - <filename>/build</filename> instead of <filename>/tmp</filename> - as the temporary build directory. This fixes potential security - problems when a build accidentally stores its - <envar>TMPDIR</envar> in some critical place, such as an - RPATH. (eba840c8a13b465ace90172ff76a0db2899ab11b)</para> - </listitem> - - <listitem> - <para>In Linux sandbox builds, we now provide a default - <filename>/bin/sh</filename> (namely <filename>ash</filename> from - BusyBox). (a2d92bb20e82a0957067ede60e91fab256948b41)</para> - </listitem> - - <listitem> - <para>Make all configuration options available as command line - flags (b8283773bd64d7da6859ed520ee19867742a03ba).</para> - </listitem> - - <listitem> - <para>Support base-64 - hashes. (c0015e87af70f539f24d2aa2bc224a9d8b84276b)</para> - </listitem> - - <listitem> - <para><command>nix-shell</command> now uses - <varname>bashInteractive</varname> from Nixpkgs, rather than the - <command>bash</command> command that happens to be in the caller’s - <envar>PATH</envar>. This is especially important on macOS where - the <command>bash</command> provided by the system is seriously - outdated and cannot execute <literal>stdenv</literal>’s setup - script.</para> - </listitem> - - <listitem> - <para>New builtin functions: <function>builtins.split</function> - (b8867a0239b1930a16f9ef3f7f3e864b01416dff), - <function>builtins.partition</function>.</para> - </listitem> - - <listitem> - <para>Automatic garbage collection.</para> - </listitem> - - <listitem> - <para><command>nix-store -q --roots</command> and - <command>nix-store --gc --print-roots</command> now show temporary - and in-memory roots.</para> - </listitem> - - <listitem> - <para>Builders can now communicate what build phase they are in by - writing messages to the file descriptor specified in - <envar>NIX_LOG_FD</envar>. (88e6bb76de5564b3217be9688677d1c89101b2a3) - </para> - </listitem> - -</itemizedlist> - -<para>Some features were removed:</para> - -<itemizedlist> - - <listitem> - <para>“Nested” log output. As a result, - <command>nix-log2xml</command> was also removed.</para> - </listitem> - - <listitem> - <para>OpenSSL-based signing. (f435f8247553656774dd1b2c88e9de5d59cab203)</para> - </listitem> - - <listitem> - <para>Caching of failed - builds. (8cffec84859cec8b610a2a22ab0c4d462a9351ff)</para> - </listitem> - - <listitem> - <para><filename>nix-mode.el</filename> has been removed from - Nix. 
It is now a separate repository in - <uri>https://github.com/NixOS/nix-mode</uri> and can be installed - through the MELPA package repository.</para> - </listitem> - - <listitem> - <para>In restricted evaluation mode - (<option>--restrict-eval</option>), builtin functions that - download from the network (such as <function>fetchGit</function>) - are permitted to fetch underneath the list of URI prefixes - specified in the option <option>allowed-uris</option>.</para> - </listitem> - -</itemizedlist> - -<para>This release has contributions from TBD.</para> - -</section> diff --git a/doc/manual/release-notes/rl-2.0.xml b/doc/manual/release-notes/rl-2.0.xml new file mode 100644 index 000000000000..fc9a77b08b60 --- /dev/null +++ b/doc/manual/release-notes/rl-2.0.xml @@ -0,0 +1,1012 @@ +<section xmlns="http://docbook.org/ns/docbook" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:xi="http://www.w3.org/2001/XInclude" + version="5.0" + xml:id="ssec-relnotes-2.0"> + +<title>Release 2.0 (2018-02-22)</title> + +<para>The following incompatible changes have been made:</para> + +<itemizedlist> + + <listitem> + <para>The manifest-based substituter mechanism + (<command>download-using-manifests</command>) has been <link + xlink:href="https://github.com/NixOS/nix/commit/867967265b80946dfe1db72d40324b4f9af988ed">removed</link>. It + has been superseded by the binary cache substituter mechanism + since several years. As a result, the following programs have been + removed: + + <itemizedlist> + <listitem><para><command>nix-pull</command></para></listitem> + <listitem><para><command>nix-generate-patches</command></para></listitem> + <listitem><para><command>bsdiff</command></para></listitem> + <listitem><para><command>bspatch</command></para></listitem> + </itemizedlist> + </para> + </listitem> + + <listitem> + <para>The “copy from other stores” substituter mechanism + (<command>copy-from-other-stores</command> and the + <envar>NIX_OTHER_STORES</envar> environment variable) has been + removed. It was primarily used by the NixOS installer to copy + available paths from the installation medium. The replacement is + to use a chroot store as a substituter + (e.g. <literal>--substituters /mnt</literal>), or to build into a + chroot store (e.g. <literal>--store /mnt --substituters /</literal>).</para> + </listitem> + + <listitem> + <para>The command <command>nix-push</command> has been removed as + part of the effort to eliminate Nix's dependency on Perl. You can + use <command>nix copy</command> instead, e.g. <literal>nix copy + --to file:///tmp/my-binary-cache <replaceable>paths…</replaceable></literal></para> + </listitem> + + <listitem> + <para>The “nested” log output feature (<option>--log-type + pretty</option>) has been removed. As a result, + <command>nix-log2xml</command> was also removed.</para> + </listitem> + + <listitem> + <para>OpenSSL-based signing has been <link + xlink:href="https://github.com/NixOS/nix/commit/f435f8247553656774dd1b2c88e9de5d59cab203">removed</link>. This + feature was never well-supported. A better alternative is provided + by the <option>secret-key-files</option> and + <option>trusted-public-keys</option> options.</para> + </listitem> + + <listitem> + <para>Failed build caching has been <link + xlink:href="https://github.com/NixOS/nix/commit/8cffec84859cec8b610a2a22ab0c4d462a9351ff">removed</link>. 
This + feature was introduced to support the Hydra continuous build + system, but Hydra no longer uses it.</para> + </listitem> + + <listitem> + <para><filename>nix-mode.el</filename> has been removed from + Nix. It is now <link + xlink:href="https://github.com/NixOS/nix-mode">a separate + repository</link> and can be installed through the MELPA package + repository.</para> + </listitem> + +</itemizedlist> + +<para>This release has the following new features:</para> + +<itemizedlist> + + <listitem> + <para>It introduces a new command named <command>nix</command>, + which is intended to eventually replace all + <command>nix-*</command> commands with a more consistent and + better designed user interface. It currently provides replacements + for some (but not all) of the functionality provided by + <command>nix-store</command>, <command>nix-build</command>, + <command>nix-shell -p</command>, <command>nix-env -qa</command>, + <command>nix-instantiate --eval</command>, + <command>nix-push</command> and + <command>nix-copy-closure</command>. It has the following major + features:</para> + + <itemizedlist> + + <listitem> + <para>Unlike the legacy commands, it has a consistent way to + refer to packages and package-like arguments (like store + paths). For example, the following commands all copy the GNU + Hello package to a remote machine: + + <screen>nix copy --to ssh://machine nixpkgs.hello</screen> + <screen>nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10</screen> + <screen>nix copy --to ssh://machine '(with import <nixpkgs> {}; hello)'</screen> + + By contrast, <command>nix-copy-closure</command> only accepted + store paths as arguments.</para> + </listitem> + + <listitem> + <para>It is self-documenting: <option>--help</option> shows + all available command-line arguments. If + <option>--help</option> is given after a subcommand, it shows + examples for that subcommand. <command>nix + --help-config</command> shows all configuration + options.</para> + </listitem> + + <listitem> + <para>It is much less verbose. By default, it displays a + single-line progress indicator that shows how many packages + are left to be built or downloaded, and (if there are running + builds) the most recent line of builder output. If a build + fails, it shows the last few lines of builder output. The full + build log can be retrieved using <command>nix + log</command>.</para> + </listitem> + + <listitem> + <para>It <link + xlink:href="https://github.com/NixOS/nix/commit/b8283773bd64d7da6859ed520ee19867742a03ba">provides</link> + all <filename>nix.conf</filename> configuration options as + command line flags. For example, instead of <literal>--option + http-connections 100</literal> you can write + <literal>--http-connections 100</literal>. Boolean options can + be written as + <literal>--<replaceable>foo</replaceable></literal> or + <literal>--no-<replaceable>foo</replaceable></literal> + (e.g. 
<option>--no-auto-optimise-store</option>).</para> + </listitem> + + <listitem> + <para>Many subcommands have a <option>--json</option> flag to + write results to stdout in JSON format.</para> + </listitem> + + </itemizedlist> + + <warning><para>Please note that the <command>nix</command> command + is a work in progress and the interface is subject to + change.</para></warning> + + <para>It provides the following high-level (“porcelain”) + subcommands:</para> + + <itemizedlist> + + <listitem> + <para><command>nix build</command> is a replacement for + <command>nix-build</command>.</para> + </listitem> + + <listitem> + <para><command>nix run</command> executes a command in an + environment in which the specified packages are available. It + is (roughly) a replacement for <command>nix-shell + -p</command>. Unlike that command, it does not execute the + command in a shell, and has a flag (<command>-c</command>) + that specifies the unquoted command line to be + executed.</para> + + <para>It is particularly useful in conjunction with chroot + stores, allowing Linux users who do not have permission to + install Nix in <command>/nix/store</command> to still use + binary substitutes that assume + <command>/nix/store</command>. For example, + + <screen>nix run --store ~/my-nix nixpkgs.hello -c hello --greeting 'Hi everybody!'</screen> + + downloads (or if not substitutes are available, builds) the + GNU Hello package into + <filename>~/my-nix/nix/store</filename>, then runs + <command>hello</command> in a mount namespace where + <filename>~/my-nix/nix/store</filename> is mounted onto + <command>/nix/store</command>.</para> + </listitem> + + <listitem> + <para><command>nix search</command> replaces <command>nix-env + -qa</command>. It searches the available packages for + occurrences of a search string in the attribute name, package + name or description. Unlike <command>nix-env -qa</command>, it + has a cache to speed up subsequent searches.</para> + </listitem> + + <listitem> + <para><command>nix copy</command> copies paths between + arbitrary Nix stores, generalising + <command>nix-copy-closure</command> and + <command>nix-push</command>.</para> + </listitem> + + <listitem> + <para><command>nix repl</command> replaces the external + program <command>nix-repl</command>. It provides an + interactive environment for evaluating and building Nix + expressions. Note that it uses <literal>linenoise-ng</literal> + instead of GNU Readline.</para> + </listitem> + + <listitem> + <para><command>nix upgrade-nix</command> upgrades Nix to the + latest stable version. This requires that Nix is installed in + a profile. (Thus it won’t work on NixOS, or if it’s installed + outside of the Nix store.)</para> + </listitem> + + <listitem> + <para><command>nix verify</command> checks whether store paths + are unmodified and/or “trusted” (see below). It replaces + <command>nix-store --verify</command> and <command>nix-store + --verify-path</command>.</para> + </listitem> + + <listitem> + <para><command>nix log</command> shows the build log of a + package or path. 
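A brief illustrative session with these porcelain commands; the nixpkgs.hello attribute and the search term are examples only, not taken from this release:

    nix build nixpkgs.hello            # replaces nix-build
    nix search hello                   # replaces nix-env -qa, with a result cache
    nix run nixpkgs.hello -c hello     # run a command with the package on PATH
    nix log nixpkgs.hello              # show the package's build log
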
If the build log is not available locally, it + will try to obtain it from the configured substituters (such + as <uri>cache.nixos.org</uri>, which now provides build + logs).</para> + </listitem> + + <listitem> + <para><command>nix edit</command> opens the source code of a + package in your editor.</para> + </listitem> + + <listitem> + <para><command>nix eval</command> replaces + <command>nix-instantiate --eval</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/d41c5eb13f4f3a37d80dbc6d3888644170c3b44a">nix + why-depends</command> shows why one store path has another in + its closure. This is primarily useful for finding the causes of + closure bloat. For example, + + <screen>nix why-depends nixpkgs.vlc nixpkgs.libdrm.dev</screen> + + shows a chain of files and fragments of file contents that + cause the VLC package to have the “dev” output of + <literal>libdrm</literal> in its closure — an undesirable + situation.</para> + </listitem> + + <listitem> + <para><command>nix path-info</command> shows information about + store paths, replacing <command>nix-store -q</command>. A + useful feature is the option <option>--closure-size</option> + (<option>-S</option>). For example, the following command shows + the closure sizes of every path in the current NixOS system + closure, sorted by size: + + <screen>nix path-info -rS /run/current-system | sort -nk2</screen> + + </para> + </listitem> + + <listitem> + <para><command>nix optimise-store</command> replaces + <command>nix-store --optimise</command>. The main difference + is that it has a progress indicator.</para> + </listitem> + + </itemizedlist> + + <para>A number of low-level (“plumbing”) commands are also + available:</para> + + <itemizedlist> + + <listitem> + <para><command>nix ls-store</command> and <command>nix + ls-nar</command> list the contents of a store path or NAR + file. The former is primarily useful in conjunction with + remote stores, e.g. + + <screen>nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10</screen> + + lists the contents of a path in a binary cache.</para> + </listitem> + + <listitem> + <para><command>nix cat-store</command> and <command>nix + cat-nar</command> allow extracting a file from a store path or + NAR file.</para> + </listitem> + + <listitem> + <para><command>nix dump-path</command> writes the contents of + a store path to stdout in NAR format. This replaces + <command>nix-store --dump</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/e8d6ee7c1b90a2fe6d824f1a875acc56799ae6e2">nix + show-derivation</command> displays a store derivation in JSON + format. 
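As an illustration of how this JSON output can be consumed downstream, a hedged sketch; the use of jq and the nixpkgs.hello attribute are assumptions, not part of this release:

    nix show-derivation nixpkgs.hello | jq .                     # pretty-print the derivation
    nix show-derivation nixpkgs.hello | jq '.[].outputs | keys'  # list its output names
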
This is an alternative to + <command>pp-aterm</command>.</para> + </listitem> + + <listitem> + <para><command + xlink:href="https://github.com/NixOS/nix/commit/970366266b8df712f5f9cedb45af183ef5a8357f">nix + add-to-store</command> replaces <command>nix-store + --add</command>.</para> + </listitem> + + <listitem> + <para><command>nix sign-paths</command> signs store + paths.</para> + </listitem> + + <listitem> + <para><command>nix copy-sigs</command> copies signatures from + one store to another.</para> + </listitem> + + <listitem> + <para><command>nix show-config</command> shows all + configuration options and their current values.</para> + </listitem> + + </itemizedlist> + + </listitem> + + <listitem> + <para>The store abstraction that Nix has had for a long time to + support store access via the Nix daemon has been extended + significantly. In particular, substituters (which used to be + external programs such as + <command>download-from-binary-cache</command>) are now subclasses + of the abstract <classname>Store</classname> class. This allows + many Nix commands to operate on such store types. For example, + <command>nix path-info</command> shows information about paths in + your local Nix store, while <command>nix path-info --store + https://cache.nixos.org/</command> shows information about paths + in the specified binary cache. Similarly, + <command>nix-copy-closure</command>, <command>nix-push</command> + and substitution are all instances of the general notion of + copying paths between different kinds of Nix stores.</para> + + <para>Stores are specified using a URI-like syntax, + e.g. <uri>https://cache.nixos.org/</uri> or + <uri>ssh://machine</uri>. The following store types are supported: + + <itemizedlist> + + <listitem> + + <para><classname>LocalStore</classname> (store URI + <literal>local</literal> or an absolute path) and the misnamed + <classname>RemoteStore</classname> (<literal>daemon</literal>) + provide access to a local Nix store, the latter via the Nix + daemon. You can use <literal>auto</literal> or the empty + string to auto-select a local or daemon store depending on + whether you have write permission to the Nix store. It is no + longer necessary to set the <envar>NIX_REMOTE</envar> + environment variable to use the Nix daemon.</para> + + <para>As noted above, <classname>LocalStore</classname> now + supports chroot builds, allowing the “physical” location of + the Nix store + (e.g. <filename>/home/alice/nix/store</filename>) to differ + from its “logical” location (typically + <filename>/nix/store</filename>). This allows non-root users + to use Nix while still getting the benefit of prebuilt + binaries from <uri>cache.nixos.org</uri>.</para> + + </listitem> + + <listitem> + + <para><classname>BinaryCacheStore</classname> is the abstract + superclass of all binary cache stores. It supports writing + build logs and NAR content listings in JSON format.</para> + + </listitem> + + <listitem> + + <para><classname>HttpBinaryCacheStore</classname> + (<literal>http://</literal>, <literal>https://</literal>) + supports binary caches via HTTP or HTTPS. 
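For example, the same command can be pointed at different stores simply by changing the store URI; the nixpkgs.hello attribute below is illustrative:

    nix path-info --store https://cache.nixos.org/ nixpkgs.hello   # query an HTTP binary cache
    nix copy --to file:///tmp/my-binary-cache nixpkgs.hello        # write to a local binary cache
    nix path-info --store daemon nixpkgs.hello                     # go through the Nix daemon
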
If the server + supports <literal>PUT</literal> requests, it supports + uploading store paths via commands such as <command>nix + copy</command>.</para> + + </listitem> + + <listitem> + + <para><classname>LocalBinaryCacheStore</classname> + (<literal>file://</literal>) supports binary caches in the + local filesystem.</para> + + </listitem> + + <listitem> + + <para><classname>S3BinaryCacheStore</classname> + (<literal>s3://</literal>) supports binary caches stored in + Amazon S3, if enabled at compile time.</para> + + </listitem> + + <listitem> + + <para><classname>LegacySSHStore</classname> (<literal>ssh://</literal>) + is used to implement remote builds and + <command>nix-copy-closure</command>.</para> + + </listitem> + + <listitem> + + <para><classname>SSHStore</classname> + (<literal>ssh-ng://</literal>) supports arbitrary Nix + operations on a remote machine via the same protocol used by + <command>nix-daemon</command>.</para> + + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + + <para>Security has been improved in various ways: + + <itemizedlist> + + <listitem> + <para>Nix now stores signatures for local store + paths. When paths are copied between stores (e.g., copied from + a binary cache to a local store), signatures are + propagated.</para> + + <para>Locally-built paths are signed automatically using the + secret keys specified by the <option>secret-key-files</option> + store option. Secret/public key pairs can be generated using + <command>nix-store + --generate-binary-cache-key</command>.</para> + + <para>In addition, locally-built store paths are marked as + “ultimately trusted”, but this bit is not propagated when + paths are copied between stores.</para> + </listitem> + + <listitem> + <para>Content-addressable store paths no longer require + signatures — they can be imported into a store by unprivileged + users even if they lack signatures.</para> + </listitem> + + <listitem> + <para>The command <command>nix verify</command> checks whether + the specified paths are trusted, i.e., have a certain number + of trusted signatures, are ultimately trusted, or are + content-addressed.</para> + </listitem> + + <listitem> + <para>Substitutions from binary caches <link + xlink:href="https://github.com/NixOS/nix/commit/ecbc3fedd3d5bdc5a0e1a0a51b29062f2874ac8b">now</link> + require signatures by default. This was already the case on + NixOS.</para> + </listitem> + + <listitem> + <para>In Linux sandbox builds, we <link + xlink:href="https://github.com/NixOS/nix/commit/eba840c8a13b465ace90172ff76a0db2899ab11b">now</link> + use <filename>/build</filename> instead of + <filename>/tmp</filename> as the temporary build + directory. This fixes potential security problems when a build + accidentally stores its <envar>TMPDIR</envar> in some + security-sensitive place, such as an RPATH.</para> + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + <para><emphasis>Pure evaluation mode</emphasis>. This is a variant + of the existing restricted evaluation mode. In pure mode, the Nix + evaluator forbids access to anything that could cause different + evaluations of the same command line arguments to produce a + different result. This includes builtin functions such as + <function>builtins.getEnv</function>, but more importantly, + <emphasis>all</emphasis> filesystem or network access unless a + content hash or commit hash is specified. 
For example, calls to + <function>builtins.fetchGit</function> are only allowed if a + <varname>rev</varname> attribute is specified.</para> + + <para>The goal of this feature is to enable true reproducibility + and traceability of builds (including NixOS system configurations) + at the evaluation level. For example, in the future, + <command>nixos-rebuild</command> might build configurations from a + Nix expression in a Git repository in pure mode. That expression + might fetch other repositories such as Nixpkgs via + <function>builtins.fetchGit</function>. The commit hash of the + top-level repository then uniquely identifies a running system, + and, in conjunction with that repository, allows it to be + reproduced or modified.</para> + + </listitem> + + <listitem> + <para>There are several new features to support binary + reproducibility (i.e. to help ensure that multiple builds of the + same derivation produce exactly the same output). When + <option>enforce-determinism</option> is set to + <literal>false</literal>, it’s <link + xlink:href="https://github.com/NixOS/nix/commit/8bdf83f936adae6f2c907a6d2541e80d4120f051">no + longer</link> a fatal error if build rounds produce different + output. Also, a hook named <option>diff-hook</option> is <link + xlink:href="https://github.com/NixOS/nix/commit/9a313469a4bdea2d1e8df24d16289dc2a172a169">provided</link> + to allow you to run tools such as <command>diffoscope</command> + when build rounds produce different output.</para> + </listitem> + + <listitem> + <para>Configuring remote builds is a lot easier now. Provided you + are not using the Nix daemon, you can now just specify a remote + build machine on the command line, e.g. <literal>--option builders + 'ssh://my-mac x86_64-darwin'</literal>. The environment variable + <envar>NIX_BUILD_HOOK</envar> has been removed and is no longer + needed. The environment variable <envar>NIX_REMOTE_SYSTEMS</envar> + is still supported for compatibility, but it is also possible to + specify builders in <command>nix.conf</command> by setting the + option <literal>builders = + @<replaceable>path</replaceable></literal>.</para> + </listitem> + + <listitem> + <para>If a fixed-output derivation produces a result with an + incorrect hash, the output path is moved to the location + corresponding to the actual hash and registered as valid. Thus, a + subsequent build of the fixed-output derivation with the correct + hash is unnecessary.</para> + </listitem> + + <listitem> + <para><command>nix-shell</command> <link + xlink:href="https://github.com/NixOS/nix/commit/ea59f39326c8e9dc42dfed4bcbf597fbce58797c">now</link> + sets the <varname>IN_NIX_SHELL</varname> environment variable + during evaluation and in the shell itself. This can be used to + perform different actions depending on whether you’re in a Nix + shell or in a regular build. Nixpkgs provides + <varname>lib.inNixShell</varname> to check this variable during + evaluation.</para> + </listitem> + + <listitem> + <para><envar>NIX_PATH</envar> is now lazy, so URIs in the path are + only downloaded if they are needed for evaluation.</para> + </listitem> + + <listitem> + <para>You can now use + <uri>channel:<replaceable>channel-name</replaceable></uri> as a + short-hand for + <uri>https://nixos.org/channels/<replaceable>channel-name</replaceable>/nixexprs.tar.xz</uri>. For + example, <literal>nix-build channel:nixos-15.09 -A hello</literal> + will build the GNU Hello package from the + <literal>nixos-15.09</literal> channel. 
In the future, this may + use Git to fetch updates more efficiently.</para> + </listitem> + + <listitem> + <para>When <option>--no-build-output</option> is given, the last + 10 lines of the build log will be shown if a build + fails.</para> + </listitem> + + <listitem> + <para>Networking has been improved: + + <itemizedlist> + + <listitem> + <para>HTTP/2 is now supported. This makes binary cache lookups + <link + xlink:href="https://github.com/NixOS/nix/commit/90ad02bf626b885a5dd8967894e2eafc953bdf92">much + more efficient</link>.</para> + </listitem> + + <listitem> + <para>We now retry downloads on many HTTP errors, making + binary caches substituters more resilient to temporary + failures.</para> + </listitem> + + <listitem> + <para>HTTP credentials can now be configured via the standard + <filename>netrc</filename> mechanism.</para> + </listitem> + + <listitem> + <para>If S3 support is enabled at compile time, + <uri>s3://</uri> URIs are <link + xlink:href="https://github.com/NixOS/nix/commit/9ff9c3f2f80ba4108e9c945bbfda2c64735f987b">supported</link> + in all places where Nix allows URIs.</para> + </listitem> + + <listitem> + <para>Brotli compression is now supported. In particular, + <uri>cache.nixos.org</uri> build logs are now compressed using + Brotli.</para> + </listitem> + + </itemizedlist> + + </para> + + </listitem> + + <listitem> + <para><command>nix-env</command> <link + xlink:href="https://github.com/NixOS/nix/commit/b0cb11722626e906a73f10dd9a0c9eea29faf43a">now</link> + ignores packages with bad derivation names (in particular those + starting with a digit or containing a dot).</para> + </listitem> + + <listitem> + <para>Many configuration options have been renamed, either because + they were unnecessarily verbose + (e.g. <option>build-use-sandbox</option> is now just + <option>sandbox</option>) or to reflect generalised behaviour + (e.g. <option>binary-caches</option> is now + <option>substituters</option> because it allows arbitrary store + URIs). The old names are still supported for compatibility.</para> + </listitem> + + <listitem> + <para>The <option>max-jobs</option> option can <link + xlink:href="https://github.com/NixOS/nix/commit/7251d048fa812d2551b7003bc9f13a8f5d4c95a5">now</link> + be set to <literal>auto</literal> to use the number of CPUs in the + system.</para> + </listitem> + + <listitem> + <para>Hashes can <link + xlink:href="https://github.com/NixOS/nix/commit/c0015e87af70f539f24d2aa2bc224a9d8b84276b">now</link> + be specified in base-64 format, in addition to base-16 and the + non-standard base-32.</para> + </listitem> + + <listitem> + <para><command>nix-shell</command> now uses + <varname>bashInteractive</varname> from Nixpkgs, rather than the + <command>bash</command> command that happens to be in the caller’s + <envar>PATH</envar>. This is especially important on macOS where + the <command>bash</command> provided by the system is seriously + outdated and cannot execute <literal>stdenv</literal>’s setup + script.</para> + </listitem> + + <listitem> + <para>Nix can now automatically trigger a garbage collection if + free disk space drops below a certain level during a build. This + is configured using the <option>min-free</option> and + <option>max-free</option> options.</para> + </listitem> + + <listitem> + <para><command>nix-store -q --roots</command> and + <command>nix-store --gc --print-roots</command> now show temporary + and in-memory roots.</para> + </listitem> + + <listitem> + <para> + Nix can now be extended with plugins. 
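Several of the options above end up in nix.conf. A hypothetical fragment follows; all values and paths are illustrative, and netrc-file is an assumed option name for the netrc support mentioned above:

    max-jobs = auto                        # use the number of CPUs
    substituters = https://cache.nixos.org/
    netrc-file = /etc/nix/netrc            # assumed option name; standard netrc credentials
    min-free = 1073741824                  # run the garbage collector below 1 GiB free
    max-free = 5368709120                  # stop collecting once 5 GiB are free
    plugin-files = /path/to/libnix-example-plugin.so   # hypothetical plugin
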
See the documentation of + the <option>plugin-files</option> option for more details. + </para> + </listitem> + +</itemizedlist> + +<para>The Nix language has the following new features: + +<itemizedlist> + + <listitem> + <para>It supports floating point numbers. They are based on the + C++ <literal>float</literal> type and are supported by the + existing numerical operators. Export and import to and from JSON + and XML works, too.</para> + </listitem> + + <listitem> + <para>Derivation attributes can now reference the outputs of the + derivation using the <function>placeholder</function> builtin + function. For example, the attribute + +<programlisting> +configureFlags = "--prefix=${placeholder "out"} --includedir=${placeholder "dev"}"; +</programlisting> + + will cause the <envar>configureFlags</envar> environment variable + to contain the actual store paths corresponding to the + <literal>out</literal> and <literal>dev</literal> outputs.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>The following builtin functions are new or extended: + +<itemizedlist> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/38539b943a060d9cdfc24d6e5d997c0885b8aa2f">builtins.fetchGit</function> + allows Git repositories to be fetched at evaluation time. Thus it + differs from the <function>fetchgit</function> function in + Nixpkgs, which fetches at build time and cannot be used to fetch + Nix expressions during evaluation. A typical use case is to import + external NixOS modules from your configuration, e.g. + + <programlisting>imports = [ (builtins.fetchGit https://github.com/edolstra/dwarffs + "/module.nix") ];</programlisting> + + </para> + </listitem> + + <listitem> + <para>Similarly, <function>builtins.fetchMercurial</function> + allows you to fetch Mercurial repositories.</para> + </listitem> + + <listitem> + <para><function>builtins.path</function> generalises + <function>builtins.filterSource</function> and path literals + (e.g. <literal>./foo</literal>). It allows specifying a store path + name that differs from the source path name + (e.g. <literal>builtins.path { path = ./foo; name = "bar"; + }</literal>) and also supports filtering out unwanted + files.</para> + </listitem> + + <listitem> + <para><function>builtins.fetchurl</function> and + <function>builtins.fetchTarball</function> now support + <varname>sha256</varname> and <varname>name</varname> + attributes.</para> + </listitem> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/b8867a0239b1930a16f9ef3f7f3e864b01416dff">builtins.split</function> + splits a string using a POSIX extended regular expression as the + separator.</para> + </listitem> + + <listitem> + <para><function + xlink:href="https://github.com/NixOS/nix/commit/26d92017d3b36cff940dcb7d1611c42232edb81a">builtins.partition</function> + partitions the elements of a list into two lists, depending on a + Boolean predicate.</para> + </listitem> + + <listitem> + <para><literal><nix/fetchurl.nix></literal> now uses the + content-addressable tarball cache at + <uri>http://tarballs.nixos.org/</uri>, just like + <function>fetchurl</function> in + Nixpkgs. 
(f2682e6e18a76ecbfb8a12c17e3a0ca15c084197)</para> + </listitem> + + <listitem> + <para>In restricted and pure evaluation mode, builtin functions + that download from the network (such as + <function>fetchGit</function>) are permitted to fetch underneath a + list of URI prefixes specified in the option + <option>allowed-uris</option>.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>The Nix build environment has the following changes: + +<itemizedlist> + + <listitem> + <para>Values such as Booleans, integers, (nested) lists and + attribute sets can <link + xlink:href="https://github.com/NixOS/nix/commit/6de33a9c675b187437a2e1abbcb290981a89ecb1">now</link> + be passed to builders in a non-lossy way. If the special attribute + <varname>__structuredAttrs</varname> is set to + <literal>true</literal>, the other derivation attributes are + serialised in JSON format and made available to the builder via + the file <envar>.attrs.json</envar> in the builder’s temporary + directory. This obviates the need for + <varname>passAsFile</varname> since JSON files have no size + restrictions, unlike process environments.</para> + + <para><link + xlink:href="https://github.com/NixOS/nix/commit/2d5b1b24bf70a498e4c0b378704cfdb6471cc699">As + a convenience to Bash builders</link>, Nix writes a script named + <envar>.attrs.sh</envar> to the builder’s directory that + initialises shell variables corresponding to all attributes that + are representable in Bash. This includes non-nested (associative) + arrays. For example, the attribute <literal>hardening.format = + true</literal> ends up as the Bash associative array element + <literal>${hardening[format]}</literal>.</para> + </listitem> + + <listitem> + <para>Builders can <link + xlink:href="https://github.com/NixOS/nix/commit/88e6bb76de5564b3217be9688677d1c89101b2a3">now</link> + communicate what build phase they are in by writing messages to + the file descriptor specified in <envar>NIX_LOG_FD</envar>. The + current phase is shown by the <command>nix</command> progress + indicator. + </para> + </listitem> + + <listitem> + <para>In Linux sandbox builds, we <link + xlink:href="https://github.com/NixOS/nix/commit/a2d92bb20e82a0957067ede60e91fab256948b41">now</link> + provide a default <filename>/bin/sh</filename> (namely + <filename>ash</filename> from BusyBox).</para> + </listitem> + + <listitem> + <para>In structured attribute mode, + <varname>exportReferencesGraph</varname> <link + xlink:href="https://github.com/NixOS/nix/commit/c2b0d8749f7e77afc1c4b3e8dd36b7ee9720af4a">exports</link> + extended information about closures in JSON format. In particular, + it includes the sizes and hashes of paths. This is primarily + useful for NixOS image builders.</para> + </listitem> + + <listitem> + <para>Builds are <link + xlink:href="https://github.com/NixOS/nix/commit/21948deed99a3295e4d5666e027a6ca42dc00b40">now</link> + killed as soon as Nix receives EOF on the builder’s stdout or + stderr. This fixes a bug that allowed builds to hang Nix + indefinitely, regardless of + timeouts.</para> + </listitem> + + <listitem> + <para>The <option>sandbox-paths</option> configuration + option can now specify optional paths by appending a + <literal>?</literal>, e.g. 
<literal>/dev/nvidiactl?</literal> will + bind-mount <varname>/dev/nvidiactl</varname> only if it + exists.</para> + </listitem> + + <listitem> + <para>On Linux, builds are now executed in a user + namespace with UID 1000 and GID 100.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>A number of significant internal changes were made: + +<itemizedlist> + + <listitem> + <para>Nix no longer depends on Perl and all Perl components have + been rewritten in C++ or removed. The Perl bindings that used to + be part of Nix have been moved to a separate package, + <literal>nix-perl</literal>.</para> + </listitem> + + <listitem> + <para>All <classname>Store</classname> classes are now + thread-safe. <classname>RemoteStore</classname> supports multiple + concurrent connections to the daemon. This is primarily useful in + multi-threaded programs such as + <command>hydra-queue-runner</command>.</para> + </listitem> + +</itemizedlist> + +</para> + +<para>This release has contributions from + +Adrien Devresse, +Alexander Ried, +Alex Cruice, +Alexey Shmalko, +AmineChikhaoui, +Andy Wingo, +Aneesh Agrawal, +Anthony Cowley, +Armijn Hemel, +aszlig, +Ben Gamari, +Benjamin Hipple, +Benjamin Staffin, +Benno Fünfstück, +Bjørn Forsman, +Brian McKenna, +Charles Strahan, +Chase Adams, +Chris Martin, +Christian Theune, +Chris Warburton, +Daiderd Jordan, +Dan Connolly, +Daniel Peebles, +Dan Peebles, +davidak, +David McFarland, +Dmitry Kalinkin, +Domen Kožar, +Eelco Dolstra, +Emery Hemingway, +Eric Litak, +Eric Wolf, +Fabian Schmitthenner, +Frederik Rietdijk, +Gabriel Gonzalez, +Giorgio Gallo, +Graham Christensen, +Guillaume Maudoux, +Harmen, +Iavael, +James Broadhead, +James Earl Douglas, +Janus Troelsen, +Jeremy Shaw, +Joachim Schiele, +Joe Hermaszewski, +Joel Moberg, +Johannes 'fish' Ziemke, +Jörg Thalheim, +Jude Taylor, +kballou, +Keshav Kini, +Kjetil Orbekk, +Langston Barrett, +Linus Heckemann, +Ludovic Courtès, +Manav Rathi, +Marc Scholten, +Markus Hauck, +Matt Audesse, +Matthew Bauer, +Matthias Beyer, +Matthieu Coudron, +N1X, +Nathan Zadoks, +Neil Mayhew, +Nicolas B. Pierron, +Niklas Hambüchen, +Nikolay Amiantov, +Ole Jørgen Brønner, +Orivej Desh, +Peter Simons, +Peter Stuart, +Pyry Jahkola, +regnat, +Renzo Carbonara, +Rhys, +Robert Vollmert, +Scott Olson, +Scott R. Parish, +Sergei Trofimovich, +Shea Levy, +Sheena Artrip, +Spencer Baugh, +Stefan Junker, +Susan Potter, +Thomas Tuegel, +Timothy Allen, +Tristan Hume, +Tuomas Tynkkynen, +tv, +Tyson Whitehead, +Vladimír Čunát, +Will Dietz, +wmertens, +Wout Mertens, +zimbatm and +Zoran Plesivčak. 
+</para> + +</section> diff --git a/doc/manual/style.css b/doc/manual/style.css index 53fd9d5709c3..592583ab086a 100644 --- a/doc/manual/style.css +++ b/doc/manual/style.css @@ -96,7 +96,6 @@ div.example margin-right: 1.5em; background: #f4f4f8; border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.example p.title @@ -106,7 +105,6 @@ div.example p.title div.example pre { - box-shadow: none; } @@ -116,15 +114,12 @@ div.example pre pre.screen, pre.programlisting { - border: 1px solid #b0b0b0; - padding: 3px 3px; + padding: 6px 6px; margin-left: 1.5em; margin-right: 1.5em; color: #600000; background: #f4f4f8; font-family: monospace; - border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.example pre.programlisting @@ -149,7 +144,6 @@ div.example pre.programlisting padding: 0.3em 0.3em 0.3em 0.3em; background: #fffff5; border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.note, div.warning @@ -256,16 +250,14 @@ span.command strong div.calloutlist table { - box-shadow: none; } table { border-collapse: collapse; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; } div.affiliation { font-style: italic; -} \ No newline at end of file +} diff --git a/doc/manual/troubleshooting/collisions-nixenv.xml b/doc/manual/troubleshooting/collisions-nixenv.xml deleted file mode 100644 index 23cc43faf088..000000000000 --- a/doc/manual/troubleshooting/collisions-nixenv.xml +++ /dev/null @@ -1,38 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-collisions-nixenv"> - -<title>Collisions in <command>nix-env</command></title> - -<para>Symptom: when installing or upgrading, you get an error message such as - -<screen> -$ nix-env -i docbook-xml -... -adding /nix/store/s5hyxgm62gk2...-docbook-xml-4.2 -collision between `/nix/store/s5hyxgm62gk2...-docbook-xml-4.2/xml/dtd/docbook/calstblx.dtd' - and `/nix/store/06h377hr4b33...-docbook-xml-4.3/xml/dtd/docbook/calstblx.dtd' - at /nix/store/...-builder.pl line 62.</screen> - -</para> - -<para>The cause is that two installed packages in the user environment -have overlapping filenames (e.g., -<filename>xml/dtd/docbook/calstblx.dtd</filename>. This usually -happens when you accidentally try to install two versions of the same -package. For instance, in the example above, the Nix Packages -collection contains two versions of <literal>docbook-xml</literal>, so -<command>nix-env -i</command> will try to install both. 
The default -user environment builder has no way to way to resolve such conflicts, -so it just gives up.</para> - -<para>Solution: remove one of the offending packages from the user -environment (if already installed) using <command>nix-env --e</command>, or specify exactly which version should be installed -(e.g., <literal>nix-env -i docbook-xml-4.2</literal>).</para> - -<!-- FIXME: describe priorities --> - -</section> diff --git a/doc/manual/troubleshooting/links-nix-store.xml b/doc/manual/troubleshooting/links-nix-store.xml deleted file mode 100644 index c768889567d0..000000000000 --- a/doc/manual/troubleshooting/links-nix-store.xml +++ /dev/null @@ -1,43 +0,0 @@ -<section xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="sec-links-nix-store"> - -<title><quote>Too many links</quote> Error in the Nix store</title> - - -<para>Symptom: when building something, you get an error message such as - -<screen> -... -<literal>mkdir: cannot create directory `/nix/store/<replaceable>name</replaceable>': Too many links</literal></screen> - -</para> - -<para>This is usually because you have more than 32,000 subdirectories -in <filename>/nix/store</filename>, as can be seen using <command>ls --l</command>: - -<screen> -$ ls -ld /nix/store -drwxrwxrwt 32000 nix nix 4620288 Sep 8 15:08 store</screen> - -The <literal>ext2</literal> file system is limited to an inode link -count of 32,000 (each subdirectory increasing the count by one). -Furthermore, the <literal>st_nlink</literal> field of the -<function>stat</function> system call is a 16-bit value.</para> - -<para>This only happens on very large Nix installations (such as build -machines).</para> - -<para>Quick solution: run the garbage collector. You may want to use -the <option>--max-links</option> option.</para> - -<para>Real solution: put the Nix store on a file system that supports -more than 32,000 subdirectories per directory, such as ext4. (This -doesn’t solve the <literal>st_nlink</literal> limit, but ext4 lies to -the kernel by reporting a link count of 1 if it exceeds the -limit.)</para> - -</section> diff --git a/doc/manual/troubleshooting/troubleshooting.xml b/doc/manual/troubleshooting/troubleshooting.xml deleted file mode 100644 index 1e973a192b18..000000000000 --- a/doc/manual/troubleshooting/troubleshooting.xml +++ /dev/null @@ -1,16 +0,0 @@ -<appendix xmlns="http://docbook.org/ns/docbook" - xmlns:xlink="http://www.w3.org/1999/xlink" - xmlns:xi="http://www.w3.org/2001/XInclude" - version="5.0" - xml:id="ch-troubleshooting"> - -<title>Troubleshooting</title> - -<para>This section provides solutions for some common problems. See -the <link xlink:href="https://github.com/NixOS/nix/issues">Nix bug -tracker</link> for a list of currently known issues.</para> - -<xi:include href="collisions-nixenv.xml" /> -<xi:include href="links-nix-store.xml" /> - -</appendix> diff --git a/local.mk b/local.mk index 5c80451bd579..5d7e0fb2e428 100644 --- a/local.mk +++ b/local.mk @@ -1,9 +1,8 @@ ifeq ($(MAKECMDGOALS), dist) - # Make sure we are in repo root with `--git-dir` - dist-files += $(shell git --git-dir=.git ls-files || find * -type f) + dist-files += $(shell cat .dist-files) endif -dist-files += configure config.h.in nix.spec perl/configure src/nlohmann/json.hpp +dist-files += configure config.h.in nix.spec perl/configure clean-files += Makefile.config @@ -11,5 +10,3 @@ GLOBAL_CXXFLAGS += -I . 
-I src -I src/libutil -I src/libstore -I src/libmain -I $(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) - -$(foreach i, $(call rwildcard, src/boost, *.hpp), $(eval $(call install-file-in, $(i), $(includedir)/nix/$(patsubst src/%/,%,$(dir $(i))), 0644))) diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 743829e3f5f3..aa7633a709c1 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -6,6 +6,7 @@ use Data::Dumper; use File::Basename; use File::Path; use File::Slurp; +use File::Copy; use JSON::PP; use LWP::UserAgent; @@ -54,7 +55,7 @@ sub downloadFile { my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); - my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die; + my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n"; $dstName //= basename($srcFile); my $dstFile = "$releaseDir/" . $dstName; @@ -78,11 +79,11 @@ sub downloadFile { return ($dstFile, $sha256_expected); } -downloadFile("tarball", "2"); # PDF -downloadFile("tarball", "3"); # .tar.bz2 -my ($tarball, $tarballHash) = downloadFile("tarball", "4"); # .tar.xz +downloadFile("tarball", "2"); # .tar.bz2 +my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1"); my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1"); +my ($tarball_aarch64_linux, $tarball_aarch64_linux_hash) = downloadFile("binaryTarball.aarch64-linux", "1"); my ($tarball_x86_64_darwin, $tarball_x86_64_darwin_hash) = downloadFile("binaryTarball.x86_64-darwin", "1"); # Update Nixpkgs in a very hacky way. @@ -111,6 +112,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix", "{\n" . " x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" . " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" . + " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" . " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" . "}\n"); @@ -144,9 +146,15 @@ write_file("$siteDir/nix-release.tt", "latestNixVersion = \"$version\"\n" . "nix_hash_i686_linux = \"$tarball_i686_linux_hash\"\n" . "nix_hash_x86_64_linux = \"$tarball_x86_64_linux_hash\"\n" . + "nix_hash_aarch64_linux = \"$tarball_aarch64_linux_hash\"\n" . "nix_hash_x86_64_darwin = \"$tarball_x86_64_darwin_hash\"\n" . "-%]\n"); system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die; +copy("$siteDir/nix/install", "$siteDir/nix/install-$version") or die; +copy("$siteDir/nix/install.sig", "$siteDir/nix/install-$version.sig") or die; + +system("cd $siteDir && git add nix/install-$version nix/install-$version.sig") == 0 or die; + system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die; diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile index d6b88c7e91a5..0f69d02df25f 100644 --- a/misc/docker/Dockerfile +++ b/misc/docker/Dockerfile @@ -4,7 +4,9 @@ FROM alpine RUN apk add --update openssl # Download Nix and install it into the system. 
-RUN wget -O- https://nixos.org/releases/nix/nix-1.11.14/nix-1.11.14-x86_64-linux.tar.bz2 | bzcat - | tar xf - \ +RUN wget https://nixos.org/releases/nix/nix-2.0.2/nix-2.0.2-x86_64-linux.tar.bz2 \ + && echo "d0c2492d7d8f824e3b1ace15a1a58f64a0a8faacc59936ebedfe18905d982d7c nix-2.0.2-x86_64-linux.tar.bz2" | sha256sum -c \ + && tar xjf nix-*-x86_64-linux.tar.bz2 \ && addgroup -g 30000 -S nixbld \ && for i in $(seq 1 30); do adduser -S -D -h /var/empty -g "Nix build user $i" -u $((30000 + i)) -G nixbld nixbld$i ; done \ && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \ diff --git a/misc/docker/README.md b/misc/docker/README.md new file mode 100644 index 000000000000..491be7408964 --- /dev/null +++ b/misc/docker/README.md @@ -0,0 +1,8 @@ +To update https://hub.docker.com/r/nixos/nix/ + + $ docker build . -t nixos/nix:2.0 + $ docker tag nixos/nix:2.0 nixos/nix:latest + $ docker push nixos/nix:latest + $ docker push nixos/nix:2.0 + +Write access: @domenkozar diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in index 66fcd155ee9b..549619a57d56 100644 --- a/misc/launchd/org.nixos.nix-daemon.plist.in +++ b/misc/launchd/org.nixos.nix-daemon.plist.in @@ -4,6 +4,8 @@ <dict> <key>Label</key> <string>org.nixos.nix-daemon</string> + <key>KeepAlive</key> + <true/> <key>RunAtLoad</key> <true/> <key>Program</key> diff --git a/mk/libraries.mk b/mk/libraries.mk index 3cd7a53107bd..14c95fa91cf6 100644 --- a/mk/libraries.mk +++ b/mk/libraries.mk @@ -45,6 +45,11 @@ endif # - $(1)_INSTALL_DIR: the directory where the library will be # installed. Defaults to $(libdir). # +# - $(1)_EXCLUDE_FROM_LIBRARY_LIST: if defined, the library will not +# be automatically marked as a dependency of the top-level all +# target andwill not be listed in the make help output. This is +# useful for libraries built solely for testing, for example. +# # - BUILD_SHARED_LIBS: if equal to ‘1’, a dynamic library will be # built, otherwise a static library. define build-library @@ -149,7 +154,9 @@ define build-library $(1)_DEPS := $$(foreach fn, $$($(1)_OBJS), $$(call filename-to-dep, $$(fn))) -include $$($(1)_DEPS) + ifndef $(1)_EXCLUDE_FROM_LIBRARY_LIST libs-list += $$($(1)_PATH) + endif clean-files += $$(_d)/*.a $$(_d)/*.$(SO_EXT) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS) dist-files += $$(_srcs) endef diff --git a/mk/programs.mk b/mk/programs.mk index 3ac64494e3a5..2fbda12bd153 100644 --- a/mk/programs.mk +++ b/mk/programs.mk @@ -51,7 +51,7 @@ define build-program else $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ - install -t $$($(1)_INSTALL_DIR) $$< + install -t $(DESTDIR)$$($(1)_INSTALL_DIR) $$< endif diff --git a/mk/tests.mk b/mk/tests.mk index 1138857c3c16..70c30661b95f 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -12,21 +12,23 @@ installcheck: @total=0; failed=0; \ red=""; \ green=""; \ + yellow=""; \ normal=""; \ if [ -t 1 ]; then \ - ncolors="$$(tput colors)"; \ - if [ -n "$$ncolors" ] && [ "$$ncolors" -ge 8 ]; then \ - red="$$(tput setaf 1)"; \ - green="$$(tput setaf 2)"; \ - normal="$$(tput sgr0)"; \ - fi; \ + red="[31;1m"; \ + green="[32;1m"; \ + yellow="[33;1m"; \ + normal="[m"; \ fi; \ for i in $(_installcheck-list); do \ total=$$((total + 1)); \ printf "running test $$i..."; \ log="$$(cd $$(dirname $$i) && $(tests-environment) $$(basename $$i) 2>&1)"; \ - if [ $$? 
-eq 0 ]; then \ + status=$$?; \ + if [ $$status -eq 0 ]; then \ echo " [$${green}PASS$$normal]"; \ + elif [ $$status -eq 99 ]; then \ + echo " [$${yellow}SKIP$$normal]"; \ else \ echo " [$${red}FAIL$$normal]"; \ echo "$$log" | sed 's/^/ /'; \ @@ -37,7 +39,7 @@ installcheck: echo "$${red}$$failed out of $$total tests failed $$normal"; \ exit 1; \ else \ - echo "$${green}All tests succeeded"; \ + echo "$${green}All tests succeeded$$normal"; \ fi .PHONY: check installcheck diff --git a/nix.spec.in b/nix.spec.in index 4e7dd90ea68c..cd053dbfce5c 100644 --- a/nix.spec.in +++ b/nix.spec.in @@ -3,31 +3,47 @@ %global nixbld_user "nix-builder-" %global nixbld_group "nixbld" +# NOTE: BUILD on EL7 requires +# - Centos / RHEL7 software collection repository +# yum install centos-release-scl +# +# - Recent boost backport +# curl https://copr.fedorainfracloud.org/coprs/whosthere/boost/repo/epel-7/whosthere-boost-epel-7.repo -o /etc/yum.repos.d/whosthere-boost-epel-7.repo +# + +# Disable documentation generation +# necessary on some platforms +%bcond_without docgen + Summary: The Nix software deployment system Name: nix Version: @PACKAGE_VERSION@ Release: 2%{?dist} License: LGPLv2+ -%if 0%{?rhel} && 0%{?rhel} < 7 Group: Applications/System -%endif URL: http://nixos.org/ Source0: %{name}-%{version}.tar.bz2 -%if 0%{?el5} -BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) -%endif + Requires: curl Requires: bzip2 Requires: gzip Requires: xz -Requires: libseccomp +BuildRequires: bison +BuildRequires: boost-devel >= 1.60 BuildRequires: bzip2-devel -BuildRequires: sqlite-devel + +# for RHEL <= 7, we need software collections for a C++14 compatible compatible compiler +%if 0%{?rhel} +BuildRequires: devtoolset-7-gcc +BuildRequires: devtoolset-7-gcc-c++ +%endif + +BuildRequires: flex BuildRequires: libcurl-devel BuildRequires: libseccomp-devel - -# Hack to make that shitty RPM scanning hack shut up. -Provides: perl(Nix::SSH) +BuildRequires: openssl-devel +BuildRequires: sqlite-devel +BuildRequires: xz-devel %description Nix is a purely functional package manager. It allows multiple @@ -39,9 +55,6 @@ it can be used equally well under other Unix systems. %package devel Summary: Development files for %{name} -%if 0%{?rhel} && 0%{?rhel} < 7 -Group: Development/Libraries -%endif Requires: %{name}%{?_isa} = %{version}-%{release} %description devel @@ -51,9 +64,6 @@ developing applications that use %{name}. %package doc Summary: Documentation files for %{name} -%if 0%{?rhel} && 0%{?rhel} < 7 -Group: Documentation -%endif BuildArch: noarch Requires: %{name} = %{version}-%{release} @@ -65,20 +75,25 @@ The %{name}-doc package contains documentation files for %{name}. 
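A hypothetical EL7 build session following the notes added to nix.spec.in above; the exact package set and the plain rpmbuild invocation are illustrative:

    yum install -y centos-release-scl
    curl https://copr.fedorainfracloud.org/coprs/whosthere/boost/repo/epel-7/whosthere-boost-epel-7.repo \
        -o /etc/yum.repos.d/whosthere-boost-epel-7.repo
    yum install -y devtoolset-7-gcc devtoolset-7-gcc-c++ boost-devel bison flex \
        bzip2-devel libcurl-devel libseccomp-devel openssl-devel sqlite-devel xz-devel
    rpmbuild -ba nix.spec                  # %build enables devtoolset-7 on RHEL
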
%build +%if 0%{?rhel} +source /opt/rh/devtoolset-7/enable +%endif extraFlags= # - override docdir so large documentation files are owned by the # -doc subpackage # - set localstatedir by hand to the preferred nix value %configure --localstatedir=/nix/var \ + %{!?without_docgen:--disable-doc-gen} \ --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \ $extraFlags -make -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES +make V=1 %{?_smp_mflags} %install -%if 0%{?el5} -rm -rf $RPM_BUILD_ROOT +%if 0%{?rhel} +source /opt/rh/devtoolset-7/enable %endif + make DESTDIR=$RPM_BUILD_ROOT install find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';' @@ -128,6 +143,7 @@ systemctl start nix-daemon.socket %endif %files +%license COPYING %{_bindir}/nix* %{_libdir}/*.so %{_prefix}/libexec/* @@ -136,9 +152,11 @@ systemctl start nix-daemon.socket %{_prefix}/lib/systemd/system/nix-daemon.service %endif %{_datadir}/nix +%if ! %{without docgen} %{_mandir}/man1/*.1* %{_mandir}/man5/*.5* %{_mandir}/man8/*.8* +%endif %config(noreplace) %{_sysconfdir}/profile.d/nix.sh %config(noreplace) %{_sysconfdir}/profile.d/nix-daemon.sh /nix @@ -147,6 +165,9 @@ systemctl start nix-daemon.socket %{_includedir}/nix %{_prefix}/lib/pkgconfig/*.pc + +%if ! %{without docgen} %files doc %docdir %{_defaultdocdir}/%{name}-doc-%{version} %{_defaultdocdir}/%{name}-doc-%{version} +%endif diff --git a/perl/Makefile b/perl/Makefile index 684a37e8121f..284c75022493 100644 --- a/perl/Makefile +++ b/perl/Makefile @@ -1,6 +1,6 @@ makefiles = local.mk -GLOBAL_CXXFLAGS += -std=c++14 -g -Wall +GLOBAL_CXXFLAGS += -g -Wall -include Makefile.config diff --git a/release-common.nix b/release-common.nix index 4553118e1f56..d7fb8125f25e 100644 --- a/release-common.nix +++ b/release-common.nix @@ -1,22 +1,72 @@ { pkgs }: +with pkgs; + rec { - sh = pkgs.busybox.override { + # Use "busybox-sandbox-shell" if present, + # if not (legacy) fallback and hope it's sufficient. + sh = pkgs.busybox-sandbox-shell or (busybox.override { useMusl = true; enableStatic = true; enableMinimal = true; extraConfig = '' + CONFIG_FEATURE_FANCY_ECHO y + CONFIG_FEATURE_SH_MATH y + CONFIG_FEATURE_SH_MATH_64 y + CONFIG_ASH y + CONFIG_ASH_OPTIMIZE_FOR_SIZE y + + CONFIG_ASH_ALIAS y + CONFIG_ASH_BASH_COMPAT y + CONFIG_ASH_CMDCMD y CONFIG_ASH_ECHO y + CONFIG_ASH_GETOPTS y + CONFIG_ASH_INTERNAL_GLOB y + CONFIG_ASH_JOB_CONTROL y + CONFIG_ASH_PRINTF y CONFIG_ASH_TEST y - CONFIG_ASH_OPTIMIZE_FOR_SIZE y ''; - }; + }); configureFlags = [ "--disable-init-state" "--enable-gc" - ] ++ pkgs.lib.optionals pkgs.stdenv.isLinux [ + ] ++ lib.optionals stdenv.isLinux [ "--with-sandbox-shell=${sh}/bin/busybox" ]; + + tarballDeps = + [ bison + flex + libxml2 + libxslt + docbook5 + docbook5_xsl + autoconf-archive + autoreconfHook + ]; + + buildDeps = + [ curl + bzip2 xz brotli + openssl pkgconfig sqlite boehmgc + boost + + # Tests + git + mercurial + ] + ++ lib.optional stdenv.isLinux libseccomp + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) + (aws-sdk-cpp.override { + apis = ["s3" "transfer"]; + customMemoryManagement = false; + }); + + perlDeps = + [ perl + perlPackages.DBDSQLite + ]; } diff --git a/release.nix b/release.nix index aa4d63777ebe..37deb8e7ee38 100644 --- a/release.nix +++ b/release.nix @@ -1,12 +1,12 @@ -{ nix ? { outPath = ./.; revCount = 1234; shortRev = "abcdef"; } -, nixpkgs ? { outPath = <nixpkgs>; revCount = 1234; shortRev = "abcdef"; } +{ nix ? builtins.fetchGit ./. +, nixpkgs ? 
builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.03"; } , officialRelease ? false , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] }: let - pkgs = import <nixpkgs> {}; + pkgs = import nixpkgs { system = builtins.currentSystem or "x86_64-linux"; }; jobs = rec { @@ -14,6 +14,8 @@ let tarball = with pkgs; + with import ./release-common.nix { inherit pkgs; }; + releaseTools.sourceTarball { name = "nix-tarball"; version = builtins.readFile ./version; @@ -21,30 +23,19 @@ let src = nix; inherit officialRelease; - buildInputs = - [ curl bison flex libxml2 libxslt - bzip2 xz brotli - pkgconfig sqlite libsodium boehmgc - docbook5 docbook5_xsl - autoconf-archive - git - ] ++ lib.optional stdenv.isLinux libseccomp; + buildInputs = tarballDeps ++ buildDeps; configureFlags = "--enable-gc"; postUnpack = '' - # Clean up when building from a working tree. - if [[ -d $sourceRoot/.git ]]; then - git -C $sourceRoot clean -fd - fi + (cd $sourceRoot && find . -type f) | cut -c3- > $sourceRoot/.dist-files + cat $sourceRoot/.dist-files ''; preConfigure = '' (cd perl ; autoreconf --install --force --verbose) # TeX needs a writable font cache. export VARTEXFONTS=$TMPDIR/texfonts - - cp -rv ${nlohmann_json}/include/nlohmann src/nlohmann ''; distPhase = @@ -64,7 +55,9 @@ let build = pkgs.lib.genAttrs systems (system: - with import <nixpkgs> { inherit system; }; + let pkgs = import nixpkgs { inherit system; }; in + + with pkgs; with import ./release-common.nix { inherit pkgs; }; @@ -72,22 +65,7 @@ let name = "nix"; src = tarball; - buildInputs = - [ curl - bzip2 xz brotli - openssl pkgconfig sqlite boehmgc - - # Tests - git - mercurial - ] - ++ lib.optional stdenv.isLinux libseccomp - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) - (aws-sdk-cpp.override { - apis = ["s3"]; - customMemoryManagement = false; - }); + buildInputs = buildDeps; configureFlags = configureFlags ++ [ "--sysconfdir=/etc" ]; @@ -107,14 +85,14 @@ let perlBindings = pkgs.lib.genAttrs systems (system: - let pkgs = import <nixpkgs> { inherit system; }; in with pkgs; + let pkgs = import nixpkgs { inherit system; }; in with pkgs; releaseTools.nixBuild { name = "nix-perl"; src = tarball; buildInputs = - [ (builtins.getAttr system jobs.build) curl bzip2 xz pkgconfig pkgs.perl ] + [ jobs.build.${system} curl bzip2 xz pkgconfig pkgs.perl boost ] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; configureFlags = '' @@ -132,34 +110,56 @@ let binaryTarball = pkgs.lib.genAttrs systems (system: - # FIXME: temporarily use a different branch for the Darwin build. 
- with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; let toplevel = builtins.getAttr system jobs.build; version = toplevel.src.version; + installerClosureInfo = closureInfo { rootPaths = [ toplevel cacert ]; }; in runCommand "nix-binary-tarball-${version}" - { exportReferencesGraph = [ "closure1" toplevel "closure2" cacert ]; - buildInputs = [ perl shellcheck ]; + { nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; } '' - storePaths=$(perl ${pathsFromGraph} ./closure1 ./closure2) - printRegistration=1 perl ${pathsFromGraph} ./closure1 ./closure2 > $TMPDIR/reginfo + cp ${installerClosureInfo}/registration $TMPDIR/reginfo substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \ --subst-var-by nix ${toplevel} \ --subst-var-by cacert ${cacert} - substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user \ + + substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ + --subst-var-by nix ${toplevel} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ + --subst-var-by nix ${toplevel} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ --subst-var-by nix ${toplevel} \ --subst-var-by cacert ${cacert} - shellcheck -e SC1090 $TMPDIR/install - shellcheck -e SC1091,SC2002 $TMPDIR/install-darwin-multi-user + if type -p shellcheck; then + # SC1090: Don't worry about not being able to find + # $nix/etc/profile.d/nix.sh + shellcheck --exclude SC1090 $TMPDIR/install + shellcheck $TMPDIR/install-darwin-multi-user.sh + shellcheck $TMPDIR/install-systemd-multi-user.sh + + # SC1091: Don't panic about not being able to source + # /etc/profile + # SC2002: Ignore "useless cat" "error", when loading + # .reginfo, as the cat is a much cleaner + # implementation, even though it is "useless" + # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving + # root's home directory + shellcheck --external-sources \ + --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user + fi chmod +x $TMPDIR/install - chmod +x $TMPDIR/install-darwin-multi-user + chmod +x $TMPDIR/install-darwin-multi-user.sh + chmod +x $TMPDIR/install-systemd-multi-user.sh + chmod +x $TMPDIR/install-multi-user dir=nix-${version}-${system} fn=$out/$dir.tar.bz2 mkdir -p $out/nix-support @@ -171,22 +171,23 @@ let --transform "s,$TMPDIR/install,$dir/install," \ --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \ --transform "s,$NIX_STORE,$dir/store,S" \ - $TMPDIR/install $TMPDIR/install-darwin-multi-user $TMPDIR/reginfo $storePaths + $TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \ + $TMPDIR/install-systemd-multi-user.sh \ + $TMPDIR/install-multi-user $TMPDIR/reginfo \ + $(cat ${installerClosureInfo}/store-paths) ''); coverage = - with import <nixpkgs> { system = "x86_64-linux"; }; + with pkgs; + + with import ./release-common.nix { inherit pkgs; }; releaseTools.coverageAnalysis { name = "nix-build"; src = tarball; - buildInputs = - [ curl bzip2 openssl pkgconfig sqlite xz libsodium libseccomp - # These are for "make check" only: - graphviz libxml2 libxslt - ]; + buildInputs = buildDeps; configureFlags = '' --disable-init-state @@ -196,7 +197,7 @@ let doInstallCheck = true; - lcovFilter = [ "*/boost/*" "*-tab.*" ]; + lcovFilter = [ "*/boost/*" "*-tab.*" "*/nlohmann/*" "*/linenoise/*" ]; # We call `dot', and 
even though we just use it to # syntax-check generated dot files, it still requires some @@ -205,35 +206,37 @@ let }; - rpm_fedora25i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora25i386) [ "libsodium-devel" ]; - rpm_fedora25x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora25x86_64) [ "libsodium-devel" ]; + rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ]; #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ]; #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ]; - deb_ubuntu1604i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1604i386) [ "libsodium-dev" ] [ "libsodium18" ]; - deb_ubuntu1604x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1604x86_64) [ "libsodium-dev" ] [ "libsodium18" ]; - deb_ubuntu1610i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1610i386) [ "libsodium-dev" ] [ "libsodium18" ]; - deb_ubuntu1610x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1610x86_64) [ "libsodium-dev" ] [ "libsodium18" ]; + deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ]; + deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ]; # System tests. tests.remoteBuilds = (import ./tests/remote-builds.nix rec { + inherit nixpkgs; nix = build.x86_64-linux; system = "x86_64-linux"; }); tests.nix-copy-closure = (import ./tests/nix-copy-closure.nix rec { + inherit nixpkgs; nix = build.x86_64-linux; system = "x86_64-linux"; }); - tests.setuid = pkgs.lib.genAttrs (pkgs.lib.filter (pkgs.lib.hasSuffix "-linux") systems) (system: - import ./tests/setuid.nix rec { - nix = build.${system}; inherit system; - }); + tests.setuid = pkgs.lib.genAttrs + ["i686-linux" "x86_64-linux"] + (system: + import ./tests/setuid.nix rec { + inherit nixpkgs; + nix = build.${system}; inherit system; + }); tests.binaryTarball = - with import <nixpkgs> { system = "x86_64-linux"; }; + with import nixpkgs { system = "x86_64-linux"; }; vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test" { diskImage = vmTools.diskImages.ubuntu1204x86_64; } @@ -252,7 +255,7 @@ let ''); # */ tests.evalNixpkgs = - import <nixpkgs/pkgs/top-level/make-tarball.nix> { + import (nixpkgs + "/pkgs/top-level/make-tarball.nix") { inherit nixpkgs; inherit pkgs; nix = build.x86_64-linux; @@ -265,7 +268,8 @@ let export NIX_STATE_DIR=$TMPDIR nix-store --init - nix-instantiate ${nixpkgs}/nixos/release-combined.nix -A tested --dry-run + nix-instantiate ${nixpkgs}/nixos/release-combined.nix -A tested --dry-run \ + --arg nixpkgs '{ outPath = ${nixpkgs}; revCount = 123; shortRev = "abcdefgh"; }' touch $out ''; @@ -283,12 +287,6 @@ let binaryTarball.i686-linux binaryTarball.x86_64-darwin binaryTarball.x86_64-linux - #deb_debian8i386 - #deb_debian8x86_64 - deb_ubuntu1604i386 - deb_ubuntu1604x86_64 - rpm_fedora25i386 - rpm_fedora25x86_64 tests.remoteBuilds tests.nix-copy-closure tests.binaryTarball @@ -306,16 +304,17 @@ let makeRPM = system: diskImageFun: extraPackages: - with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; releaseTools.rpmBuild rec { name = "nix-rpm"; src = jobs.tarball; diskImage = (diskImageFun vmTools.diskImageFuns) { extraPackages = - [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" ] + [ "sqlite" 
"sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" ] ++ extraPackages; }; - memSize = 1024; + # At most 2047MB can be simulated in qemu-system-i386 + memSize = 2047; meta.schedulingPriority = 50; postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck"; #enableParallelBuilding = true; @@ -328,16 +327,16 @@ let makeDeb = system: diskImageFun: extraPackages: extraDebPackages: - with import <nixpkgs> { inherit system; }; + with import nixpkgs { inherit system; }; releaseTools.debBuild { name = "nix-deb"; src = jobs.tarball; diskImage = (diskImageFun vmTools.diskImageFuns) { extraPackages = - [ "libsqlite3-dev" "libbz2-dev" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" "libseccomp-dev" "ncurses-bin" ] + [ "libsqlite3-dev" "libbz2-dev" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" "libseccomp-dev" "libsodium-dev" "libboost-all-dev" ] ++ extraPackages; }; - memSize = 1024; + memSize = 2047; meta.schedulingPriority = 50; postInstall = "make installcheck"; configureFlags = "--sysconfdir=/etc"; diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh index cea25eb8adf3..87c4c2b0582a 100644 --- a/scripts/install-darwin-multi-user.sh +++ b/scripts/install-darwin-multi-user.sh @@ -1,813 +1,144 @@ -#!/bin/bash +#!/usr/bin/env bash set -eu set -o pipefail -# Sourced from: -# - https://github.com/LnL7/nix-darwin/blob/8c29d0985d74b4a990238497c47a2542a5616b3c/bootstrap.sh -# - https://gist.github.com/expipiplus1/e571ce88c608a1e83547c918591b149f/ac504c6c1b96e65505fbda437a28ce563408ecb0 -# - https://github.com/NixOS/nixos-org-configurations/blob/a122f418797713d519aadf02e677fce0dc1cb446/delft/scripts/nix-mac-installer.sh -# - https://github.com/matthewbauer/macNixOS/blob/f6045394f9153edea417be90c216788e754feaba/install-macNixOS.sh -# - https://gist.github.com/LnL7/9717bd6cdcb30b086fd7f2093e5f8494/86b26f852ce563e973acd30f796a9a416248c34a -# -# however tracking which bits came from which would be impossible. - -readonly ESC='\033[0m' -readonly BOLD='\033[38;1m' -readonly BLUE='\033[38;34m' -readonly BLUE_UL='\033[38;4;34m' -readonly GREEN='\033[38;32m' -readonly GREEN_UL='\033[38;4;32m' -readonly RED='\033[38;31m' -readonly RED_UL='\033[38;4;31m' -readonly YELLOW='\033[38;33m' -readonly YELLOW_UL='\033[38;4;33m' - -readonly CORES=$(sysctl -n hw.ncpu) -readonly NIX_USER_COUNT="$CORES" -readonly NIX_BUILD_GROUP_ID="30000" -readonly NIX_BUILD_GROUP_NAME="nixbld" -readonly NIX_FIRST_BUILD_UID="30001" -# Please don't change this. We don't support it, because the -# default shell profile that comes with Nix doesn't support it. -readonly NIX_ROOT="/nix" readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist -readonly PROFILE_TARGETS=("/etc/profile" "/etc/bashrc" "/etc/zshrc") -readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" -readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" - -readonly NIX_INSTALLED_NIX="@nix@" -readonly NIX_INSTALLED_CACERT="@cacert@" -readonly EXTRACTED_NIX_PATH="$(dirname "$0")" - -readonly ROOT_HOME="/var/root" - -if [ -t 0 ]; then - readonly IS_HEADLESS='no' -else - readonly IS_HEADLESS='yes' -fi +dsclattr() { + /usr/bin/dscl . -read "$1" \ + | awk "/$2/ { print \$2 }" +} -headless() { - if [ "$IS_HEADLESS" = "yes" ]; then - return 0 - else - return 1 +poly_validate_assumptions() { + if [ "$(uname -s)" != "Darwin" ]; then + failure "This script is for use with macOS!" 
fi } -contactme() { - echo "We'd love to help if you need it." - echo "" - echo "If you can, open an issue at https://github.com/nixos/nix/issues" - echo "" - echo "Or feel free to contact the team," - echo " - on IRC #nixos on irc.freenode.net" - echo " - on twitter @nixos_org" +poly_service_installed_check() { + [ -e "$PLIST_DEST" ] } -uninstall_directions() { - subheader "Uninstalling nix:" - local step=0 - - if [ -e "$PLIST_DEST" ]; then - step=$((step + 1)) +poly_service_uninstall_directions() { cat <<EOF -$step. Delete $PLIST_DEST +$1. Delete $PLIST_DEST sudo launchctl unload $PLIST_DEST sudo rm $PLIST_DEST EOF - fi - - for profile_target in "${PROFILE_TARGETS[@]}"; do - if [ -e "$profile_target" ] && [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then - step=$((step + 1)) - cat <<EOF -$step. Restore $profile_target$PROFILE_BACKUP_SUFFIX back to $profile_target - - sudo mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target - -(after this one, you may need to re-open any terminals that were -opened while it existed.) - -EOF - fi - done +} - step=$((step + 1)) +poly_service_setup_note() { cat <<EOF -$step. Delete the files Nix added to your system: - - sudo rm -rf /etc/nix $NIX_ROOT $ROOT_HOME/.nix-profile $ROOT_HOME/.nix-defexpr $ROOT_HOME/.nix-channels $HOME/.nix-profile $HOME/.nix-defexpr $HOME/.nix-channels - -and that is it. + - load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon EOF - -} - -nix_user_for_core() { - printf "nixbld%d" "$1" -} - -nix_uid_for_core() { - echo $((NIX_FIRST_BUILD_UID + $1 - 1)) } -dsclattr() { - /usr/bin/dscl . -read "$1" \ - | awk "/$2/ { print \$2 }" -} - -_textout() { - echo -en "$1" - shift - if [ "$*" = "" ]; then - cat - else - echo "$@" - fi - echo -en "$ESC" -} - -header() { - follow="---------------------------------------------------------" - header=$(echo "---- $* $follow$follow$follow" | head -c 80) - echo "" - _textout "$BLUE" "$header" -} - -warningheader() { - follow="---------------------------------------------------------" - header=$(echo "---- $* $follow$follow$follow" | head -c 80) - echo "" - _textout "$RED" "$header" -} - -subheader() { - echo "" - _textout "$BLUE_UL" "$*" -} - -row() { - printf "$BOLD%s$ESC:\t%s\n" "$1" "$2" -} - -task() { - echo "" - ok "~~> $1" -} - -bold() { - echo "$BOLD$*$ESC" -} - -ok() { - _textout "$GREEN" "$@" -} - -warning() { - warningheader "warning!" - cat - echo "" -} - -failure() { - header "oh no!" - _textout "$RED" "$@" - echo "" - _textout "$RED" "$(contactme)" - trap finish_cleanup EXIT - exit 1 -} - -ui_confirm() { - _textout "$GREEN$GREEN_UL" "$1" - - if headless; then - echo "No TTY, assuming you would say yes :)" - return 0 - fi - - local prompt="[y/n] " - echo -n "$prompt" - while read -r y; do - if [ "$y" = "y" ]; then - echo "" - return 0 - elif [ "$y" = "n" ]; then - echo "" - return 1 - else - _textout "$RED" "Sorry, I didn't understand. I can only understand answers of y or n" - echo -n "$prompt" - fi - done - echo "" - return 1 -} - -__sudo() { - local expl="$1" - local cmd="$2" - shift - header "sudo execution" - - echo "I am executing:" - echo "" - printf " $ sudo %s\n" "$cmd" - echo "" - echo "$expl" - echo "" - - return 0 -} +poly_configure_nix_daemon_service() { + _sudo "to set up the nix-daemon as a LaunchDaemon" \ + ln -sfn "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST" -_sudo() { - local expl="$1" - shift - if ! 
headless; then - __sudo "$expl" "$*" - fi - sudo "$@" -} + _sudo "to load the LaunchDaemon plist for nix-daemon" \ + launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist + _sudo "to start the nix-daemon" \ + launchctl start org.nixos.nix-daemon -readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) -function finish_cleanup { - rm -rf "$SCRATCH" } -function finish_fail { - finish_cleanup - - failure <<EOF -Jeeze, something went wrong. If you can take all the output and open -an issue, we'd love to fix the problem so nobody else has this issue. - -:( -EOF +poly_group_exists() { + /usr/bin/dscl . -read "/Groups/$1" > /dev/null 2>&1 } -trap finish_fail EXIT - -function finish_success { - finish_cleanup - ok "Alright! We're done!" - cat <<EOF - -Before Nix will work in your existing shells, you'll need to close -them and open them again. Other than that, you should be ready to go. - -Try it! Open a new terminal, and type: - - $ nix-shell -p figlet -p lolcat --run "echo 'nix rules' | figlet | lolcat" - -Thank you for using this installer. If you have any feedback, don't -hesitate: - -$(contactme) -EOF +poly_group_id_get() { + dsclattr "/Groups/$1" "PrimaryGroupID" } - -validate_starting_assumptions() { - if [ "$(uname -s)" != "Darwin" ]; then - failure "This script is for use with macOS!" - fi - - if [ $EUID -eq 0 ]; then - failure <<EOF -Please do not run this script with root privileges. We will call sudo -when we need to. -EOF - fi - - if type nix-env 2> /dev/null >&2; then - failure <<EOF -Nix already appears to be installed, and this tool assumes it is -_not_ yet installed. - -$(uninstall_directions) -EOF - fi - - if [ "${NIX_REMOTE:-}" != "" ]; then - failure <<EOF -For some reason, \$NIX_REMOTE is set. It really should not be set -before this installer runs, and it hints that Nix is currently -installed. Please delete the old Nix installation and start again. - -Note: You might need to close your shell window and open a new shell -to clear the variable. -EOF - fi - - if echo "${SSL_CERT_FILE:-}" | grep -qE "(nix/var/nix|nix-profile)"; then - failure <<EOF -It looks like \$SSL_CERT_FILE is set to a path that used to be part of -the old Nix installation. Please unset that variable and try again: - - $ unset SSL_CERT_FILE - -EOF - fi - - for file in ~/.bash_profile ~/.bash_login ~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do - if [ -f "$file" ]; then - if grep -l "^[^#].*.nix-profile" "$file"; then - failure <<EOF -I found a reference to a ".nix-profile" in $file. -This has a high chance of breaking a new nix installation. It was most -likely put there by a previous Nix installer. - -Please remove this reference and try running this again. You should -also look for similar references in: - - - ~/.bash_profile - - ~/.bash_login - - ~/.profile - -or other shell init files that you may have. - -$(uninstall_directions) -EOF - fi - fi - done - - if [ -d /nix ]; then - failure <<EOF -There are some relics of a previous installation of Nix at /nix, and -this scripts assumes Nix is _not_ yet installed. Please delete the old -Nix installation and start again. - -$(uninstall_directions) -EOF - fi - - if [ -d /etc/nix ]; then - failure <<EOF -There are some relics of a previous installation of Nix at /etc/nix, and -this scripts assumes Nix is _not_ yet installed. Please delete the old -Nix installation and start again. 
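The pre-flight checks above scan the usual shell init files for stray ".nix-profile" lines left behind by an earlier single-user install, and then look for relics under /nix and /etc/nix (the rewritten shared installer further down keeps the same checks). A minimal way to run the init-file sweep by hand, as a sketch that assumes the same file list the script itself uses:

    for f in ~/.bash_profile ~/.bash_login ~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do
        # print any non-comment line that still sources a .nix-profile
        [ -f "$f" ] && grep -Hn '^[^#].*\.nix-profile' "$f"
    done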
- -$(uninstall_directions) -EOF - fi - - for profile_target in "${PROFILE_TARGETS[@]}"; do - if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then - failure <<EOF -When this script runs, it backs up the current $profile_target to -$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though. - -Please follow these instructions to clean up the old backup file: - -1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just -in case. - -2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like -it has anything nix-related in it. If it does, something is probably -quite wrong. Please open an issue or get in touch immediately. - -3. Take care to make sure that $profile_target doesn't look like it has -anything nix-related in it. If it does, and $profile_target _did not_, -run: - - $ /usr/bin/sudo /bin/mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target - -and try again. -EOF - fi - - if grep -qi "nix" "$profile_target"; then - failure <<EOF -It looks like $profile_target already has some Nix configuration in -there. There should be no reason to run this again. If you're having -trouble, please open an issue. -EOF - fi - done - - danger_paths=("$ROOT_HOME/.nix-defexpr" "$ROOT_HOME/.nix-channels" "$ROOT_HOME/.nix-profile") - for danger_path in "${danger_paths[@]}"; do - if _sudo "making sure that $danger_path doesn't exist" \ - test -e "$danger_path"; then - failure <<EOF -I found a file at $danger_path, which is a relic of a previous -installation. You must first delete this file before continuing. - -$(uninstall_directions) -EOF - fi - done +poly_create_build_group() { + _sudo "Create the Nix build group, $NIX_BUILD_GROUP_NAME" \ + /usr/sbin/dseditgroup -o create \ + -r "Nix build group for nix-daemon" \ + -i "$NIX_BUILD_GROUP_ID" \ + "$NIX_BUILD_GROUP_NAME" >&2 } -setup_report() { - header "hardware report" - row " Cores" "$CORES" - - header "Nix config report" - row " Temp Dir" "$SCRATCH" - row " Nix Root" "$NIX_ROOT" - row " Build Users" "$NIX_USER_COUNT" - row " Build Group ID" "$NIX_BUILD_GROUP_ID" - row "Build Group Name" "$NIX_BUILD_GROUP_NAME" - if [ "${ALLOW_PREEXISTING_INSTALLATION:-}" != "" ]; then - row "Preexisting Install" "Allowed" - fi - - subheader "build users:" - - row " Username" "UID" - for i in $(seq 1 "$NIX_USER_COUNT"); do - row " $(nix_user_for_core "$i")" "$(nix_uid_for_core "$i")" - done - echo "" +poly_user_exists() { + /usr/bin/dscl . -read "/Users/$1" > /dev/null 2>&1 } -create_build_group() { - local primary_group_id - - task "Setting up the build group $NIX_BUILD_GROUP_NAME" - if ! /usr/bin/dscl . -read "/Groups/$NIX_BUILD_GROUP_NAME" > /dev/null 2>&1; then - _sudo "Create the Nix build group, $NIX_BUILD_GROUP_NAME" \ - /usr/sbin/dseditgroup -o create \ - -r "Nix build group for nix-daemon" \ - -i "$NIX_BUILD_GROUP_ID" \ - "$NIX_BUILD_GROUP_NAME" >&2 - row " Created" "Yes" - else - primary_group_id=$(dsclattr "/Groups/$NIX_BUILD_GROUP_NAME" "PrimaryGroupID") - if [ "$primary_group_id" -ne "$NIX_BUILD_GROUP_ID" ]; then - failure <<EOF -It seems the build group $NIX_BUILD_GROUP_NAME already exists, but -with the UID $primary_group_id. This script can't really handle -that right now, so I'm going to give up. - -You can fix this by editing this script and changing the -NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID -to $primary_group_id and re-run. 
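Both the removed create_build_group code and the new poly_group_id_get path compare the existing group's PrimaryGroupID against NIX_BUILD_GROUP_ID (30000 by default). For reference, on macOS the value being compared can be inspected by hand with the same directory-services call the script wraps:

    /usr/bin/dscl . -read /Groups/nixbld PrimaryGroupID
    # with the defaults above this prints: PrimaryGroupID: 30000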
-EOF - else - row " Exists" "Yes" - fi - fi +poly_user_id_get() { + dsclattr "/Users/$1" "UniqueID" } -create_build_user_for_core() { - local coreid - local username - local uid - - coreid="$1" - username=$(nix_user_for_core "$coreid") - uid=$(nix_uid_for_core "$coreid") - dsclpath="/Users/$username" - - task "Setting up the build user $username" - - if ! /usr/bin/dscl . -read "$dsclpath" > /dev/null 2>&1; then - _sudo "Creating the Nix build user, $username" \ - /usr/sbin/sysadminctl -addUser -fullName "Nix build user $coreid" \ - -home /var/empty \ - -UID "${uid}" \ - -addUser "${username}" - row " Created" "Yes" - else - actual_uid=$(dsclattr "$dsclpath" "UniqueID") - if [ "$actual_uid" -ne "$uid" ]; then - failure <<EOF -It seems the build user $username already exists, but with the UID -with the UID $actual_uid. This script can't really handle that right -now, so I'm going to give up. - -If you already created the users and you know they start from -$actual_uid and go up from there, you can edit this script and change -NIX_FIRST_BUILD_UID near the top of the file to $actual_uid and try -again. -EOF - else - row " Exists" "Yes" - fi - fi - - if [ "$(dsclattr "$dsclpath" "IsHidden")" = "1" ]; then - row " IsHidden" "Yes" - else - _sudo "in order to make $username a hidden user" \ - /usr/bin/dscl . -create "$dsclpath" "IsHidden" "1" - row " IsHidden" "Yes" - fi - - if [ "$(dsclattr "$dsclpath" "UserShell")" = "/sbin/nologin" ]; then - row " Logins Disabled" "Yes" - else - _sudo "in order to prevent $username from logging in" \ - /usr/bin/dscl . -create "$dsclpath" "UserShell" "/sbin/nologin" - row " Logins Disabled" "Yes" - fi - - if dseditgroup -o checkmember -m "$username" "$NIX_BUILD_GROUP_NAME" > /dev/null 2>&1 ; then - row " Member of $NIX_BUILD_GROUP_NAME" "Yes" - else - _sudo "Add $username to the $NIX_BUILD_GROUP_NAME group"\ - /usr/sbin/dseditgroup -o edit -t user \ - -a "$username" "$NIX_BUILD_GROUP_NAME" - row " Member of $NIX_BUILD_GROUP_NAME" "Yes" - fi - - if [ "$(dsclattr "$dsclpath" "PrimaryGroupId")" = "$NIX_BUILD_GROUP_ID" ]; then - row " PrimaryGroupID" "$NIX_BUILD_GROUP_ID" - else - _sudo "to let the nix daemon use this user for builds (this might seem redundant, but there are two concepts of group membership)" \ - /usr/bin/dscl . -create "$dsclpath" "PrimaryGroupId" "$NIX_BUILD_GROUP_ID" - row " PrimaryGroupID" "$NIX_BUILD_GROUP_ID" - - fi +poly_user_hidden_get() { + dsclattr "/Users/$1" "IsHidden" } -create_build_users() { - for i in $(seq 1 "$NIX_USER_COUNT"); do - create_build_user_for_core "$i" - done +poly_user_hidden_set() { + _sudo "in order to make $1 a hidden user" \ + /usr/bin/dscl . 
-create "/Users/$1" "IsHidden" "1" } -create_directories() { - _sudo "to make the basic directory structure of Nix (part 1)" \ - mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} - - _sudo "to make the basic directory structure of Nix (part 2)" \ - mkdir -pv -m 1777 /nix/var/nix/{gcroots,profiles}/per-user - - _sudo "to make the basic directory structure of Nix (part 3)" \ - mkdir -pv -m 1775 /nix/store - - _sudo "to make the basic directory structure of Nix (part 4)" \ - chgrp "$NIX_BUILD_GROUP_NAME" /nix/store - - _sudo "to set up the root user's profile (part 1)" \ - mkdir -pv -m 0755 /nix/var/nix/profiles/per-user/root - - _sudo "to set up the root user's profile (part 2)" \ - mkdir -pv -m 0700 "$ROOT_HOME/.nix-defexpr" - - _sudo "to place the default nix daemon configuration (part 1)" \ - mkdir -pv -m 0555 /etc/nix +poly_user_home_get() { + dsclattr "/Users/$1" "NFSHomeDirectory" } -place_channel_configuration() { - echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels" - _sudo "to set up the default system channel (part 1)" \ - install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels" +poly_user_home_set() { + _sudo "in order to give $1 a safe home directory" \ + /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2" } -welcome_to_nix() { - ok "Welcome to the Multi-User Nix Installation" - - cat <<EOF - -This installation tool will set up your computer with the Nix package -manager. This will happen in a few stages: - -1. Make sure your computer doesn't already have Nix. If it does, I - will show you instructions on how to clean up your old one. - -2. Show you what we are going to install and where. Then we will ask - if you are ready to continue. - -3. Create the system users and groups that the Nix daemon uses to run - builds. - -4. Perform the basic installation of the Nix files daemon. - -5. Configure your shell to import special Nix Profile files, so you - can use Nix. - -6. Start the Nix daemon. - -EOF - - if ui_confirm "Would you like to see a more detailed list of what we will do?"; then - cat <<EOF - -We will: - - - make sure your computer doesn't already have Nix files - (if it does, I will tell you how to clean them up.) - - create local users (see the list above for the users we'll make) - - create a local group ($NIX_BUILD_GROUP_NAME) - - install Nix in to $NIX_ROOT - - create a configuration file in /etc/nix - - set up the "default profile" by creating some Nix-related files in - $ROOT_HOME -EOF - for profile_target in "${PROFILE_TARGETS[@]}"; do - if [ -e "$profile_target" ]; then - cat <<EOF - - back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX - - update $profile_target to include some Nix configuration -EOF - fi - done - cat <<EOF - - load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon - -EOF - if ! ui_confirm "Ready to continue?"; then - failure <<EOF -Okay, maybe you would like to talk to the team. -EOF - fi - fi +poly_user_note_get() { + dsclattr "/Users/$1" "RealName" } -chat_about_sudo() { - header "let's talk about sudo" - - if headless; then - cat <<EOF -This script is going to call sudo a lot. Normally, it would show you -exactly what commands it is running and why. However, the script is -run in a headless fashion, like this: - - $ curl https://nixos.org/nix/install | sh - -or maybe in a CI pipeline. Because of that, we're going to skip the -verbose output in the interest of brevity. 
- -If you would like to -see the output, try like this: - - $ curl -o install-nix https://nixos.org/nix/install - $ sh ./install-nix - -EOF - return 0 - fi - - cat <<EOF -This script is going to call sudo a lot. Every time we do, it'll -output exactly what it'll do, and why. - -Just like this: -EOF - - __sudo "to demonstrate how our sudo prompts look" \ - echo "this is a sudo prompt" - - cat <<EOF - -This might look scary, but everything can be undone by running just a -few commands. We used to ask you to confirm each time sudo ran, but it -was too many times. Instead, I'll just ask you this one time: - -EOF - if ui_confirm "Can we use sudo?"; then - ok "Yay! Thanks! Let's get going!" - else - failure <<EOF -That is okay, but we can't install. -EOF - fi +poly_user_note_set() { + _sudo "in order to give $username a useful note" \ + /usr/bin/dscl . -create "/Users/$1" "RealName" "$2" } -install_from_extracted_nix() { - ( - cd "$EXTRACTED_NIX_PATH" - - _sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \ - rsync -rlpt "$(pwd)/store/" "$NIX_ROOT/store/" - - if [ -d "$NIX_INSTALLED_NIX" ]; then - echo " Alright! We have our first nix at $NIX_INSTALLED_NIX" - else - failure <<EOF -Something went wrong, and I didn't find Nix installed at -$NIX_INSTALLED_NIX. -EOF - fi - - _sudo "to initialize the Nix Database" \ - $NIX_INSTALLED_NIX/bin/nix-store --init - - cat ./.reginfo \ - | _sudo "to load data for the first time in to the Nix Database" \ - "$NIX_INSTALLED_NIX/bin/nix-store" --load-db - - echo " Just finished getting the nix database ready." - ) +poly_user_shell_get() { + dsclattr "/Users/$1" "UserShell" } -shell_source_lines() { - cat <<EOF - -# Nix -if [ -e '$PROFILE_NIX_FILE' ]; then - . '$PROFILE_NIX_FILE' -fi -# End Nix - -EOF +poly_user_shell_set() { + _sudo "in order to give $1 a safe home directory" \ + /usr/bin/dscl . 
-create "/Users/$1" "UserShell" "$2" } -configure_shell_profile() { - for profile_target in "${PROFILE_TARGETS[@]}"; do - if [ -e "$profile_target" ]; then - _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \ - cp "$profile_target" "$profile_target$PROFILE_BACKUP_SUFFIX" - - shell_source_lines \ - | _sudo "extend your $profile_target with nix-daemon settings" \ - tee -a "$profile_target" - fi - done +poly_user_in_group_check() { + username=$1 + group=$2 + dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1 } -setup_default_profile() { - _sudo "to installing a bootstrapping Nix in to the default Profile" \ - HOME=$ROOT_HOME "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX" - - _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ - HOME=$ROOT_HOME "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" +poly_user_in_group_set() { + username=$1 + group=$2 - _sudo "to update the default channel in the default profile" \ - HOME=$ROOT_HOME NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs + _sudo "Add $username to the $group group"\ + /usr/sbin/dseditgroup -o edit -t user \ + -a "$username" "$group" } - -place_nix_configuration() { - cat <<EOF > "$SCRATCH/nix.conf" -build-users-group = $NIX_BUILD_GROUP_NAME - -max-jobs = $NIX_USER_COUNT -cores = 1 -sandbox = false - -binary-caches = https://cache.nixos.org/ -trusted-binary-caches = -binary-cache-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= -signed-binary-caches = * - -trusted-users = root -allowed-users = * -EOF - _sudo "to place the default nix daemon configuration (part 2)" \ - install -m 0664 "$SCRATCH/nix.conf" /etc/nix/nix.conf +poly_user_primary_group_get() { + dsclattr "/Users/$1" "PrimaryGroupID" } -configure_nix_daemon_plist() { - _sudo "to set up the nix-daemon as a LaunchDaemon" \ - ln -sfn "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST" - - _sudo "to load the LaunchDaemon plist for nix-daemon" \ - launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist - - _sudo "to start the nix-daemon" \ - launchctl start org.nixos.nix-daemon - +poly_user_primary_group_set() { + _sudo "to let the nix daemon use this user for builds (this might seem redundant, but there are two concepts of group membership)" \ + /usr/bin/dscl . -create "/Users/$1" "PrimaryGroupID" "$2" } +poly_create_build_user() { + username=$1 + uid=$2 + builder_num=$3 -main() { - welcome_to_nix - chat_about_sudo - - if [ "${ALLOW_PREEXISTING_INSTALLATION:-}" = "" ]; then - validate_starting_assumptions - fi - - setup_report - - if ! ui_confirm "Ready to continue?"; then - ok "Alright, no changes have been made :)" - contactme - trap finish_cleanup EXIT - exit 1 - fi - - create_build_group - create_build_users - create_directories - place_channel_configuration - install_from_extracted_nix - - configure_shell_profile - - set +eu - . /etc/profile - set -eu - - setup_default_profile - place_nix_configuration - configure_nix_daemon_plist - - trap finish_success EXIT + _sudo "Creating the Nix build user (#$builder_num), $username" \ + /usr/bin/dscl . 
create "/Users/$username" \ + UniqueID "${uid}" } - - -main diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh new file mode 100644 index 000000000000..5f6542355e0c --- /dev/null +++ b/scripts/install-multi-user.sh @@ -0,0 +1,797 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + +# Sourced from: +# - https://github.com/LnL7/nix-darwin/blob/8c29d0985d74b4a990238497c47a2542a5616b3c/bootstrap.sh +# - https://gist.github.com/expipiplus1/e571ce88c608a1e83547c918591b149f/ac504c6c1b96e65505fbda437a28ce563408ecb0 +# - https://github.com/NixOS/nixos-org-configurations/blob/a122f418797713d519aadf02e677fce0dc1cb446/delft/scripts/nix-mac-installer.sh +# - https://github.com/matthewbauer/macNixOS/blob/f6045394f9153edea417be90c216788e754feaba/install-macNixOS.sh +# - https://gist.github.com/LnL7/9717bd6cdcb30b086fd7f2093e5f8494/86b26f852ce563e973acd30f796a9a416248c34a +# +# however tracking which bits came from which would be impossible. + +readonly ESC='\033[0m' +readonly BOLD='\033[38;1m' +readonly BLUE='\033[38;34m' +readonly BLUE_UL='\033[38;4;34m' +readonly GREEN='\033[38;32m' +readonly GREEN_UL='\033[38;4;32m' +readonly RED='\033[38;31m' +readonly RED_UL='\033[38;4;31m' +readonly YELLOW='\033[38;33m' +readonly YELLOW_UL='\033[38;4;33m' + +readonly NIX_USER_COUNT="32" +readonly NIX_BUILD_GROUP_ID="30000" +readonly NIX_BUILD_GROUP_NAME="nixbld" +readonly NIX_FIRST_BUILD_UID="30001" +# Please don't change this. We don't support it, because the +# default shell profile that comes with Nix doesn't support it. +readonly NIX_ROOT="/nix" + +readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc") +readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" +readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" + +readonly NIX_INSTALLED_NIX="@nix@" +readonly NIX_INSTALLED_CACERT="@cacert@" +readonly EXTRACTED_NIX_PATH="$(dirname "$0")" + +readonly ROOT_HOME=$(echo ~root) + +if [ -t 0 ]; then + readonly IS_HEADLESS='no' +else + readonly IS_HEADLESS='yes' +fi + +headless() { + if [ "$IS_HEADLESS" = "yes" ]; then + return 0 + else + return 1 + fi +} + +contactme() { + echo "We'd love to help if you need it." + echo "" + echo "If you can, open an issue at https://github.com/nixos/nix/issues" + echo "" + echo "Or feel free to contact the team," + echo " - on IRC #nixos on irc.freenode.net" + echo " - on twitter @nixos_org" +} + +uninstall_directions() { + subheader "Uninstalling nix:" + local step=0 + + if poly_service_installed_check; then + step=$((step + 1)) + poly_service_uninstall_directions "$step" + fi + + for profile_target in "${PROFILE_TARGETS[@]}"; do + if [ -e "$profile_target" ] && [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then + step=$((step + 1)) + cat <<EOF +$step. Restore $profile_target$PROFILE_BACKUP_SUFFIX back to $profile_target + + sudo mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target + +(after this one, you may need to re-open any terminals that were +opened while it existed.) + +EOF + fi + done + + step=$((step + 1)) + cat <<EOF +$step. Delete the files Nix added to your system: + + sudo rm -rf /etc/nix $NIX_ROOT $ROOT_HOME/.nix-profile $ROOT_HOME/.nix-defexpr $ROOT_HOME/.nix-channels $HOME/.nix-profile $HOME/.nix-defexpr $HOME/.nix-channels + +and that is it. 
+ +EOF + +} + +nix_user_for_core() { + printf "nixbld%d" "$1" +} + +nix_uid_for_core() { + echo $((NIX_FIRST_BUILD_UID + $1 - 1)) +} + +_textout() { + echo -en "$1" + shift + if [ "$*" = "" ]; then + cat + else + echo "$@" + fi + echo -en "$ESC" +} + +header() { + follow="---------------------------------------------------------" + header=$(echo "---- $* $follow$follow$follow" | head -c 80) + echo "" + _textout "$BLUE" "$header" +} + +warningheader() { + follow="---------------------------------------------------------" + header=$(echo "---- $* $follow$follow$follow" | head -c 80) + echo "" + _textout "$RED" "$header" +} + +subheader() { + echo "" + _textout "$BLUE_UL" "$*" +} + +row() { + printf "$BOLD%s$ESC:\\t%s\\n" "$1" "$2" +} + +task() { + echo "" + ok "~~> $1" +} + +bold() { + echo "$BOLD$*$ESC" +} + +ok() { + _textout "$GREEN" "$@" +} + +warning() { + warningheader "warning!" + cat + echo "" +} + +failure() { + header "oh no!" + _textout "$RED" "$@" + echo "" + _textout "$RED" "$(contactme)" + trap finish_cleanup EXIT + exit 1 +} + +ui_confirm() { + _textout "$GREEN$GREEN_UL" "$1" + + if headless; then + echo "No TTY, assuming you would say yes :)" + return 0 + fi + + local prompt="[y/n] " + echo -n "$prompt" + while read -r y; do + if [ "$y" = "y" ]; then + echo "" + return 0 + elif [ "$y" = "n" ]; then + echo "" + return 1 + else + _textout "$RED" "Sorry, I didn't understand. I can only understand answers of y or n" + echo -n "$prompt" + fi + done + echo "" + return 1 +} + +__sudo() { + local expl="$1" + local cmd="$2" + shift + header "sudo execution" + + echo "I am executing:" + echo "" + printf " $ sudo %s\\n" "$cmd" + echo "" + echo "$expl" + echo "" + + return 0 +} + +_sudo() { + local expl="$1" + shift + if ! headless; then + __sudo "$expl" "$*" + fi + sudo "$@" +} + + +readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) +function finish_cleanup { + rm -rf "$SCRATCH" +} + +function finish_fail { + finish_cleanup + + failure <<EOF +Jeeze, something went wrong. If you can take all the output and open +an issue, we'd love to fix the problem so nobody else has this issue. + +:( +EOF +} +trap finish_fail EXIT + +function finish_success { + finish_cleanup + + ok "Alright! We're done!" + cat <<EOF + +Before Nix will work in your existing shells, you'll need to close +them and open them again. Other than that, you should be ready to go. + +Try it! Open a new terminal, and type: + + $ nix-shell -p nix-info --run "nix-info -m" + +Thank you for using this installer. If you have any feedback, don't +hesitate: + +$(contactme) +EOF +} + + +validate_starting_assumptions() { + poly_validate_assumptions + + if [ $EUID -eq 0 ]; then + failure <<EOF +Please do not run this script with root privileges. We will call sudo +when we need to. +EOF + fi + + if type nix-env 2> /dev/null >&2; then + failure <<EOF +Nix already appears to be installed, and this tool assumes it is +_not_ yet installed. + +$(uninstall_directions) +EOF + fi + + if [ "${NIX_REMOTE:-}" != "" ]; then + failure <<EOF +For some reason, \$NIX_REMOTE is set. It really should not be set +before this installer runs, and it hints that Nix is currently +installed. Please delete the old Nix installation and start again. + +Note: You might need to close your shell window and open a new shell +to clear the variable. +EOF + fi + + if echo "${SSL_CERT_FILE:-}" | grep -qE "(nix/var/nix|nix-profile)"; then + failure <<EOF +It looks like \$SSL_CERT_FILE is set to a path that used to be part of +the old Nix installation. 
Please unset that variable and try again: + + $ unset SSL_CERT_FILE + +EOF + fi + + for file in ~/.bash_profile ~/.bash_login ~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do + if [ -f "$file" ]; then + if grep -l "^[^#].*.nix-profile" "$file"; then + failure <<EOF +I found a reference to a ".nix-profile" in $file. +This has a high chance of breaking a new nix installation. It was most +likely put there by a previous Nix installer. + +Please remove this reference and try running this again. You should +also look for similar references in: + + - ~/.bash_profile + - ~/.bash_login + - ~/.profile + +or other shell init files that you may have. + +$(uninstall_directions) +EOF + fi + fi + done + + if [ -d /nix ]; then + failure <<EOF +There are some relics of a previous installation of Nix at /nix, and +this scripts assumes Nix is _not_ yet installed. Please delete the old +Nix installation and start again. + +$(uninstall_directions) +EOF + fi + + if [ -d /etc/nix ]; then + failure <<EOF +There are some relics of a previous installation of Nix at /etc/nix, and +this scripts assumes Nix is _not_ yet installed. Please delete the old +Nix installation and start again. + +$(uninstall_directions) +EOF + fi + + for profile_target in "${PROFILE_TARGETS[@]}"; do + if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then + failure <<EOF +When this script runs, it backs up the current $profile_target to +$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though. + +Please follow these instructions to clean up the old backup file: + +1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just +in case. + +2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like +it has anything nix-related in it. If it does, something is probably +quite wrong. Please open an issue or get in touch immediately. + +3. Take care to make sure that $profile_target doesn't look like it has +anything nix-related in it. If it does, and $profile_target _did not_, +run: + + $ /usr/bin/sudo /bin/mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target + +and try again. +EOF + fi + + if [ -e "$profile_target" ] && grep -qi "nix" "$profile_target"; then + failure <<EOF +It looks like $profile_target already has some Nix configuration in +there. There should be no reason to run this again. If you're having +trouble, please open an issue. +EOF + fi + done + + danger_paths=("$ROOT_HOME/.nix-defexpr" "$ROOT_HOME/.nix-channels" "$ROOT_HOME/.nix-profile") + for danger_path in "${danger_paths[@]}"; do + if _sudo "making sure that $danger_path doesn't exist" \ + test -e "$danger_path"; then + failure <<EOF +I found a file at $danger_path, which is a relic of a previous +installation. You must first delete this file before continuing. + +$(uninstall_directions) +EOF + fi + done +} + +setup_report() { + header "Nix config report" + row " Temp Dir" "$SCRATCH" + row " Nix Root" "$NIX_ROOT" + row " Build Users" "$NIX_USER_COUNT" + row " Build Group ID" "$NIX_BUILD_GROUP_ID" + row "Build Group Name" "$NIX_BUILD_GROUP_NAME" + if [ "${ALLOW_PREEXISTING_INSTALLATION:-}" != "" ]; then + row "Preexisting Install" "Allowed" + fi + + subheader "build users:" + + row " Username" "UID" + for i in $(seq 1 "$NIX_USER_COUNT"); do + row " $(nix_user_for_core "$i")" "$(nix_uid_for_core "$i")" + done + echo "" +} + +create_build_group() { + local primary_group_id + + task "Setting up the build group $NIX_BUILD_GROUP_NAME" + if ! 
poly_group_exists "$NIX_BUILD_GROUP_NAME"; then + poly_create_build_group + row " Created" "Yes" + else + primary_group_id=$(poly_group_id_get "$NIX_BUILD_GROUP_NAME") + if [ "$primary_group_id" -ne "$NIX_BUILD_GROUP_ID" ]; then + failure <<EOF +It seems the build group $NIX_BUILD_GROUP_NAME already exists, but +with the UID $primary_group_id. This script can't really handle +that right now, so I'm going to give up. + +You can fix this by editing this script and changing the +NIX_BUILD_GROUP_ID variable near the top to from $NIX_BUILD_GROUP_ID +to $primary_group_id and re-run. +EOF + else + row " Exists" "Yes" + fi + fi +} + +create_build_user_for_core() { + local coreid + local username + local uid + + coreid="$1" + username=$(nix_user_for_core "$coreid") + uid=$(nix_uid_for_core "$coreid") + + task "Setting up the build user $username" + + if ! poly_user_exists "$username"; then + poly_create_build_user "$username" "$uid" "$coreid" + row " Created" "Yes" + else + actual_uid=$(poly_user_id_get "$username") + if [ "$actual_uid" != "$uid" ]; then + failure <<EOF +It seems the build user $username already exists, but with the UID +with the UID '$actual_uid'. This script can't really handle that right +now, so I'm going to give up. + +If you already created the users and you know they start from +$actual_uid and go up from there, you can edit this script and change +NIX_FIRST_BUILD_UID near the top of the file to $actual_uid and try +again. +EOF + else + row " Exists" "Yes" + fi + fi + + if [ "$(poly_user_hidden_get "$username")" = "1" ]; then + row " Hidden" "Yes" + else + poly_user_hidden_set "$username" + row " Hidden" "Yes" + fi + + if [ "$(poly_user_home_get "$username")" = "/var/empty" ]; then + row " Home Directory" "/var/empty" + else + poly_user_home_set "$username" "/var/empty" + row " Home Directory" "/var/empty" + fi + + # We use grep instead of an equality check because it is difficult + # to extract _just_ the user's note, instead it is prefixed with + # some plist junk. This was causing the user note to always be set, + # even if there was no reason for it. + if ! 
poly_user_note_get "$username" | grep -q "Nix build user $coreid"; then + row " Note" "Nix build user $coreid" + else + poly_user_note_set "$username" "Nix build user $coreid" + row " Note" "Nix build user $coreid" + fi + + if [ "$(poly_user_shell_get "$username")" = "/sbin/nologin" ]; then + row " Logins Disabled" "Yes" + else + poly_user_shell_set "$username" "/sbin/nologin" + row " Logins Disabled" "Yes" + fi + + if poly_user_in_group_check "$username" "$NIX_BUILD_GROUP_NAME"; then + row " Member of $NIX_BUILD_GROUP_NAME" "Yes" + else + poly_user_in_group_set "$username" "$NIX_BUILD_GROUP_NAME" + row " Member of $NIX_BUILD_GROUP_NAME" "Yes" + fi + + if [ "$(poly_user_primary_group_get "$username")" = "$NIX_BUILD_GROUP_ID" ]; then + row " PrimaryGroupID" "$NIX_BUILD_GROUP_ID" + else + poly_user_primary_group_set "$username" "$NIX_BUILD_GROUP_ID" + row " PrimaryGroupID" "$NIX_BUILD_GROUP_ID" + fi +} + +create_build_users() { + for i in $(seq 1 "$NIX_USER_COUNT"); do + create_build_user_for_core "$i" + done +} + +create_directories() { + _sudo "to make the basic directory structure of Nix (part 1)" \ + mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} + + _sudo "to make the basic directory structure of Nix (part 2)" \ + mkdir -pv -m 1777 /nix/var/nix/{gcroots,profiles}/per-user + + _sudo "to make the basic directory structure of Nix (part 3)" \ + mkdir -pv -m 1775 /nix/store + + _sudo "to make the basic directory structure of Nix (part 4)" \ + chgrp "$NIX_BUILD_GROUP_NAME" /nix/store + + _sudo "to set up the root user's profile (part 1)" \ + mkdir -pv -m 0755 /nix/var/nix/profiles/per-user/root + + _sudo "to set up the root user's profile (part 2)" \ + mkdir -pv -m 0700 "$ROOT_HOME/.nix-defexpr" + + _sudo "to place the default nix daemon configuration (part 1)" \ + mkdir -pv -m 0555 /etc/nix +} + +place_channel_configuration() { + echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels" + _sudo "to set up the default system channel (part 1)" \ + install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels" +} + +welcome_to_nix() { + ok "Welcome to the Multi-User Nix Installation" + + cat <<EOF + +This installation tool will set up your computer with the Nix package +manager. This will happen in a few stages: + +1. Make sure your computer doesn't already have Nix. If it does, I + will show you instructions on how to clean up your old one. + +2. Show you what we are going to install and where. Then we will ask + if you are ready to continue. + +3. Create the system users and groups that the Nix daemon uses to run + builds. + +4. Perform the basic installation of the Nix files daemon. + +5. Configure your shell to import special Nix Profile files, so you + can use Nix. + +6. Start the Nix daemon. + +EOF + + if ui_confirm "Would you like to see a more detailed list of what we will do?"; then + cat <<EOF + +We will: + + - make sure your computer doesn't already have Nix files + (if it does, I will tell you how to clean them up.) 
+ - create local users (see the list above for the users we'll make) + - create a local group ($NIX_BUILD_GROUP_NAME) + - install Nix in to $NIX_ROOT + - create a configuration file in /etc/nix + - set up the "default profile" by creating some Nix-related files in + $ROOT_HOME +EOF + for profile_target in "${PROFILE_TARGETS[@]}"; do + if [ -e "$profile_target" ]; then + cat <<EOF + - back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX + - update $profile_target to include some Nix configuration +EOF + fi + done + poly_service_setup_note + if ! ui_confirm "Ready to continue?"; then + failure <<EOF +Okay, maybe you would like to talk to the team. +EOF + fi + fi +} + +chat_about_sudo() { + header "let's talk about sudo" + + if headless; then + cat <<EOF +This script is going to call sudo a lot. Normally, it would show you +exactly what commands it is running and why. However, the script is +run in a headless fashion, like this: + + $ curl https://nixos.org/nix/install | sh + +or maybe in a CI pipeline. Because of that, we're going to skip the +verbose output in the interest of brevity. + +If you would like to +see the output, try like this: + + $ curl -o install-nix https://nixos.org/nix/install + $ sh ./install-nix + +EOF + return 0 + fi + + cat <<EOF +This script is going to call sudo a lot. Every time we do, it'll +output exactly what it'll do, and why. + +Just like this: +EOF + + __sudo "to demonstrate how our sudo prompts look" \ + echo "this is a sudo prompt" + + cat <<EOF + +This might look scary, but everything can be undone by running just a +few commands. We used to ask you to confirm each time sudo ran, but it +was too many times. Instead, I'll just ask you this one time: + +EOF + if ui_confirm "Can we use sudo?"; then + ok "Yay! Thanks! Let's get going!" + else + failure <<EOF +That is okay, but we can't install. +EOF + fi +} + +install_from_extracted_nix() { + ( + cd "$EXTRACTED_NIX_PATH" + + _sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \ + rsync -rlpt ./store/* "$NIX_ROOT/store/" + + if [ -d "$NIX_INSTALLED_NIX" ]; then + echo " Alright! We have our first nix at $NIX_INSTALLED_NIX" + else + failure <<EOF +Something went wrong, and I didn't find Nix installed at +$NIX_INSTALLED_NIX. +EOF + fi + + _sudo "to initialize the Nix Database" \ + $NIX_INSTALLED_NIX/bin/nix-store --init + + cat ./.reginfo \ + | _sudo "to load data for the first time in to the Nix Database" \ + "$NIX_INSTALLED_NIX/bin/nix-store" --load-db + + echo " Just finished getting the nix database ready." + ) +} + +shell_source_lines() { + cat <<EOF + +# Nix +if [ -e '$PROFILE_NIX_FILE' ]; then + . '$PROFILE_NIX_FILE' +fi +# End Nix + +EOF +} + +configure_shell_profile() { + # If there is an /etc/profile.d directory, we want to ensure there + # is a nix.sh within it, so we can use the following loop to add + # the source lines to it. Note that I'm _not_ adding the source + # lines here, because we want to be using the regular machinery. + # + # If we go around that machinery, it becomes more complicated and + # adds complications to the uninstall instruction generator and + # old instruction sniffer as well. 
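For reference, the snippet that shell_source_lines appends to each profile target (and to the stub /etc/profile.d/nix.sh created just below) looks like this after substitution, assuming the default NIX_ROOT of /nix:

    # Nix
    if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then
      . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh'
    fi
    # End Nix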
+ if [ -d /etc/profile.d ]; then + _sudo "create a stub /etc/profile.d/nix.sh which will be updated" \ + touch /etc/profile.d/nix.sh + fi + + for profile_target in "${PROFILE_TARGETS[@]}"; do + if [ -e "$profile_target" ]; then + _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \ + cp "$profile_target" "$profile_target$PROFILE_BACKUP_SUFFIX" + + shell_source_lines \ + | _sudo "extend your $profile_target with nix-daemon settings" \ + tee -a "$profile_target" + fi + done +} + +setup_default_profile() { + _sudo "to installing a bootstrapping Nix in to the default Profile" \ + HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX" + + _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ + HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" + + _sudo "to update the default channel in the default profile" \ + HOME="$ROOT_HOME" NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs +} + + +place_nix_configuration() { + cat <<EOF > "$SCRATCH/nix.conf" +build-users-group = $NIX_BUILD_GROUP_NAME + +max-jobs = $NIX_USER_COUNT +cores = 1 +sandbox = false +EOF + _sudo "to place the default nix daemon configuration (part 2)" \ + install -m 0664 "$SCRATCH/nix.conf" /etc/nix/nix.conf +} + +main() { + if [ "$(uname -s)" = "Darwin" ]; then + # shellcheck source=./install-darwin-multi-user.sh + . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh" + elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then + # shellcheck source=./install-systemd-multi-user.sh + . "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh" + else + failure "Sorry, I don't know what to do on $(uname)" + fi + + welcome_to_nix + chat_about_sudo + + if [ "${ALLOW_PREEXISTING_INSTALLATION:-}" = "" ]; then + validate_starting_assumptions + fi + + setup_report + + if ! ui_confirm "Ready to continue?"; then + ok "Alright, no changes have been made :)" + contactme + trap finish_cleanup EXIT + exit 1 + fi + + create_build_group + create_build_users + create_directories + place_channel_configuration + install_from_extracted_nix + + configure_shell_profile + + set +eu + . /etc/profile + set -eu + + setup_default_profile + place_nix_configuration + poly_configure_nix_daemon_service + + trap finish_success EXIT +} + + +main diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh index 3e5676f419ba..cd71d7947d77 100644 --- a/scripts/install-nix-from-closure.sh +++ b/scripts/install-nix-from-closure.sh @@ -28,9 +28,41 @@ if [ "$(uname -s)" = "Darwin" ]; then echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.10 or higher" exit 1 fi +fi + +# Determine if we should punt to the single-user installer or not +if [ "$(uname -s)" = "Darwin" ]; then + INSTALL_MODE=daemon +elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then + INSTALL_MODE=daemon +else + INSTALL_MODE=no-daemon +fi + +# Trivially handle the --daemon / --no-daemon options +if [ "x${1:-}" = "x--no-daemon" ]; then + INSTALL_MODE=no-daemon +elif [ "x${1:-}" = "x--daemon" ]; then + INSTALL_MODE=daemon +elif [ "x${1:-}" != "x" ]; then + ( + echo "Nix Installer [--daemon|--no-daemon]" + echo "" + echo " --daemon: Force the installer to use the Daemon" + echo " based installer, even though it may not" + echo " work." 
+ echo "" + echo " --no-daemon: Force a no-daemon, single-user" + echo " installation even when the preferred" + echo " method is with the daemon." + echo "" + ) >&2 + exit +fi - printf '\e[1;31mSwitching to the Multi-User Darwin Installer\e[0m\n' - exec "$self/install-darwin-multi-user" +if [ "$INSTALL_MODE" = "daemon" ]; then + printf '\e[1;31mSwitching to the Daemon-based Installer\e[0m\n' + exec "$self/install-multi-user" exit 0 fi diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh new file mode 100644 index 000000000000..04bc539a1099 --- /dev/null +++ b/scripts/install-systemd-multi-user.sh @@ -0,0 +1,154 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + +readonly SERVICE_SRC=/lib/systemd/system/nix-daemon.service +readonly SERVICE_DEST=/etc/systemd/system/nix-daemon.service + +readonly SOCKET_SRC=/lib/systemd/system/nix-daemon.socket +readonly SOCKET_DEST=/etc/systemd/system/nix-daemon.socket + +poly_validate_assumptions() { + if [ "$(uname -s)" != "Linux" ]; then + failure "This script is for use with Linux!" + fi +} + +poly_service_installed_check() { + [ "$(systemctl is-enabled nix-daemon.service)" = "linked" ] \ + || [ "$(systemctl is-enabled nix-daemon.socket)" = "enabled" ] +} + +poly_service_uninstall_directions() { + cat <<EOF +$1. Delete the systemd service and socket units + + sudo systemctl stop nix-daemon.socket + sudo systemctl stop nix-daemon.service + sudo systemctl disable nix-daemon.socket + sudo systemctl disable nix-daemon.service + sudo systemctl daemon-reload +EOF +} + +poly_service_setup_note() { + cat <<EOF + - load and start a service (at $SERVICE_DEST + and $SOCKET_DEST) for nix-daemon + +EOF +} + +poly_configure_nix_daemon_service() { + _sudo "to set up the nix-daemon service" \ + systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC" + + _sudo "to set up the nix-daemon socket service" \ + systemctl enable "/nix/var/nix/profiles/default$SOCKET_SRC" + + _sudo "to load the systemd unit for nix-daemon" \ + systemctl daemon-reload + + _sudo "to start the nix-daemon.socket" \ + systemctl start nix-daemon.socket + + _sudo "to start the nix-daemon.service" \ + systemctl start nix-daemon.service + +} + +poly_group_exists() { + getent group "$1" > /dev/null 2>&1 +} + +poly_group_id_get() { + getent group "$1" | cut -d: -f3 +} + +poly_create_build_group() { + _sudo "Create the Nix build group, $NIX_BUILD_GROUP_NAME" \ + groupadd -g "$NIX_BUILD_GROUP_ID" --system \ + "$NIX_BUILD_GROUP_NAME" >&2 +} + +poly_user_exists() { + getent passwd "$1" > /dev/null 2>&1 +} + +poly_user_id_get() { + getent passwd "$1" | cut -d: -f3 +} + +poly_user_hidden_get() { + echo "1" +} + +poly_user_hidden_set() { + true +} + +poly_user_home_get() { + getent passwd "$1" | cut -d: -f6 +} + +poly_user_home_set() { + _sudo "in order to give $1 a safe home directory" \ + usermod --home "$2" "$1" +} + +poly_user_note_get() { + getent passwd "$1" | cut -d: -f5 +} + +poly_user_note_set() { + _sudo "in order to give $1 a useful comment" \ + usermod --comment "$2" "$1" +} + +poly_user_shell_get() { + getent passwd "$1" | cut -d: -f7 +} + +poly_user_shell_set() { + _sudo "in order to prevent $1 from logging in" \ + usermod --shell "$2" "$1" +} + +poly_user_in_group_check() { + groups "$1" | grep -q "$2" > /dev/null 2>&1 +} + +poly_user_in_group_set() { + _sudo "Add $1 to the $2 group"\ + usermod --append --groups "$2" "$1" +} + +poly_user_primary_group_get() { + getent passwd "$1" | cut -d: -f4 +} + +poly_user_primary_group_set() { + _sudo "to let the nix 
daemon use this user for builds (this might seem redundant, but there are two concepts of group membership)" \ + usermod --gid "$2" "$1" + +} + +poly_create_build_user() { + username=$1 + uid=$2 + builder_num=$3 + + _sudo "Creating the Nix build user, $username" \ + useradd \ + --home-dir /var/empty \ + --comment "Nix build user $builder_num" \ + --gid "$NIX_BUILD_GROUP_ID" \ + --groups "$NIX_BUILD_GROUP_NAME" \ + --no-user-group \ + --system \ + --shell /sbin/nologin \ + --uid "$uid" \ + --password "!" \ + "$username" +} diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index ab95c09c8305..a5f52274fc70 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -60,12 +60,6 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_USER_PROFILE_DIR" - for i in $NIX_PROFILES; do - if [ -d "$i/lib/aspell" ]; then - export ASPELL_CONF="dict-dir $i/lib/aspell" - fi - done - # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt @@ -81,7 +75,7 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt" fi - if [ -n ${MANPATH} ]; then + if [ -n "${MANPATH}" ]; then export MANPATH="$NIX_LINK/share/man:$MANPATH" fi diff --git a/shell.nix b/shell.nix index f47952582fce..c04bcd151309 100644 --- a/shell.nix +++ b/shell.nix @@ -1,34 +1,13 @@ { useClang ? false }: -with import <nixpkgs> {}; +with import (builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.03"; }) {}; with import ./release-common.nix { inherit pkgs; }; (if useClang then clangStdenv else stdenv).mkDerivation { name = "nix"; - buildInputs = - [ curl bison flex libxml2 libxslt - bzip2 xz brotli - pkgconfig sqlite libsodium boehmgc - docbook5 docbook5_xsl - autoconf-archive - (aws-sdk-cpp.override { - apis = ["s3"]; - customMemoryManagement = false; - }) - autoreconfHook - nlohmann_json - - # For nix-perl - perl - perlPackages.DBDSQLite - - # Tests - git - mercurial - ] - ++ lib.optional stdenv.isLinux libseccomp; + buildInputs = buildDeps ++ tarballDeps ++ perlDeps; inherit configureFlags; diff --git a/src/boost/assert.hpp b/src/boost/assert.hpp deleted file mode 100644 index 754ebb954bce..000000000000 --- a/src/boost/assert.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// -// boost/assert.hpp - BOOST_ASSERT(expr) -// -// Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd. -// -// Permission to copy, use, modify, sell and distribute this software -// is granted provided this copyright notice appears in all copies. -// This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. -// -// Note: There are no include guards. This is intentional. -// -// See http://www.boost.org/libs/utility/assert.html for documentation. -// - -#undef BOOST_ASSERT - -#if defined(BOOST_DISABLE_ASSERTS) - -# define BOOST_ASSERT(expr) ((void)0) - -#elif defined(BOOST_ENABLE_ASSERT_HANDLER) - -#include <boost/current_function.hpp> - -namespace boost -{ - -void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined - -} // namespace boost - -#define BOOST_ASSERT(expr) ((expr)? 
((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__)) - -#else -# include <assert.h> -# define BOOST_ASSERT(expr) assert(expr) -#endif diff --git a/src/boost/format.hpp b/src/boost/format.hpp deleted file mode 100644 index f965f0f33e9a..000000000000 --- a/src/boost/format.hpp +++ /dev/null @@ -1,64 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream - -// ---------------------------------------------------------------------------- -// format.hpp : primary header -// ---------------------------------------------------------------------------- - -#ifndef BOOST_FORMAT_HPP -#define BOOST_FORMAT_HPP - -#include <vector> -#include <string> -#include <sstream> -#include <cassert> - -#if HAVE_LOCALE -#include <locale> -#else -#define BOOST_NO_STD_LOCALE -#define BOOST_NO_LOCALE_ISIDIGIT -#include <cctype> -#endif - -#include <boost/format/macros_default.hpp> - - -// **** Forward declarations ---------------------------------- -#include <boost/format/format_fwd.hpp> // basic_format<Ch,Tr>, and other frontends -#include <boost/format/internals_fwd.hpp> // misc forward declarations for internal use - - -// **** Auxiliary structs (stream_format_state<Ch,Tr> , and format_item<Ch,Tr> ) -#include <boost/format/internals.hpp> - -// **** Format class interface -------------------------------- -#include <boost/format/format_class.hpp> - -// **** Exceptions ----------------------------------------------- -#include <boost/format/exceptions.hpp> - -// **** Implementation ------------------------------------------- -//#include <boost/format/format_implementation.hpp> // member functions - -#include <boost/format/group.hpp> // class for grouping arguments - -#include <boost/format/feed_args.hpp> // argument-feeding functions -//#include <boost/format/parsing.hpp> // format-string parsing (member-)functions - -// **** Implementation of the free functions ---------------------- -//#include <boost/format/free_funcs.hpp> - - -#endif // BOOST_FORMAT_HPP diff --git a/src/boost/format/exceptions.hpp b/src/boost/format/exceptions.hpp deleted file mode 100644 index a7641458c95e..000000000000 --- a/src/boost/format/exceptions.hpp +++ /dev/null @@ -1,96 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// exceptions.hpp -// ------------------------------------------------------------------------------ - - -#ifndef BOOST_FORMAT_EXCEPTIONS_HPP -#define BOOST_FORMAT_EXCEPTIONS_HPP - - -#include <stdexcept> - - -namespace boost { - -namespace io { - -// **** exceptions ----------------------------------------------- - -class format_error : public std::exception -{ -public: - format_error() { abort(); } - virtual const char *what() const throw() - { - return "boost::format_error: " - "format generic failure"; - } -}; - -class bad_format_string : public format_error -{ -public: - bad_format_string() { abort(); } - virtual const char *what() const throw() - { - return "boost::bad_format_string: " - "format-string is ill-formed"; - } -}; - -class too_few_args : public format_error -{ -public: - too_few_args() { abort(); } - virtual const char *what() const throw() - { - return "boost::too_few_args: " - "format-string refered to more arguments than were passed"; - } -}; - -class too_many_args : public format_error -{ -public: - too_many_args() { abort(); } - virtual const char *what() const throw() - { - return "boost::too_many_args: " - "format-string refered to less arguments than were passed"; - } -}; - - -class out_of_range : public format_error -{ -public: - out_of_range() { abort(); } - virtual const char *what() const throw() - { - return "boost::out_of_range: " - "tried to refer to an argument (or item) number which is out of range, " - "according to the format string."; - } -}; - - -} // namespace io - -} // namespace boost - - -#endif // BOOST_FORMAT_EXCEPTIONS_HPP diff --git a/src/boost/format/feed_args.hpp b/src/boost/format/feed_args.hpp deleted file mode 100644 index cdd57fdf2bf1..000000000000 --- a/src/boost/format/feed_args.hpp +++ /dev/null @@ -1,254 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
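The exception hierarchy removed above (format_error and its subclasses) is what stock Boost.Format throws on malformed format strings or argument-count mismatches; note that this vendored copy calls abort() in every constructor, so the same conditions aborted instead of unwinding. A sketch of the stock behaviour, using Boost.Format as a stand-in for the deleted copy:

```cpp
#include <boost/format.hpp>
#include <iostream>

int main()
{
    try {
        boost::format f("%1% of %2%");
        f % 1;                         // only one of two arguments fed
        std::cout << f.str();          // rendering now -> too_few_args
    } catch (const boost::io::too_few_args & e) {
        std::cerr << "format error: " << e.what() << '\n';
    }

    try {
        std::cout << boost::format("%1%") % 1 % 2;   // one argument too many
    } catch (const boost::io::too_many_args & e) {
        std::cerr << "format error: " << e.what() << '\n';
    }
}
```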
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream - -// ---------------------------------------------------------------------------- -// feed_args.hpp : functions for processing each argument -// (feed, feed_manip, and distribute) -// ---------------------------------------------------------------------------- - - -#ifndef BOOST_FORMAT_FEED_ARGS_HPP -#define BOOST_FORMAT_FEED_ARGS_HPP - -#include "boost/format/format_class.hpp" -#include "boost/format/group.hpp" - -#include "boost/throw_exception.hpp" - -namespace boost { -namespace io { -namespace detail { -namespace { - - inline - void empty_buf(BOOST_IO_STD ostringstream & os) { - static const std::string emptyStr; - os.str(emptyStr); - } - - void do_pad( std::string & s, - std::streamsize w, - const char c, - std::ios::fmtflags f, - bool center) - __attribute__ ((unused)); - - void do_pad( std::string & s, - std::streamsize w, - const char c, - std::ios::fmtflags f, - bool center) - // applies centered / left / right padding to the string s. - // Effects : string s is padded. - { - std::streamsize n=w-s.size(); - if(n<=0) { - return; - } - if(center) - { - s.reserve(w); // allocate once for the 2 inserts - const std::streamsize n1 = n /2, n0 = n - n1; - s.insert(s.begin(), n0, c); - s.append(n1, c); - } - else - { - if(f & std::ios::left) { - s.append(n, c); - } - else { - s.insert(s.begin(), n, c); - } - } - } // -do_pad(..) - - - template<class T> inline - void put_head(BOOST_IO_STD ostream& , const T& ) { - } - - template<class T> inline - void put_head( BOOST_IO_STD ostream& os, const group1<T>& x ) { - os << group_head(x.a1_); // send the first N-1 items, not the last - } - - template<class T> inline - void put_last( BOOST_IO_STD ostream& os, const T& x ) { - os << x ; - } - - template<class T> inline - void put_last( BOOST_IO_STD ostream& os, const group1<T>& x ) { - os << group_last(x.a1_); // this selects the last element - } - -#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST - template<class T> inline - void put_head( BOOST_IO_STD ostream& , T& ) { - } - - template<class T> inline - void put_last( BOOST_IO_STD ostream& os, T& x ) { - os << x ; - } -#endif - - - - -template<class T> -void put( T x, - const format_item& specs, - std::string & res, - BOOST_IO_STD ostringstream& oss_ ) -{ - // does the actual conversion of x, with given params, into a string - // using the *supplied* strinstream. (the stream state is important) - - typedef std::string string_t; - typedef format_item format_item_t; - - stream_format_state prev_state(oss_); - - specs.state_.apply_on(oss_); - - // in case x is a group, apply the manip part of it, - // in order to find width - put_head( oss_, x ); - empty_buf( oss_); - - const std::streamsize w=oss_.width(); - const std::ios::fmtflags fl=oss_.flags(); - const bool internal = (fl & std::ios::internal) != 0; - const bool two_stepped_padding = internal - && ! ( specs.pad_scheme_ & format_item_t::spacepad ) - && specs.truncate_ < 0 ; - - - if(! 
two_stepped_padding) - { - if(w>0) // handle simple padding via do_pad, not natively in stream - oss_.width(0); - put_last( oss_, x); - res = oss_.str(); - - if (specs.truncate_ >= 0) - res.erase(specs.truncate_); - - // complex pads : - if(specs.pad_scheme_ & format_item_t::spacepad) - { - if( res.size()==0 || ( res[0]!='+' && res[0]!='-' )) - { - res.insert(res.begin(), 1, ' '); // insert 1 space at pos 0 - } - } - if(w > 0) // need do_pad - { - do_pad(res,w,oss_.fill(), fl, (specs.pad_scheme_ & format_item_t::centered) !=0 ); - } - } - else // 2-stepped padding - { - put_last( oss_, x); // oss_.width() may result in padding. - res = oss_.str(); - - if (specs.truncate_ >= 0) - res.erase(specs.truncate_); - - if( res.size() - w > 0) - { // length w exceeded - // either it was multi-output with first output padding up all width.. - // either it was one big arg and we are fine. - empty_buf( oss_); - oss_.width(0); - put_last(oss_, x ); - string_t tmp = oss_.str(); // minimal-length output - std::streamsize d; - if( (d=w - tmp.size()) <=0 ) - { - // minimal length is already >= w, so no padding (cool!) - res.swap(tmp); - } - else - { // hum.. we need to pad (it was necessarily multi-output) - typedef typename string_t::size_type size_type; - size_type i = 0; - while( i<tmp.size() && tmp[i] == res[i] ) // find where we should pad. - ++i; - tmp.insert(i, static_cast<size_type>( d ), oss_.fill()); - res.swap( tmp ); - } - } - else - { // okay, only one thing was printed and padded, so res is fine. - } - } - - prev_state.apply_on(oss_); - empty_buf( oss_); - oss_.clear(); -} // end- put(..) - - -} // local namespace - - - - - -template<class T> -void distribute(basic_format& self, T x) - // call put(x, ..) on every occurence of the current argument : -{ - if(self.cur_arg_ >= self.num_args_) - { - if( self.exceptions() & too_many_args_bit ) - boost::throw_exception(too_many_args()); // too many variables have been supplied ! - else return; - } - for(unsigned long i=0; i < self.items_.size(); ++i) - { - if(self.items_[i].argN_ == self.cur_arg_) - { - put<T> (x, self.items_[i], self.items_[i].res_, self.oss_ ); - } - } -} - -template<class T> -basic_format& feed(basic_format& self, T x) -{ - if(self.dumped_) self.clear(); - distribute<T> (self, x); - ++self.cur_arg_; - if(self.bound_.size() != 0) - { - while( self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_] ) - ++self.cur_arg_; - } - - // this arg is finished, reset the stream's format state - self.state0_.apply_on(self.oss_); - return self; -} - - -} // namespace detail -} // namespace io -} // namespace boost - - -#endif // BOOST_FORMAT_FEED_ARGS_HPP diff --git a/src/boost/format/format_class.hpp b/src/boost/format/format_class.hpp deleted file mode 100644 index 6875623acb47..000000000000 --- a/src/boost/format/format_class.hpp +++ /dev/null @@ -1,135 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
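The feed_args.hpp header removed above implements the argument-feeding pipeline: each operator% call lands in feed(), which distribute()s the value to every directive that refers to the current argument number and converts it via put(). A small sketch of the visible effect, using stock Boost.Format as a stand-in for the deleted copy:

```cpp
#include <boost/format.hpp>
#include <iostream>

int main()
{
    // "%1%" appears twice; a single feed fills both occurrences,
    // because distribute() walks every directive with that argument number.
    std::cout << boost::format("%1% + %1% = %2%\n") % 2 % 4;
    // prints: 2 + 2 = 4
}
```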
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// format_class.hpp : class interface -// ------------------------------------------------------------------------------ - - -#ifndef BOOST_FORMAT_CLASS_HPP -#define BOOST_FORMAT_CLASS_HPP - -#include <vector> -#include <string> - -#include <boost/format/format_fwd.hpp> -#include <boost/format/internals_fwd.hpp> - -#include <boost/format/internals.hpp> - -namespace boost { - -class basic_format -{ -public: - typedef std::string string_t; - typedef BOOST_IO_STD ostringstream internal_stream_t; -private: - typedef BOOST_IO_STD ostream stream_t; - typedef io::detail::stream_format_state stream_format_state; - typedef io::detail::format_item format_item_t; - -public: - basic_format(const char* str); - basic_format(const string_t& s); -#ifndef BOOST_NO_STD_LOCALE - basic_format(const char* str, const std::locale & loc); - basic_format(const string_t& s, const std::locale & loc); -#endif // no locale - basic_format(const basic_format& x); - basic_format& operator= (const basic_format& x); - - basic_format& clear(); // empty the string buffers (except bound arguments, see clear_binds() ) - - // pass arguments through those operators : - template<class T> basic_format& operator%(const T& x) - { - return io::detail::feed<const T&>(*this,x); - } - -#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST - template<class T> basic_format& operator%(T& x) - { - return io::detail::feed<T&>(*this,x); - } -#endif - - - // system for binding arguments : - template<class T> - basic_format& bind_arg(int argN, const T& val) - { - return io::detail::bind_arg_body(*this, argN, val); - } - basic_format& clear_bind(int argN); - basic_format& clear_binds(); - - // modify the params of a directive, by applying a manipulator : - template<class T> - basic_format& modify_item(int itemN, const T& manipulator) - { - return io::detail::modify_item_body(*this, itemN, manipulator) ; - } - - // Choosing which errors will throw exceptions : - unsigned char exceptions() const; - unsigned char exceptions(unsigned char newexcept); - - // final output - string_t str() const; - friend BOOST_IO_STD ostream& - operator<< ( BOOST_IO_STD ostream& , const basic_format& ); - - - template<class T> friend basic_format& - io::detail::feed(basic_format&, T); - - template<class T> friend - void io::detail::distribute(basic_format&, T); - - template<class T> friend - basic_format& io::detail::modify_item_body(basic_format&, int, const T&); - - template<class T> friend - basic_format& io::detail::bind_arg_body(basic_format&, int, const T&); - -// make the members private only if the friend templates are supported -private: - - // flag bits, used for style_ - enum style_values { ordered = 1, // set only if all directives are positional directives - special_needs = 4 }; - - // parse the format string : - void parse(const string_t&); - - int style_; // style of format-string : positional or not, etc - int cur_arg_; // keep track of wich argument will come - int num_args_; // number of expected arguments - mutable bool dumped_; // true only after call to str() or << - std::vector<format_item_t> items_; // vector of directives (aka items) - string_t prefix_; // piece of string to insert before first item - - std::vector<bool> bound_; // stores which arguments were bound - // size = num_args OR zero - internal_stream_t oss_; // the internal 
stream. - stream_format_state state0_; // reference state for oss_ - unsigned char exceptions_; -}; // class basic_format - - -} // namespace boost - - -#endif // BOOST_FORMAT_CLASS_HPP diff --git a/src/boost/format/format_fwd.hpp b/src/boost/format/format_fwd.hpp deleted file mode 100644 index 97c55f6684c3..000000000000 --- a/src/boost/format/format_fwd.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// format_fwd.hpp : forward declarations, for primary header format.hpp -// ------------------------------------------------------------------------------ - -#ifndef BOOST_FORMAT_FWD_HPP -#define BOOST_FORMAT_FWD_HPP - -#include <string> -#include <iosfwd> - -namespace boost { - -class basic_format; - -typedef basic_format format; - -namespace io { -enum format_error_bits { bad_format_string_bit = 1, - too_few_args_bit = 2, too_many_args_bit = 4, - out_of_range_bit = 8, - all_error_bits = 255, no_error_bits=0 }; - -// Convertion: format to string -std::string str(const basic_format& ) ; - -} // namespace io - - -BOOST_IO_STD ostream& -operator<<( BOOST_IO_STD ostream&, const basic_format&); - - -} // namespace boost - -#endif // BOOST_FORMAT_FWD_HPP diff --git a/src/boost/format/format_implementation.cc b/src/boost/format/format_implementation.cc deleted file mode 100644 index aa191afe1132..000000000000 --- a/src/boost/format/format_implementation.cc +++ /dev/null @@ -1,256 +0,0 @@ -// -*- C++ -*- -// Boost general library format --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
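The basic_format interface removed above (format_class.hpp) also exposes argument binding, per-directive manipulators, and an error-bit mask. A sketch of those members through stock Boost.Format, which keeps the same names; the format strings and values are illustrative only:

```cpp
#include <boost/format.hpp>
#include <iomanip>
#include <iostream>

int main()
{
    boost::format f("%1%: %2%\n");
    f.bind_arg(1, "jobs");          // argument 1 survives clear() and reuse
    std::cout << f % 4;             // "jobs: 4"
    std::cout << f % 8;             // "jobs: 8" -- only %2% needs feeding

    boost::format g("%1%|%2%\n");
    g.modify_item(1, std::setw(6)); // pad the first directive to width 6
    std::cout << g % "a" % "b";     // "     a|b"

    boost::format h("%1%\n");
    h.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
    std::cout << h % 1 % 2;         // the extra argument is now ignored
}
```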
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream - -// ---------------------------------------------------------------------------- -// format_implementation.hpp Implementation of the basic_format class -// ---------------------------------------------------------------------------- - - -#ifndef BOOST_FORMAT_IMPLEMENTATION_HPP -#define BOOST_FORMAT_IMPLEMENTATION_HPP - -#include <boost/throw_exception.hpp> -#include <boost/assert.hpp> -#include <boost/format.hpp> - -namespace boost { - -// -------- format:: ------------------------------------------- -basic_format::basic_format(const char* str) - : style_(0), cur_arg_(0), num_args_(0), dumped_(false), - items_(), oss_(), exceptions_(io::all_error_bits) -{ - state0_.set_by_stream(oss_); - string_t emptyStr; - if( !str) str = emptyStr.c_str(); - parse( str ); -} - -#ifndef BOOST_NO_STD_LOCALE -basic_format::basic_format(const char* str, const std::locale & loc) - : style_(0), cur_arg_(0), num_args_(0), dumped_(false), - items_(), oss_(), exceptions_(io::all_error_bits) -{ - oss_.imbue( loc ); - state0_.set_by_stream(oss_); - string_t emptyStr; - if( !str) str = emptyStr.c_str(); - parse( str ); -} - -basic_format::basic_format(const string_t& s, const std::locale & loc) - : style_(0), cur_arg_(0), num_args_(0), dumped_(false), - items_(), oss_(), exceptions_(io::all_error_bits) -{ - oss_.imbue( loc ); - state0_.set_by_stream(oss_); - parse(s); -} -#endif //BOOST_NO_STD_LOCALE - -basic_format::basic_format(const string_t& s) - : style_(0), cur_arg_(0), num_args_(0), dumped_(false), - items_(), oss_(), exceptions_(io::all_error_bits) -{ - state0_.set_by_stream(oss_); - parse(s); -} - -basic_format:: basic_format(const basic_format& x) - : style_(x.style_), cur_arg_(x.cur_arg_), num_args_(x.num_args_), dumped_(false), - items_(x.items_), prefix_(x.prefix_), bound_(x.bound_), - oss_(), // <- we obviously can't copy x.oss_ - state0_(x.state0_), exceptions_(x.exceptions_) -{ - state0_.apply_on(oss_); -} - -basic_format& basic_format::operator= (const basic_format& x) -{ - if(this == &x) - return *this; - state0_ = x.state0_; - state0_.apply_on(oss_); - - // plus all the other (trivial) assignments : - exceptions_ = x.exceptions_; - items_ = x.items_; - prefix_ = x.prefix_; - bound_=x.bound_; - style_=x.style_; - cur_arg_=x.cur_arg_; - num_args_=x.num_args_; - dumped_=x.dumped_; - return *this; -} - - -unsigned char basic_format::exceptions() const -{ - return exceptions_; -} - -unsigned char basic_format::exceptions(unsigned char newexcept) -{ - unsigned char swp = exceptions_; - exceptions_ = newexcept; - return swp; -} - - -basic_format& basic_format ::clear() - // empty the string buffers (except bound arguments, see clear_binds() ) - // and make the format object ready for formatting a new set of arguments -{ - BOOST_ASSERT( bound_.size()==0 || num_args_ == static_cast<int>(bound_.size()) ); - - for(unsigned long i=0; i<items_.size(); ++i){ - items_[i].state_ = items_[i].ref_state_; - // clear converted strings only if the corresponding argument is not bound : - if( bound_.size()==0 || !bound_[ items_[i].argN_ ] ) items_[i].res_.resize(0); - } - cur_arg_=0; dumped_=false; - // maybe first arg is bound: - if(bound_.size() != 0) - { - while(cur_arg_ < num_args_ && bound_[cur_arg_] ) ++cur_arg_; - } - return *this; -} - -basic_format& basic_format ::clear_binds() - // cancel all bindings, and clear() -{ - bound_.resize(0); - clear(); - return *this; -} - -basic_format& basic_format::clear_bind(int argN) - // cancel 
the binding of ONE argument, and clear() -{ - if(argN<1 || argN > num_args_ || bound_.size()==0 || !bound_[argN-1] ) - { - if( exceptions() & io::out_of_range_bit ) - boost::throw_exception(io::out_of_range()); // arg not in range. - else return *this; - } - bound_[argN-1]=false; - clear(); - return *this; -} - - - -std::string basic_format::str() const -{ - dumped_=true; - if(items_.size()==0) - return prefix_; - if( cur_arg_ < num_args_) - if( exceptions() & io::too_few_args_bit ) - boost::throw_exception(io::too_few_args()); // not enough variables have been supplied ! - - unsigned long sz = prefix_.size(); - unsigned long i; - for(i=0; i < items_.size(); ++i) - sz += items_[i].res_.size() + items_[i].appendix_.size(); - string_t res; - res.reserve(sz); - - res += prefix_; - for(i=0; i < items_.size(); ++i) - { - const format_item_t& item = items_[i]; - res += item.res_; - if( item.argN_ == format_item_t::argN_tabulation) - { - BOOST_ASSERT( item.pad_scheme_ & format_item_t::tabulation); - std::streamsize n = item.state_.width_ - res.size(); - if( n > 0 ) - res.append( n, item.state_.fill_ ); - } - res += item.appendix_; - } - return res; -} - -namespace io { -namespace detail { - -template<class T> -basic_format& bind_arg_body( basic_format& self, - int argN, - const T& val) - // bind one argument to a fixed value - // this is persistent over clear() calls, thus also over str() and << -{ - if(self.dumped_) self.clear(); // needed, because we will modify cur_arg_.. - if(argN<1 || argN > self.num_args_) - { - if( self.exceptions() & io::out_of_range_bit ) - boost::throw_exception(io::out_of_range()); // arg not in range. - else return self; - } - if(self.bound_.size()==0) - self.bound_.assign(self.num_args_,false); - else - BOOST_ASSERT( self.num_args_ == static_cast<signed int>(self.bound_.size()) ); - int o_cur_arg = self.cur_arg_; - self.cur_arg_ = argN-1; // arrays begin at 0 - - self.bound_[self.cur_arg_]=false; // if already set, we unset and re-sets.. - self.operator%(val); // put val at the right place, because cur_arg is set - - - // Now re-position cur_arg before leaving : - self.cur_arg_ = o_cur_arg; - self.bound_[argN-1]=true; - if(self.cur_arg_ == argN-1 ) - // hum, now this arg is bound, so move to next free arg - { - while(self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_]) ++self.cur_arg_; - } - // In any case, we either have all args, or are on a non-binded arg : - BOOST_ASSERT( self.cur_arg_ >= self.num_args_ || ! self.bound_[self.cur_arg_]); - return self; -} - -template<class T> -basic_format& modify_item_body( basic_format& self, - int itemN, - const T& manipulator) - // applies a manipulator to the format_item describing a given directive. - // this is a permanent change, clear or clear_binds won't cancel that. -{ - if(itemN<1 || itemN >= static_cast<signed int>(self.items_.size() )) - { - if( self.exceptions() & io::out_of_range_bit ) - boost::throw_exception(io::out_of_range()); // item not in range. 
- else return self; - } - self.items_[itemN-1].ref_state_.apply_manip( manipulator ); - self.items_[itemN-1].state_ = self.items_[itemN-1].ref_state_; - return self; -} - -} // namespace detail - -} // namespace io - -} // namespace boost - - - -#endif // BOOST_FORMAT_IMPLEMENTATION_HPP diff --git a/src/boost/format/free_funcs.cc b/src/boost/format/free_funcs.cc deleted file mode 100644 index 151db37a0ac9..000000000000 --- a/src/boost/format/free_funcs.cc +++ /dev/null @@ -1,71 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// free_funcs.hpp : implementation of the free functions declared in namespace format -// ------------------------------------------------------------------------------ - -#ifndef BOOST_FORMAT_FUNCS_HPP -#define BOOST_FORMAT_FUNCS_HPP - -#include "boost/format.hpp" -#include "boost/throw_exception.hpp" - -namespace boost { - -namespace io { - inline - std::string str(const basic_format& f) - // adds up all pieces of strings and converted items, and return the formatted string - { - return f.str(); - } -} // - namespace io - -BOOST_IO_STD ostream& -operator<<( BOOST_IO_STD ostream& os, - const boost::basic_format& f) - // effect: "return os << str(f);" but we can try to do it faster -{ - typedef boost::basic_format format_t; - if(f.items_.size()==0) - os << f.prefix_; - else { - if(f.cur_arg_ < f.num_args_) - if( f.exceptions() & io::too_few_args_bit ) - boost::throw_exception(io::too_few_args()); // not enough variables have been supplied ! - if(f.style_ & format_t::special_needs) - os << f.str(); - else { - // else we dont have to count chars output, so we dump directly to os : - os << f.prefix_; - for(unsigned long i=0; i<f.items_.size(); ++i) - { - const format_t::format_item_t& item = f.items_[i]; - os << item.res_; - os << item.appendix_; - - } - } - } - f.dumped_=true; - return os; -} - - - -} // namespace boost - - -#endif // BOOST_FORMAT_FUNCS_HPP diff --git a/src/boost/format/group.hpp b/src/boost/format/group.hpp deleted file mode 100644 index ac63f3f0bab0..000000000000 --- a/src/boost/format/group.hpp +++ /dev/null @@ -1,680 +0,0 @@ - -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
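free_funcs.cc, removed above, supplies boost::io::str() and the ostream inserter for format objects; streaming a format that is still missing arguments raises too_few_args (or, in this vendored copy, aborts). A short usage sketch against stock Boost.Format:

```cpp
#include <boost/format.hpp>
#include <iostream>
#include <string>

int main()
{
    boost::format f("%1%-%2%");
    std::string s = boost::io::str(f % "nix" % 2);   // render to a std::string
    std::cout << s << '\n';                          // nix-2
    std::cout << f << '\n';                          // the inserter prints the same rendered text
}
```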
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream - -// ---------------------------------------------------------------------------- - -// group.hpp : encapsulates a group of manipulators along with an argument -// -// group_head : cut the last element of a group out. -// (is overloaded below on each type of group) - -// group_last : returns the last element of a group -// (is overloaded below on each type of group) - -// ---------------------------------------------------------------------------- - - -#ifndef BOOST_FORMAT_GROUP_HPP -#define BOOST_FORMAT_GROUP_HPP - - -namespace boost { -namespace io { - - -namespace detail { - - -// empty group, but useful even though. -struct group0 -{ - group0() {} -}; - -template <class Ch, class Tr> -inline -BOOST_IO_STD ostream& -operator << ( BOOST_IO_STD ostream& os, - const group0& ) -{ - return os; -} - -template <class T1> -struct group1 -{ - T1 a1_; - group1(T1 a1) - : a1_(a1) - {} -}; - -template <class Ch, class Tr, class T1> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group1<T1>& x) -{ - os << x.a1_; - return os; -} - - - - -template <class T1,class T2> -struct group2 -{ - T1 a1_; - T2 a2_; - group2(T1 a1,T2 a2) - : a1_(a1),a2_(a2) - {} -}; - -template <class Ch, class Tr, class T1,class T2> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group2<T1,T2>& x) -{ - os << x.a1_<< x.a2_; - return os; -} - -template <class T1,class T2,class T3> -struct group3 -{ - T1 a1_; - T2 a2_; - T3 a3_; - group3(T1 a1,T2 a2,T3 a3) - : a1_(a1),a2_(a2),a3_(a3) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group3<T1,T2,T3>& x) -{ - os << x.a1_<< x.a2_<< x.a3_; - return os; -} - -template <class T1,class T2,class T3,class T4> -struct group4 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - group4(T1 a1,T2 a2,T3 a3,T4 a4) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group4<T1,T2,T3,T4>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5> -struct group5 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - group5(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group5<T1,T2,T3,T4,T5>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5,class T6> -struct group6 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - T6 a6_; - group6(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group6<T1,T2,T3,T4,T5,T6>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7> -struct group7 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - T6 a6_; - T7 a7_; - group7(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7) - {} -}; - -template <class Ch, 
class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group7<T1,T2,T3,T4,T5,T6,T7>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8> -struct group8 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - T6 a6_; - T7 a7_; - T8 a8_; - group8(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group8<T1,T2,T3,T4,T5,T6,T7,T8>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9> -struct group9 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - T6 a6_; - T7 a7_; - T8 a8_; - T9 a9_; - group9(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_; - return os; -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10> -struct group10 -{ - T1 a1_; - T2 a2_; - T3 a3_; - T4 a4_; - T5 a5_; - T6 a6_; - T7 a7_; - T8 a8_; - T9 a9_; - T10 a10_; - group10(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9,T10 a10) - : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9),a10_(a10) - {} -}; - -template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10> -inline -BOOST_IO_STD ostream& -operator << (BOOST_IO_STD ostream& os, - const group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10>& x) -{ - os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_<< x.a10_; - return os; -} - - - - -template <class T1,class T2> -inline -group1<T1> -group_head( group2<T1,T2> const& x) -{ - return group1<T1> (x.a1_); -} - -template <class T1,class T2> -inline -group1<T2> -group_last( group2<T1,T2> const& x) -{ - return group1<T2> (x.a2_); -} - - - -template <class T1,class T2,class T3> -inline -group2<T1,T2> -group_head( group3<T1,T2,T3> const& x) -{ - return group2<T1,T2> (x.a1_,x.a2_); -} - -template <class T1,class T2,class T3> -inline -group1<T3> -group_last( group3<T1,T2,T3> const& x) -{ - return group1<T3> (x.a3_); -} - - - -template <class T1,class T2,class T3,class T4> -inline -group3<T1,T2,T3> -group_head( group4<T1,T2,T3,T4> const& x) -{ - return group3<T1,T2,T3> (x.a1_,x.a2_,x.a3_); -} - -template <class T1,class T2,class T3,class T4> -inline -group1<T4> -group_last( group4<T1,T2,T3,T4> const& x) -{ - return group1<T4> (x.a4_); -} - - - -template <class T1,class T2,class T3,class T4,class T5> -inline -group4<T1,T2,T3,T4> -group_head( group5<T1,T2,T3,T4,T5> const& x) -{ - return group4<T1,T2,T3,T4> (x.a1_,x.a2_,x.a3_,x.a4_); -} - -template <class T1,class T2,class T3,class T4,class T5> -inline -group1<T5> -group_last( group5<T1,T2,T3,T4,T5> const& x) -{ - return group1<T5> (x.a5_); 
-} - - - -template <class T1,class T2,class T3,class T4,class T5,class T6> -inline -group5<T1,T2,T3,T4,T5> -group_head( group6<T1,T2,T3,T4,T5,T6> const& x) -{ - return group5<T1,T2,T3,T4,T5> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6> -inline -group1<T6> -group_last( group6<T1,T2,T3,T4,T5,T6> const& x) -{ - return group1<T6> (x.a6_); -} - - - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7> -inline -group6<T1,T2,T3,T4,T5,T6> -group_head( group7<T1,T2,T3,T4,T5,T6,T7> const& x) -{ - return group6<T1,T2,T3,T4,T5,T6> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7> -inline -group1<T7> -group_last( group7<T1,T2,T3,T4,T5,T6,T7> const& x) -{ - return group1<T7> (x.a7_); -} - - - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8> -inline -group7<T1,T2,T3,T4,T5,T6,T7> -group_head( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x) -{ - return group7<T1,T2,T3,T4,T5,T6,T7> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8> -inline -group1<T8> -group_last( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x) -{ - return group1<T8> (x.a8_); -} - - - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9> -inline -group8<T1,T2,T3,T4,T5,T6,T7,T8> -group_head( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x) -{ - return group8<T1,T2,T3,T4,T5,T6,T7,T8> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9> -inline -group1<T9> -group_last( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x) -{ - return group1<T9> (x.a9_); -} - - - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10> -inline -group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> -group_head( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x) -{ - return group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_,x.a9_); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10> -inline -group1<T10> -group_last( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x) -{ - return group1<T10> (x.a10_); -} - - - - - -} // namespace detail - - - -// helper functions - - -inline detail::group1< detail::group0 > -group() { return detail::group1< detail::group0 > ( detail::group0() ); } - -template <class T1, class Var> -inline -detail::group1< detail::group2<T1, Var const&> > - group(T1 a1, Var const& var) -{ - return detail::group1< detail::group2<T1, Var const&> > - ( detail::group2<T1, Var const&> - (a1, var) - ); -} - -template <class T1,class T2, class Var> -inline -detail::group1< detail::group3<T1,T2, Var const&> > - group(T1 a1,T2 a2, Var const& var) -{ - return detail::group1< detail::group3<T1,T2, Var const&> > - ( detail::group3<T1,T2, Var const&> - (a1,a2, var) - ); -} - -template <class T1,class T2,class T3, class Var> -inline -detail::group1< detail::group4<T1,T2,T3, Var const&> > - group(T1 a1,T2 a2,T3 a3, Var const& var) -{ - return detail::group1< detail::group4<T1,T2,T3, Var const&> > - ( detail::group4<T1,T2,T3, Var const&> - (a1,a2,a3, var) - ); -} - -template <class T1,class T2,class T3,class T4, class Var> -inline -detail::group1< detail::group5<T1,T2,T3,T4, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4, Var const& var) -{ - return detail::group1< 
detail::group5<T1,T2,T3,T4, Var const&> > - ( detail::group5<T1,T2,T3,T4, Var const&> - (a1,a2,a3,a4, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5, class Var> -inline -detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var const& var) -{ - return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> > - ( detail::group6<T1,T2,T3,T4,T5, Var const&> - (a1,a2,a3,a4,a5, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6, class Var> -inline -detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var const& var) -{ - return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> > - ( detail::group7<T1,T2,T3,T4,T5,T6, Var const&> - (a1,a2,a3,a4,a5,a6, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var> -inline -detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var const& var) -{ - return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> > - ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> - (a1,a2,a3,a4,a5,a6,a7, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var> -inline -detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var const& var) -{ - return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> > - ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> - (a1,a2,a3,a4,a5,a6,a7,a8, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var> -inline -detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var const& var) -{ - return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> > - ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> - (a1,a2,a3,a4,a5,a6,a7,a8,a9, var) - ); -} - - -#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST - -template <class T1, class Var> -inline -detail::group1< detail::group2<T1, Var&> > - group(T1 a1, Var& var) -{ - return detail::group1< detail::group2<T1, Var&> > - ( detail::group2<T1, Var&> - (a1, var) - ); -} - -template <class T1,class T2, class Var> -inline -detail::group1< detail::group3<T1,T2, Var&> > - group(T1 a1,T2 a2, Var& var) -{ - return detail::group1< detail::group3<T1,T2, Var&> > - ( detail::group3<T1,T2, Var&> - (a1,a2, var) - ); -} - -template <class T1,class T2,class T3, class Var> -inline -detail::group1< detail::group4<T1,T2,T3, Var&> > - group(T1 a1,T2 a2,T3 a3, Var& var) -{ - return detail::group1< detail::group4<T1,T2,T3, Var&> > - ( detail::group4<T1,T2,T3, Var&> - (a1,a2,a3, var) - ); -} - -template <class T1,class T2,class T3,class T4, class Var> -inline -detail::group1< detail::group5<T1,T2,T3,T4, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4, Var& var) -{ - return detail::group1< detail::group5<T1,T2,T3,T4, Var&> > - ( detail::group5<T1,T2,T3,T4, Var&> - (a1,a2,a3,a4, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5, class Var> -inline -detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var& var) -{ - return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> > - ( detail::group6<T1,T2,T3,T4,T5, Var&> - (a1,a2,a3,a4,a5, var) - ); -} - -template <class T1,class T2,class 
T3,class T4,class T5,class T6, class Var> -inline -detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var& var) -{ - return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> > - ( detail::group7<T1,T2,T3,T4,T5,T6, Var&> - (a1,a2,a3,a4,a5,a6, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var> -inline -detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var& var) -{ - return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> > - ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> - (a1,a2,a3,a4,a5,a6,a7, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var> -inline -detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var& var) -{ - return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> > - ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> - (a1,a2,a3,a4,a5,a6,a7,a8, var) - ); -} - -template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var> -inline -detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> > - group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var& var) -{ - return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> > - ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> - (a1,a2,a3,a4,a5,a6,a7,a8,a9, var) - ); -} - - -#endif //end- #ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST - - -} // namespace io - -} // namespace boost - - -#endif // BOOST_FORMAT_GROUP_HPP diff --git a/src/boost/format/internals.hpp b/src/boost/format/internals.hpp deleted file mode 100644 index d25eb4c864c4..000000000000 --- a/src/boost/format/internals.hpp +++ /dev/null @@ -1,167 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream - -// ---------------------------------------------------------------------------- -// internals.hpp : internal structs. included by format.hpp -// stream_format_state, and format_item -// ---------------------------------------------------------------------------- - - -#ifndef BOOST_FORMAT_INTERNALS_HPP -#define BOOST_FORMAT_INTERNALS_HPP - - -#include <string> -#include <sstream> - -namespace boost { -namespace io { -namespace detail { - - -// -------------- -// set of params that define the format state of a stream - -struct stream_format_state -{ - typedef std::ios basic_ios; - - std::streamsize width_; - std::streamsize precision_; - char fill_; - std::ios::fmtflags flags_; - - stream_format_state() : width_(-1), precision_(-1), fill_(0), flags_(std::ios::dec) {} - stream_format_state(basic_ios& os) {set_by_stream(os); } - - void apply_on(basic_ios & os) const; //- applies format_state to the stream - template<class T> void apply_manip(T manipulator) //- modifies state by applying manipulator. 
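The group.hpp header whose deletion ends above packages up to nine stream manipulators together with one argument (group2..group10 plus the group() helpers), so the manipulators affect only that directive. A sketch of the same facility via stock Boost.Format, which ships an equivalent io::group():

```cpp
#include <boost/format.hpp>
#include <iomanip>
#include <iostream>

int main()
{
    using boost::io::group;

    // hex/showbase are applied to the grouped argument only.
    std::cout << boost::format("%1% and %2%\n")
                     % group(std::hex, std::showbase, 255)
                     % 255;                                          // 0xff and 255

    // setfill/setw travel with the value in the same way.
    std::cout << boost::format("[%1%]\n")
                     % group(std::setfill('*'), std::setw(8), 42);   // [******42]
}
```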
- { apply_manip_body<T>( *this, manipulator) ; } - void reset(); //- sets to default state. - void set_by_stream(const basic_ios& os); //- sets to os's state. -}; - - - -// -------------- -// format_item : stores all parameters that can be defined by directives in the format-string - -struct format_item -{ - enum pad_values { zeropad = 1, spacepad =2, centered=4, tabulation = 8 }; - - enum arg_values { argN_no_posit = -1, // non-positional directive. argN will be set later. - argN_tabulation = -2, // tabulation directive. (no argument read) - argN_ignored = -3 // ignored directive. (no argument read) - }; - typedef BOOST_IO_STD ios basic_ios; - typedef detail::stream_format_state stream_format_state; - typedef std::string string_t; - typedef BOOST_IO_STD ostringstream internal_stream_t; - - - int argN_; //- argument number (starts at 0, eg : %1 => argN=0) - // negative values are used for items that don't process - // an argument - string_t res_; //- result of the formatting of this item - string_t appendix_; //- piece of string between this item and the next - - stream_format_state ref_state_;// set by parsing the format_string, is only affected by modify_item - stream_format_state state_; // always same as ref_state, _unless_ modified by manipulators 'group(..)' - - // non-stream format-state parameters - signed int truncate_; //- is >=0 for directives like %.5s (take 5 chars from the string) - unsigned int pad_scheme_; //- several possible padding schemes can mix. see pad_values - - format_item() : argN_(argN_no_posit), truncate_(-1), pad_scheme_(0) {} - - void compute_states(); // sets states according to truncate and pad_scheme. -}; - - - -// ----------------------------------------------------------- -// Definitions -// ----------------------------------------------------------- - -// --- stream_format_state:: ------------------------------------------- -inline -void stream_format_state::apply_on(basic_ios & os) const - // set the state of this stream according to our params -{ - if(width_ != -1) - os.width(width_); - if(precision_ != -1) - os.precision(precision_); - if(fill_ != 0) - os.fill(fill_); - os.flags(flags_); -} - -inline -void stream_format_state::set_by_stream(const basic_ios& os) - // set our params according to the state of this stream -{ - flags_ = os.flags(); - width_ = os.width(); - precision_ = os.precision(); - fill_ = os.fill(); -} - -template<class T> inline -void apply_manip_body( stream_format_state& self, - T manipulator) - // modify our params according to the manipulator -{ - BOOST_IO_STD stringstream ss; - self.apply_on( ss ); - ss << manipulator; - self.set_by_stream( ss ); -} - -inline -void stream_format_state::reset() - // set our params to standard's default state -{ - width_=-1; precision_=-1; fill_=0; - flags_ = std::ios::dec; -} - - -// --- format_items:: ------------------------------------------- -inline -void format_item::compute_states() - // reflect pad_scheme_ on state_ and ref_state_ - // because some pad_schemes has complex consequences on several state params. 
-{ - if(pad_scheme_ & zeropad) - { - if(ref_state_.flags_ & std::ios::left) - { - pad_scheme_ = pad_scheme_ & (~zeropad); // ignore zeropad in left alignment - } - else - { - ref_state_.fill_='0'; - ref_state_.flags_ |= std::ios::internal; - } - } - state_ = ref_state_; -} - - -} } } // namespaces boost :: io :: detail - - -#endif // BOOST_FORMAT_INTERNALS_HPP diff --git a/src/boost/format/internals_fwd.hpp b/src/boost/format/internals_fwd.hpp deleted file mode 100644 index a8ebf7c3abc1..000000000000 --- a/src/boost/format/internals_fwd.hpp +++ /dev/null @@ -1,65 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// internals_fwd.hpp : forward declarations, for internal headers -// ------------------------------------------------------------------------------ - -#ifndef BOOST_FORMAT_INTERNAL_FWD_HPP -#define BOOST_FORMAT_INTERNAL_FWD_HPP - -#include "boost/format/format_fwd.hpp" - - -namespace boost { -namespace io { - -namespace detail { - struct stream_format_state; - struct format_item; -} - - -namespace detail { - - // these functions were intended as methods, - // but MSVC have problems with template member functions : - - // defined in format_implementation.hpp : - template<class T> - basic_format& modify_item_body( basic_format& self, - int itemN, const T& manipulator); - - template<class T> - basic_format& bind_arg_body( basic_format& self, - int argN, const T& val); - - template<class T> - void apply_manip_body( stream_format_state& self, - T manipulator); - - // argument feeding (defined in feed_args.hpp ) : - template<class T> - void distribute(basic_format& self, T x); - - template<class T> - basic_format& feed(basic_format& self, T x); - -} // namespace detail - -} // namespace io -} // namespace boost - - -#endif // BOOST_FORMAT_INTERNAL_FWD_HPP diff --git a/src/boost/format/local.mk b/src/boost/format/local.mk deleted file mode 100644 index 3776eff382fe..000000000000 --- a/src/boost/format/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -libraries += libformat - -libformat_NAME = libnixformat - -libformat_DIR := $(d) - -libformat_SOURCES := $(wildcard $(d)/*.cc) diff --git a/src/boost/format/macros_default.hpp b/src/boost/format/macros_default.hpp deleted file mode 100644 index 4fd84a163fb3..000000000000 --- a/src/boost/format/macros_default.hpp +++ /dev/null @@ -1,48 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. 
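compute_states() above is where the printf '0' flag becomes fill('0') plus std::ios::internal, and where it is dropped when left alignment was also requested. The observable effect, sketched with stock Boost.Format (the vendored copy implements the same rules):

```cpp
#include <boost/format.hpp>
#include <iostream>

int main()
{
    std::cout << boost::format("[%08.2f]\n") % 3.14159;   // [00003.14]  zero fill, internal
    std::cout << boost::format("[%-08.2f]\n") % 3.14159;  // [3.14    ]  '0' ignored with '-'
    std::cout << boost::format("[%+06d]\n") % 42;         // [+00042]    sign stays in front
}
```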
- -// ideas taken from Rdiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// macros_default.hpp : configuration for the format library -// provides default values for the stl workaround macros -// ------------------------------------------------------------------------------ - -#ifndef BOOST_FORMAT_MACROS_DEFAULT_HPP -#define BOOST_FORMAT_MACROS_DEFAULT_HPP - -// *** This should go to "boost/config/suffix.hpp". - -#ifndef BOOST_IO_STD -# define BOOST_IO_STD std:: -#endif - -// **** Workaround for io streams, stlport and msvc. -#ifdef BOOST_IO_NEEDS_USING_DECLARATION -namespace boost { - using std::char_traits; - using std::basic_ostream; - using std::basic_ostringstream; - namespace io { - using std::basic_ostream; - namespace detail { - using std::basic_ios; - using std::basic_ostream; - using std::basic_ostringstream; - } - } -} -#endif - -// ------------------------------------------------------------------------------ - -#endif // BOOST_FORMAT_MACROS_DEFAULT_HPP diff --git a/src/boost/format/parsing.cc b/src/boost/format/parsing.cc deleted file mode 100644 index 34c36adeb734..000000000000 --- a/src/boost/format/parsing.cc +++ /dev/null @@ -1,454 +0,0 @@ -// -*- C++ -*- -// Boost general library 'format' --------------------------- -// See http://www.boost.org for updates, documentation, and revision history. - -// (C) Samuel Krempp 2001 -// krempp@crans.ens-cachan.fr -// Permission to copy, use, modify, sell and -// distribute this software is granted provided this copyright notice appears -// in all copies. This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. - -// ideas taken from Rudiger Loos's format class -// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing) - -// ------------------------------------------------------------------------------ -// parsing.hpp : implementation of the parsing member functions -// ( parse, parse_printf_directive) -// ------------------------------------------------------------------------------ - - -#ifndef BOOST_FORMAT_PARSING_HPP -#define BOOST_FORMAT_PARSING_HPP - - -#include <boost/format.hpp> -#include <boost/throw_exception.hpp> -#include <boost/assert.hpp> - - -namespace boost { -namespace io { -namespace detail { - - template<class Stream> inline - bool wrap_isdigit(char c, Stream &os) - { -#ifndef BOOST_NO_LOCALE_ISIDIGIT - return std::isdigit(c, os.rdbuf()->getloc() ); -# else - using namespace std; - return isdigit(c); -#endif - } //end- wrap_isdigit(..) - - template<class Res> inline - Res str2int(const std::string& s, - std::string::size_type start, - BOOST_IO_STD ios &os, - const Res = Res(0) ) - // Input : char string, with starting index - // a basic_ios& merely to call its widen/narrow member function in the desired locale. - // Effects : reads s[start:] and converts digits into an integral n, of type Res - // Returns : n - { - Res n = 0; - while(start<s.size() && wrap_isdigit(s[start], os) ) { - char cur_ch = s[start]; - BOOST_ASSERT(cur_ch != 0 ); // since we called isdigit, this should not happen. 
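The helpers being removed here (wrap_isdigit, str2int) feed parse_printf_directive further down in this file. For reference, a few of the directive forms that parser accepts, with the outputs stock Boost.Format produces for them; the strings are illustrative only:

```cpp
#include <boost/format.hpp>
#include <iostream>

int main()
{
    std::cout << boost::format("%1% %2%\n")   % "a" % "b";   // simple positional: a b
    std::cout << boost::format("%2$s %1$s\n") % "a" % "b";   // printf-style positions: b a
    std::cout << boost::format("%-6d|\n")     % 42;          // left-aligned width 6: 42    |
    std::cout << boost::format("%.3s\n")      % "abcdef";    // precision truncates strings: abc
    std::cout << boost::format("%%\n");                      // escaped percent: %
}
```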
- n *= 10; - n += cur_ch - '0'; // 22.2.1.1.2 of the C++ standard - ++start; - } - return n; - } - - void skip_asterisk(const std::string & buf, - std::string::size_type * pos_p, - BOOST_IO_STD ios &os) - // skip printf's "asterisk-fields" directives in the format-string buf - // Input : char string, with starting index *pos_p - // a basic_ios& merely to call its widen/narrow member function in the desired locale. - // Effects : advance *pos_p by skipping printf's asterisk fields. - // Returns : nothing - { - using namespace std; - BOOST_ASSERT( pos_p != 0); - if(*pos_p >= buf.size() ) return; - if(buf[ *pos_p]=='*') { - ++ (*pos_p); - while (*pos_p < buf.size() && wrap_isdigit(buf[*pos_p],os)) ++(*pos_p); - if(buf[*pos_p]=='$') ++(*pos_p); - } - } - - - inline void maybe_throw_exception( unsigned char exceptions) - // auxiliary func called by parse_printf_directive - // for centralising error handling - // it either throws if user sets the corresponding flag, or does nothing. - { - if(exceptions & io::bad_format_string_bit) - boost::throw_exception(io::bad_format_string()); - } - - - - bool parse_printf_directive(const std::string & buf, - std::string::size_type * pos_p, - detail::format_item * fpar, - BOOST_IO_STD ios &os, - unsigned char exceptions) - // Input : a 'printf-directive' in the format-string, starting at buf[ *pos_p ] - // a basic_ios& merely to call its widen/narrow member function in the desired locale. - // a bitset'excpetions' telling whether to throw exceptions on errors. - // Returns : true if parse somehow succeeded (possibly ignoring errors if exceptions disabled) - // false if it failed so bad that the directive should be printed verbatim - // Effects : - *pos_p is incremented so that buf[*pos_p] is the first char after the directive - // - *fpar is set with the parameters read in the directive - { - typedef format_item format_item_t; - BOOST_ASSERT( pos_p != 0); - std::string::size_type &i1 = *pos_p, - i0; - fpar->argN_ = format_item_t::argN_no_posit; // if no positional-directive - - bool in_brackets=false; - if(buf[i1]=='|') - { - in_brackets=true; - if( ++i1 >= buf.size() ) { - maybe_throw_exception(exceptions); - return false; - } - } - - // the flag '0' would be picked as a digit for argument order, but here it's a flag : - if(buf[i1]=='0') - goto parse_flags; - - // handle argument order (%2$d) or possibly width specification: %2d - i0 = i1; // save position before digits - while (i1 < buf.size() && wrap_isdigit(buf[i1], os)) - ++i1; - if (i1!=i0) - { - if( i1 >= buf.size() ) { - maybe_throw_exception(exceptions); - return false; - } - int n=str2int(buf,i0, os, int(0) ); - - // %N% case : this is already the end of the directive - if( buf[i1] == '%' ) - { - fpar->argN_ = n-1; - ++i1; - if( in_brackets) - maybe_throw_exception(exceptions); - // but don't return. maybe "%" was used in lieu of '$', so we go on. - else return true; - } - - if ( buf[i1]=='$' ) - { - fpar->argN_ = n-1; - ++i1; - } - else - { - // non-positionnal directive - fpar->ref_state_.width_ = n; - fpar->argN_ = format_item_t::argN_no_posit; - goto parse_precision; - } - } - - parse_flags: - // handle flags - while ( i1 <buf.size()) // as long as char is one of + - = # 0 l h or ' ' - { - // misc switches - switch (buf[i1]) - { - case '\'' : break; // no effect yet. 
(painful to implement) - case 'l': - case 'h': // short/long modifier : for printf-comaptibility (no action needed) - break; - case '-': - fpar->ref_state_.flags_ |= std::ios::left; - break; - case '=': - fpar->pad_scheme_ |= format_item_t::centered; - break; - case ' ': - fpar->pad_scheme_ |= format_item_t::spacepad; - break; - case '+': - fpar->ref_state_.flags_ |= std::ios::showpos; - break; - case '0': - fpar->pad_scheme_ |= format_item_t::zeropad; - // need to know alignment before really setting flags, - // so just add 'zeropad' flag for now, it will be processed later. - break; - case '#': - fpar->ref_state_.flags_ |= std::ios::showpoint | std::ios::showbase; - break; - default: - goto parse_width; - } - ++i1; - } // loop on flag. - if( i1>=buf.size()) { - maybe_throw_exception(exceptions); - return true; - } - - parse_width: - // handle width spec - skip_asterisk(buf, &i1, os); // skips 'asterisk fields' : *, or *N$ - i0 = i1; // save position before digits - while (i1<buf.size() && wrap_isdigit(buf[i1], os)) - i1++; - - if (i1!=i0) - { fpar->ref_state_.width_ = str2int( buf,i0, os, std::streamsize(0) ); } - - parse_precision: - if( i1>=buf.size()) { - maybe_throw_exception(exceptions); - return true; - } - // handle precision spec - if (buf[i1]=='.') - { - ++i1; - skip_asterisk(buf, &i1, os); - i0 = i1; // save position before digits - while (i1<buf.size() && wrap_isdigit(buf[i1], os)) - ++i1; - - if(i1==i0) - fpar->ref_state_.precision_ = 0; - else - fpar->ref_state_.precision_ = str2int(buf,i0, os, std::streamsize(0) ); - } - - // handle formatting-type flags : - while( i1<buf.size() && - ( buf[i1]=='l' || buf[i1]=='L' || buf[i1]=='h') ) - ++i1; - if( i1>=buf.size()) { - maybe_throw_exception(exceptions); - return true; - } - - if( in_brackets && buf[i1]=='|' ) - { - ++i1; - return true; - } - switch (buf[i1]) - { - case 'X': - fpar->ref_state_.flags_ |= std::ios::uppercase; - case 'p': // pointer => set hex. - case 'x': - fpar->ref_state_.flags_ &= ~std::ios::basefield; - fpar->ref_state_.flags_ |= std::ios::hex; - break; - - case 'o': - fpar->ref_state_.flags_ &= ~std::ios::basefield; - fpar->ref_state_.flags_ |= std::ios::oct; - break; - - case 'E': - fpar->ref_state_.flags_ |= std::ios::uppercase; - case 'e': - fpar->ref_state_.flags_ &= ~std::ios::floatfield; - fpar->ref_state_.flags_ |= std::ios::scientific; - - fpar->ref_state_.flags_ &= ~std::ios::basefield; - fpar->ref_state_.flags_ |= std::ios::dec; - break; - - case 'f': - fpar->ref_state_.flags_ &= ~std::ios::floatfield; - fpar->ref_state_.flags_ |= std::ios::fixed; - case 'u': - case 'd': - case 'i': - fpar->ref_state_.flags_ &= ~std::ios::basefield; - fpar->ref_state_.flags_ |= std::ios::dec; - break; - - case 'T': - ++i1; - if( i1 >= buf.size()) - maybe_throw_exception(exceptions); - else - fpar->ref_state_.fill_ = buf[i1]; - fpar->pad_scheme_ |= format_item_t::tabulation; - fpar->argN_ = format_item_t::argN_tabulation; - break; - case 't': - fpar->ref_state_.fill_ = ' '; - fpar->pad_scheme_ |= format_item_t::tabulation; - fpar->argN_ = format_item_t::argN_tabulation; - break; - - case 'G': - fpar->ref_state_.flags_ |= std::ios::uppercase; - break; - case 'g': // 'g' conversion is default for floats. 
- fpar->ref_state_.flags_ &= ~std::ios::basefield; - fpar->ref_state_.flags_ |= std::ios::dec; - - // CLEAR all floatield flags, so stream will CHOOSE - fpar->ref_state_.flags_ &= ~std::ios::floatfield; - break; - - case 'C': - case 'c': - fpar->truncate_ = 1; - break; - case 'S': - case 's': - fpar->truncate_ = fpar->ref_state_.precision_; - fpar->ref_state_.precision_ = -1; - break; - case 'n' : - fpar->argN_ = format_item_t::argN_ignored; - break; - default: - maybe_throw_exception(exceptions); - } - ++i1; - - if( in_brackets ) - { - if( i1<buf.size() && buf[i1]=='|' ) - { - ++i1; - return true; - } - else maybe_throw_exception(exceptions); - } - return true; - } - -} // detail namespace -} // io namespace - - -// ----------------------------------------------- -// format :: parse(..) - -void basic_format::parse(const string_t & buf) - // parse the format-string -{ - using namespace std; - const char arg_mark = '%'; - bool ordered_args=true; - int max_argN=-1; - string_t::size_type i1=0; - int num_items=0; - - // A: find upper_bound on num_items and allocates arrays - i1=0; - while( (i1=buf.find(arg_mark,i1)) != string::npos ) - { - if( i1+1 >= buf.size() ) { - if(exceptions() & io::bad_format_string_bit) - boost::throw_exception(io::bad_format_string()); // must not end in "bla bla %" - else break; // stop there, ignore last '%' - } - if(buf[i1+1] == buf[i1] ) { i1+=2; continue; } // escaped "%%" / "##" - ++i1; - - // in case of %N% directives, dont count it double (wastes allocations..) : - while(i1 < buf.size() && io::detail::wrap_isdigit(buf[i1],oss_)) ++i1; - if( i1 < buf.size() && buf[i1] == arg_mark ) ++ i1; - - ++num_items; - } - items_.assign( num_items, format_item_t() ); - - // B: Now the real parsing of the format string : - num_items=0; - i1 = 0; - string_t::size_type i0 = i1; - bool special_things=false; - int cur_it=0; - while( (i1=buf.find(arg_mark,i1)) != string::npos ) - { - string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_; - - if( buf[i1+1] == buf[i1] ) // escaped mark, '%%' - { - piece += buf.substr(i0, i1-i0) + buf[i1]; - i1+=2; i0=i1; - continue; - } - BOOST_ASSERT( static_cast<unsigned int>(cur_it) < items_.size() || cur_it==0); - - if(i1!=i0) piece += buf.substr(i0, i1-i0); - ++i1; - - bool parse_ok; - parse_ok = io::detail::parse_printf_directive(buf, &i1, &items_[cur_it], oss_, exceptions()); - if( ! parse_ok ) continue; // the directive will be printed verbatim - - i0=i1; - items_[cur_it].compute_states(); // process complex options, like zeropad, into stream params. - - int argN=items_[cur_it].argN_; - if(argN == format_item_t::argN_ignored) - continue; - if(argN ==format_item_t::argN_no_posit) - ordered_args=false; - else if(argN == format_item_t::argN_tabulation) special_things=true; - else if(argN > max_argN) max_argN = argN; - ++num_items; - ++cur_it; - } // loop on %'s - BOOST_ASSERT(cur_it == num_items); - - // store the final piece of string - string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_; - piece += buf.substr(i0); - - if( !ordered_args) - { - if(max_argN >= 0 ) // dont mix positional with non-positionnal directives - { - if(exceptions() & io::bad_format_string_bit) - boost::throw_exception(io::bad_format_string()); - // else do nothing. 
=> positionnal arguments are processed as non-positionnal - } - // set things like it would have been with positional directives : - int non_ordered_items = 0; - for(int i=0; i< num_items; ++i) - if(items_[i].argN_ == format_item_t::argN_no_posit) - { - items_[i].argN_ = non_ordered_items; - ++non_ordered_items; - } - max_argN = non_ordered_items-1; - } - - // C: set some member data : - items_.resize(num_items); - - if(special_things) style_ |= special_needs; - num_args_ = max_argN + 1; - if(ordered_args) style_ |= ordered; - else style_ &= ~ordered; -} - -} // namespace boost - - -#endif // BOOST_FORMAT_PARSING_HPP diff --git a/src/boost/throw_exception.hpp b/src/boost/throw_exception.hpp deleted file mode 100644 index 07b4ae5ceae7..000000000000 --- a/src/boost/throw_exception.hpp +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED -#define BOOST_THROW_EXCEPTION_HPP_INCLUDED - -// MS compatible compilers support #pragma once - -#if defined(_MSC_VER) && (_MSC_VER >= 1020) -# pragma once -#endif - -// -// boost/throw_exception.hpp -// -// Copyright (c) 2002 Peter Dimov and Multi Media Ltd. -// -// Permission to copy, use, modify, sell and distribute this software -// is granted provided this copyright notice appears in all copies. -// This software is provided "as is" without express or implied -// warranty, and with no claim as to its suitability for any purpose. -// -// http://www.boost.org/libs/utility/throw_exception.html -// - -//#include <boost/config.hpp> - -#ifdef BOOST_NO_EXCEPTIONS -# include <exception> -#endif - -namespace boost -{ - -#ifdef BOOST_NO_EXCEPTIONS - -void throw_exception(std::exception const & e); // user defined - -#else - -template<class E> void throw_exception(E const & e) -{ - throw e; -} - -#endif - -} // namespace boost - -#endif // #ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 6e05e165545d..38dbe3e58b26 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -64,6 +64,8 @@ int main (int argc, char * * argv) settings.maxBuildJobs.set("1"); // hack to make tests with local?root= work + initPlugins(); + auto store = openStore().cast<LocalStore>(); /* It would be more appropriate to use $XDG_RUNTIME_DIR, since @@ -96,7 +98,9 @@ int main (int argc, char * * argv) source >> drvPath; auto requiredFeatures = readStrings<std::set<std::string>>(source); - auto canBuildLocally = amWilling && (neededSystem == settings.thisSystem); + auto canBuildLocally = amWilling + && ( neededSystem == settings.thisSystem + || settings.extraPlatforms.get().count(neededSystem) > 0); /* Error ignored here, will be caught later */ mkdir(currentLoad.c_str(), 0777); @@ -177,7 +181,7 @@ int main (int argc, char * * argv) Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri)); Store::Params storeParams; - if (hasPrefix(storeUri, "ssh://")) { + if (hasPrefix(bestMachine->storeUri, "ssh://")) { storeParams["max-connections"] ="1"; storeParams["log-fd"] = "4"; if (bestMachine->sshKey != "") @@ -189,8 +193,10 @@ int main (int argc, char * * argv) storeUri = bestMachine->storeUri; } catch (std::exception & e) { - printError("unable to open SSH connection to '%s': %s; trying other available machines...", - bestMachine->storeUri, e.what()); + auto msg = chomp(drainFD(5, false)); + printError("cannot build on '%s': %s%s", + bestMachine->storeUri, e.what(), + (msg.empty() ? 
"" : ": " + msg)); bestMachine->enabled = false; continue; } @@ -200,6 +206,8 @@ int main (int argc, char * * argv) } connected: + close(5); + std::cerr << "# accept\n" << storeUri << "\n"; auto inputs = readStrings<PathSet>(source); @@ -218,9 +226,11 @@ connected: signal(SIGALRM, old); } + auto substitute = settings.buildersUseSubstitutes ? Substitute : NoSubstitute; + { Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri)); - copyPaths(store, ref<Store>(sshStore), inputs, NoRepair, NoCheckSigs); + copyPaths(store, ref<Store>(sshStore), inputs, NoRepair, NoCheckSigs, substitute); } uploadLock = -1; @@ -239,8 +249,8 @@ connected: if (!missing.empty()) { Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri)); - setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */ - copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs); + store->locksHeld.insert(missing.begin(), missing.end()); /* FIXME: ugly */ + copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute); } return; diff --git a/src/build-remote/local.mk b/src/build-remote/local.mk index 64368a43ff73..50b0409d1886 100644 --- a/src/build-remote/local.mk +++ b/src/build-remote/local.mk @@ -4,6 +4,6 @@ build-remote_DIR := $(d) build-remote_INSTALL_DIR := $(libexecdir)/nix -build-remote_LIBS = libmain libutil libformat libstore +build-remote_LIBS = libmain libformat libstore libutil build-remote_SOURCES := $(d)/build-remote.cc diff --git a/src/buildenv/buildenv.cc b/src/buildenv/buildenv.cc deleted file mode 100644 index f05aa7bf2fbb..000000000000 --- a/src/buildenv/buildenv.cc +++ /dev/null @@ -1,187 +0,0 @@ -#include "shared.hh" -#include <sys/stat.h> -#include <sys/types.h> -#include <fcntl.h> -#include <algorithm> - -using namespace nix; - -typedef std::map<Path,int> Priorities; - -static bool isDirectory (const Path & path) -{ - struct stat st; - if (stat(path.c_str(), &st) == -1) - throw SysError(format("getting status of '%1%'") % path); - return S_ISDIR(st.st_mode); -} - -static auto priorities = Priorities{}; - -static auto symlinks = 0; - -/* For each activated package, create symlinks */ -static void createLinks(const Path & srcDir, const Path & dstDir, int priority) -{ - auto srcFiles = readDirectory(srcDir); - for (const auto & ent : srcFiles) { - if (ent.name[0] == '.') - /* not matched by glob */ - continue; - const auto & srcFile = srcDir + "/" + ent.name; - auto dstFile = dstDir + "/" + ent.name; - - /* The files below are special-cased to that they don't show up - * in user profiles, either because they are useless, or - * because they would cauase pointless collisions (e.g., each - * Python package brings its own - * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.) 
- */ - if (hasSuffix(srcFile, "/propagated-build-inputs") || - hasSuffix(srcFile, "/nix-support") || - hasSuffix(srcFile, "/perllocal.pod") || - hasSuffix(srcFile, "/info/dir") || - hasSuffix(srcFile, "/log")) { - continue; - } else if (isDirectory(srcFile)) { - struct stat dstSt; - auto res = lstat(dstFile.c_str(), &dstSt); - if (res == 0) { - if (S_ISDIR(dstSt.st_mode)) { - createLinks(srcFile, dstFile, priority); - continue; - } else if (S_ISLNK(dstSt.st_mode)) { - auto target = readLink(dstFile); - if (!isDirectory(target)) - throw Error(format("collision between '%1%' and non-directory '%2%'") - % srcFile % target); - if (unlink(dstFile.c_str()) == -1) - throw SysError(format("unlinking '%1%'") % dstFile); - if (mkdir(dstFile.c_str(), 0755) == -1) - throw SysError(format("creating directory '%1%'")); - createLinks(target, dstFile, priorities[dstFile]); - createLinks(srcFile, dstFile, priority); - continue; - } - } else if (errno != ENOENT) - throw SysError(format("getting status of '%1%'") % dstFile); - } else { - struct stat dstSt; - auto res = lstat(dstFile.c_str(), &dstSt); - if (res == 0) { - if (S_ISLNK(dstSt.st_mode)) { - auto target = readLink(dstFile); - auto prevPriority = priorities[dstFile]; - if (prevPriority == priority) - throw Error(format( - "Packages '%1%' and '%2%' have the same priority '%3%'" - "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " - "to change the priority of one of the conflicting packages" - " ('0' being the highest priority)" - ) % srcFile % target % priority); - if (prevPriority < priority) - continue; - if (unlink(dstFile.c_str()) == -1) - throw SysError(format("unlinking '%1%'") % dstFile); - } - } else if (errno != ENOENT) - throw SysError(format("getting status of '%1%'") % dstFile); - } - createSymlink(srcFile, dstFile); - priorities[dstFile] = priority; - symlinks++; - } -} - -typedef std::set<Path> FileProp; - -static auto done = FileProp{}; -static auto postponed = FileProp{}; - -static auto out = string{}; - -static void addPkg(const Path & pkgDir, int priority) -{ - if (done.find(pkgDir) != done.end()) - return; - done.insert(pkgDir); - createLinks(pkgDir, out, priority); - auto propagatedFN = pkgDir + "/nix-support/propagated-user-env-packages"; - auto propagated = string{}; - { - AutoCloseFD fd = open(propagatedFN.c_str(), O_RDONLY | O_CLOEXEC); - if (!fd) { - if (errno == ENOENT) - return; - throw SysError(format("opening '%1%'") % propagatedFN); - } - propagated = readLine(fd.get()); - } - for (const auto & p : tokenizeString<std::vector<string>>(propagated, " ")) - if (done.find(p) == done.end()) - postponed.insert(p); -} - -struct Package { - Path path; - bool active; - int priority; - Package(Path path, bool active, int priority) : path{std::move(path)}, active{active}, priority{priority} {} -}; - -typedef std::vector<Package> Packages; - -int main(int argc, char ** argv) -{ - return handleExceptions(argv[0], [&]() { - initNix(); - out = getEnv("out"); - if (mkdir(out.c_str(), 0755) == -1) - throw SysError(format("creating %1%") % out); - - /* Convert the stuff we get from the environment back into a coherent - * data type. - */ - auto pkgs = Packages{}; - auto derivations = tokenizeString<Strings>(getEnv("derivations")); - while (!derivations.empty()) { - /* !!! 
We're trusting the caller to structure derivations env var correctly */ - auto active = derivations.front(); derivations.pop_front(); - auto priority = stoi(derivations.front()); derivations.pop_front(); - auto outputs = stoi(derivations.front()); derivations.pop_front(); - for (auto n = 0; n < outputs; n++) { - auto path = derivations.front(); derivations.pop_front(); - pkgs.emplace_back(path, active != "false", priority); - } - } - - /* Symlink to the packages that have been installed explicitly by the - * user. Process in priority order to reduce unnecessary - * symlink/unlink steps. - */ - std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) { - return a.priority < b.priority || (a.priority == b.priority && a.path < b.path); - }); - for (const auto & pkg : pkgs) - if (pkg.active) - addPkg(pkg.path, pkg.priority); - - /* Symlink to the packages that have been "propagated" by packages - * installed by the user (i.e., package X declares that it wants Y - * installed as well). We do these later because they have a lower - * priority in case of collisions. - */ - auto priorityCounter = 1000; - while (!postponed.empty()) { - auto pkgDirs = postponed; - postponed = FileProp{}; - for (const auto & pkgDir : pkgDirs) - addPkg(pkgDir, priorityCounter++); - } - - std::cerr << "created " << symlinks << " symlinks in user environment\n"; - - createSymlink(getEnv("manifest"), out + "/manifest.nix"); - }); -} - diff --git a/src/buildenv/local.mk b/src/buildenv/local.mk deleted file mode 100644 index 17ec13b235f4..000000000000 --- a/src/buildenv/local.mk +++ /dev/null @@ -1,9 +0,0 @@ -programs += buildenv - -buildenv_DIR := $(d) - -buildenv_INSTALL_DIR := $(libexecdir)/nix - -buildenv_LIBS = libmain libstore libutil libformat - -buildenv_SOURCES := $(d)/buildenv.cc diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc index 910428c02686..0474865c6d7d 100644 --- a/src/libexpr/attr-set.cc +++ b/src/libexpr/attr-set.cc @@ -7,13 +7,14 @@ namespace nix { +/* Note: Various places expect the allocated memory to be zeroed. */ static void * allocBytes(size_t n) { void * p; #if HAVE_BOEHMGC p = GC_malloc(n); #else - p = malloc(n); + p = calloc(n, 1); #endif if (!p) throw std::bad_alloc(); return p; @@ -23,13 +24,15 @@ static void * allocBytes(size_t n) /* Allocate a new array of attributes for an attribute set with a specific capacity. The space is implicitly reserved after the Bindings structure. 
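The comment above describes the same trailing-storage trick that allocBindings() below relies on: the header object and its element array share one allocation, with the array placed directly behind the header. A small self-contained sketch of the pattern (Table and Item are made-up stand-ins for illustration, not the real Bindings/Attr types):

#include <cstdlib>
#include <iostream>
#include <new>

struct Item { int key; int value; };

struct Table
{
    unsigned short size = 0, capacity;
    Table(unsigned short capacity) : capacity(capacity) { }
    // The Item array lives directly after the Table header in the same allocation.
    Item * items() { return reinterpret_cast<Item *>(this + 1); }
};

static Table * allocTable(size_t capacity)
{
    // calloc so the trailing array starts out zeroed, like allocBytes() above.
    void * p = calloc(1, sizeof(Table) + sizeof(Item) * capacity);
    if (!p) throw std::bad_alloc();
    return new (p) Table((unsigned short) capacity); // placement new over the buffer
}

int main()
{
    Table * t = allocTable(4);
    t->items()[t->size++] = Item{1, 42};
    std::cout << t->items()[0].value << std::endl; // 42
    free(t);
}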
*/ -Bindings * EvalState::allocBindings(Bindings::size_t capacity) +Bindings * EvalState::allocBindings(size_t capacity) { - return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings(capacity); + if (capacity > std::numeric_limits<Bindings::size_t>::max()) + throw Error("attribute set of size %d is too big", capacity); + return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings((Bindings::size_t) capacity); } -void EvalState::mkAttrs(Value & v, unsigned int capacity) +void EvalState::mkAttrs(Value & v, size_t capacity) { if (capacity == 0) { v = vEmptySet; diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh index e1fc2bf6d796..3119a1848af2 100644 --- a/src/libexpr/attr-set.hh +++ b/src/libexpr/attr-set.hh @@ -83,7 +83,7 @@ public: for (size_t n = 0; n < size_; n++) res.emplace_back(&attrs[n]); std::sort(res.begin(), res.end(), [](const Attr * a, const Attr * b) { - return (string) a->name < (string) b->name; + return (const string &) a->name < (const string &) b->name; }); return res; } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 63de2d60a147..2473157b3f63 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -43,13 +43,14 @@ static char * dupString(const char * s) } +/* Note: Various places expect the allocated memory to be zeroed. */ static void * allocBytes(size_t n) { void * p; #if HAVE_BOEHMGC p = GC_malloc(n); #else - p = malloc(n); + p = calloc(n, 1); #endif if (!p) throw std::bad_alloc(); return p; @@ -293,6 +294,10 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store) , sWrong(symbols.create("wrong")) , sStructuredAttrs(symbols.create("__structuredAttrs")) , sBuilder(symbols.create("builder")) + , sArgs(symbols.create("args")) + , sOutputHash(symbols.create("outputHash")) + , sOutputHashAlgo(symbols.create("outputHashAlgo")) + , sOutputHashMode(symbols.create("outputHashMode")) , repair(NoRepair) , store(store) , baseEnv(allocEnv(128)) @@ -300,15 +305,36 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store) { countCalls = getEnv("NIX_COUNT_CALLS", "0") != "0"; - restricted = settings.restrictEval; - assert(gcInitialised); + static_assert(sizeof(Env) == 16); + /* Initialise the Nix expression search path. 
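In restricted and pure evaluation mode, the access checks set up just below come down to a prefix test of the requested path against the allowedPaths set. A simplified standalone approximation of that test (the helper name mirrors the isDirOrInDir() used further down, but this is a sketch under that assumption, not the actual Nix implementation):

#include <iostream>
#include <set>
#include <string>

// A path is allowed if it equals an allowed entry or lies underneath one.
static bool isDirOrInDir(const std::string & path, const std::string & dir)
{
    if (path == dir) return true;
    return path.size() > dir.size()
        && path.compare(0, dir.size(), dir) == 0
        && path[dir.size()] == '/';
}

static bool allowed(const std::set<std::string> & allowedPaths, const std::string & path)
{
    for (auto & dir : allowedPaths)
        if (isDirOrInDir(path, dir)) return true;
    return false;
}

int main()
{
    std::set<std::string> allowedPaths{"/nix/store", "/home/alice/src"};
    std::cout << allowed(allowedPaths, "/home/alice/src/foo.nix") << std::endl; // 1
    std::cout << allowed(allowedPaths, "/etc/passwd") << std::endl;             // 0
}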
*/ - Strings paths = parseNixPath(getEnv("NIX_PATH", "")); - for (auto & i : _searchPath) addToSearchPath(i); - for (auto & i : paths) addToSearchPath(i); - addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs"); + if (!settings.pureEval) { + Strings paths = parseNixPath(getEnv("NIX_PATH", "")); + for (auto & i : _searchPath) addToSearchPath(i); + for (auto & i : paths) addToSearchPath(i); + } + addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true)); + + if (settings.restrictEval || settings.pureEval) { + allowedPaths = PathSet(); + + for (auto & i : searchPath) { + auto r = resolveSearchPathElem(i); + if (!r.first) continue; + + auto path = r.second; + + if (store->isInStore(r.second)) { + PathSet closure; + store->computeFSClosure(store->toStorePath(r.second), closure); + for (auto & path : closure) + allowedPaths->insert(path); + } else + allowedPaths->insert(r.second); + } + } clearValue(vEmptySet); vEmptySet.type = tAttrs; @@ -326,38 +352,42 @@ EvalState::~EvalState() Path EvalState::checkSourcePath(const Path & path_) { - if (!restricted) return path_; + if (!allowedPaths) return path_; + + auto i = resolvedPaths.find(path_); + if (i != resolvedPaths.end()) + return i->second; + + bool found = false; + + for (auto & i : *allowedPaths) { + if (isDirOrInDir(path_, i)) { + found = true; + break; + } + } + + if (!found) + throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path_); /* Resolve symlinks. */ debug(format("checking access to '%s'") % path_); Path path = canonPath(path_, true); - for (auto & i : searchPath) { - auto r = resolveSearchPathElem(i); - if (!r.first) continue; - if (path == r.second || isInDir(path, r.second)) + for (auto & i : *allowedPaths) { + if (isDirOrInDir(path, i)) { + resolvedPaths[path_] = path; return path; + } } - /* To support import-from-derivation, allow access to anything in - the store. FIXME: only allow access to paths that have been - constructed by this evaluation. */ - if (store->isInStore(path)) return path; - -#if 0 - /* Hack to support the chroot dependencies of corepkgs (see - corepkgs/config.nix.in). */ - if (path == settings.nixPrefix && isStorePath(settings.nixPrefix)) - return path; -#endif - - throw RestrictedPathError(format("access to path '%1%' is forbidden in restricted mode") % path_); + throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path); } void EvalState::checkURI(const std::string & uri) { - if (!restricted) return; + if (!settings.restrictEval) return; /* 'uri' should be equal to a prefix, or in a subdirectory of a prefix. Thus, the prefix https://github.co does not permit @@ -371,11 +401,33 @@ void EvalState::checkURI(const std::string & uri) && (prefix[prefix.size() - 1] == '/' || uri[prefix.size()] == '/'))) return; + /* If the URI is a path, then check it against allowedPaths as + well. */ + if (hasPrefix(uri, "/")) { + checkSourcePath(uri); + return; + } + + if (hasPrefix(uri, "file://")) { + checkSourcePath(std::string(uri, 7)); + return; + } + throw RestrictedPathError("access to URI '%s' is forbidden in restricted mode", uri); } -void EvalState::addConstant(const string & name, Value & v) +Path EvalState::toRealPath(const Path & path, const PathSet & context) +{ + // FIXME: check whether 'path' is in 'context'. + return + !context.empty() && store->isInStore(path) + ? 
store->toRealPath(path) + : path; +}; + + +Value * EvalState::addConstant(const string & name, Value & v) { Value * v2 = allocValue(); *v2 = v; @@ -383,12 +435,18 @@ void EvalState::addConstant(const string & name, Value & v) baseEnv.values[baseEnvDispl++] = v2; string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2)); + return v2; } -void EvalState::addPrimOp(const string & name, - unsigned int arity, PrimOpFun primOp) +Value * EvalState::addPrimOp(const string & name, + size_t arity, PrimOpFun primOp) { + if (arity == 0) { + Value v; + primOp(*this, noPos, nullptr, v); + return addConstant(name, v); + } Value * v = allocValue(); string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; Symbol sym = symbols.create(name2); @@ -397,6 +455,7 @@ void EvalState::addPrimOp(const string & name, staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl; baseEnv.values[baseEnvDispl++] = v; baseEnv.values[0]->attrs->push_back(Attr(sym, v)); + return v; } @@ -487,7 +546,7 @@ Value & mkString(Value & v, const string & s, const PathSet & context) { mkString(v, s.c_str()); if (!context.empty()) { - unsigned int n = 0; + size_t n = 0; v.string.context = (const char * *) allocBytes((context.size() + 1) * sizeof(char *)); for (auto & i : context) @@ -506,17 +565,17 @@ void mkPath(Value & v, const char * s) inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) { - for (unsigned int l = var.level; l; --l, env = env->up) ; + for (size_t l = var.level; l; --l, env = env->up) ; if (!var.fromWith) return env->values[var.displ]; while (1) { - if (!env->haveWithAttrs) { + if (env->type == Env::HasWithExpr) { if (noEval) return 0; Value * v = allocValue(); evalAttrs(*env->up, (Expr *) env->values[0], *v); env->values[0] = v; - env->haveWithAttrs = true; + env->type = Env::HasWithAttrs; } Bindings::iterator j = env->values[0]->attrs->find(var.name); if (j != env->values[0]->attrs->end()) { @@ -525,7 +584,7 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) } if (!env->prevWith) throwUndefinedVarError("undefined variable '%1%' at %2%", var.name, var.pos); - for (unsigned int l = env->prevWith; l; --l, env = env->up) ; + for (size_t l = env->prevWith; l; --l, env = env->up) ; } } @@ -537,24 +596,24 @@ Value * EvalState::allocValue() } -Env & EvalState::allocEnv(unsigned int size) +Env & EvalState::allocEnv(size_t size) { - assert(size <= std::numeric_limits<decltype(Env::size)>::max()); + if (size > std::numeric_limits<decltype(Env::size)>::max()) + throw Error("environment size %d is too big", size); nrEnvs++; nrValuesInEnvs += size; Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *)); - env->size = size; + env->size = (decltype(Env::size)) size; + env->type = Env::Plain; - /* Clear the values because maybeThunk() and lookupVar fromWith expect this. */ - for (unsigned i = 0; i < size; ++i) - env->values[i] = 0; + /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. 
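The assumption stated above is exactly what the switch from malloc() to calloc() (and GC_malloc(), which also returns zeroed memory) provides: freshly allocated Env slots already read as null, so the old clearing loop can be dropped. A tiny illustration of the difference; note that treating all-zero bytes as null pointers is a platform assumption, albeit one that holds on the platforms involved here:

#include <cassert>
#include <cstdlib>
#include <iostream>

int main()
{
    const size_t n = 8;

    // calloc returns zero-filled memory: every slot already reads as a null
    // pointer (on common platforms where the null pointer is all-zero bits).
    void ** zeroed = static_cast<void **>(calloc(n, sizeof(void *)));
    if (!zeroed) return 1;
    for (size_t i = 0; i < n; ++i)
        assert(zeroed[i] == nullptr);
    free(zeroed);

    // malloc gives uninitialized memory, so the caller must clear it itself,
    // which is the kind of loop that was removed above.
    void ** raw = static_cast<void **>(malloc(n * sizeof(void *)));
    if (!raw) return 1;
    for (size_t i = 0; i < n; ++i)
        raw[i] = nullptr;
    free(raw);

    std::cout << "ok" << std::endl;
}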
*/ return *env; } -void EvalState::mkList(Value & v, unsigned int size) +void EvalState::mkList(Value & v, size_t size) { clearValue(v); if (size == 1) @@ -589,7 +648,7 @@ void EvalState::mkThunk_(Value & v, Expr * expr) void EvalState::mkPos(Value & v, Pos * pos) { - if (pos) { + if (pos && pos->file.set()) { mkAttrs(v, 3); mkString(*allocAttr(v, sFile), pos->file); mkInt(*allocAttr(v, sLine), pos->line); @@ -649,8 +708,10 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) } -void EvalState::evalFile(const Path & path, Value & v) +void EvalState::evalFile(const Path & path_, Value & v) { + auto path = checkSourcePath(path_); + FileEvalCache::iterator i; if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) { v = i->second; @@ -764,7 +825,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v) /* The recursive attributes are evaluated in the new environment, while the inherited attributes are evaluated in the original environment. */ - unsigned int displ = 0; + size_t displ = 0; for (auto & i : attrs) { Value * vAttr; if (hasOverrides && !i.second.inherited) { @@ -838,7 +899,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) /* The recursive attributes are evaluated in the new environment, while the inherited attributes are evaluated in the original environment. */ - unsigned int displ = 0; + size_t displ = 0; for (auto & i : attrs->attrs) env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2); @@ -849,7 +910,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) void ExprList::eval(EvalState & state, Env & env, Value & v) { state.mkList(v, elems.size()); - for (unsigned int n = 0; n < elems.size(); ++n) + for (size_t n = 0; n < elems.size(); ++n) v.listElems()[n] = elems[n]->maybeThunk(state, env); } @@ -971,22 +1032,22 @@ void ExprApp::eval(EvalState & state, Env & env, Value & v) void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos) { /* Figure out the number of arguments still needed. */ - unsigned int argsDone = 0; + size_t argsDone = 0; Value * primOp = &fun; while (primOp->type == tPrimOpApp) { argsDone++; primOp = primOp->primOpApp.left; } assert(primOp->type == tPrimOp); - unsigned int arity = primOp->primOp->arity; - unsigned int argsLeft = arity - argsDone; + auto arity = primOp->primOp->arity; + auto argsLeft = arity - argsDone; if (argsLeft == 1) { /* We have all the arguments, so call the primop. */ /* Put all the arguments in an array. */ Value * vArgs[arity]; - unsigned int n = arity - 1; + auto n = arity - 1; vArgs[n--] = &arg; for (Value * arg = &fun; arg->type == tPrimOpApp; arg = arg->primOpApp.left) vArgs[n--] = arg->primOpApp.right; @@ -1035,13 +1096,13 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po ExprLambda & lambda(*fun.lambda.fun); - unsigned int size = + auto size = (lambda.arg.empty() ? 0 : 1) + (lambda.matchAttrs ? lambda.formals->formals.size() : 0); Env & env2(allocEnv(size)); env2.up = fun.lambda.env; - unsigned int displ = 0; + size_t displ = 0; if (!lambda.matchAttrs) env2.values[displ++] = &arg; @@ -1055,7 +1116,7 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po /* For each formal argument, get the actual argument. If there is no matching actual argument but the formal argument has a default, use the default. 
*/ - unsigned int attrsUsed = 0; + size_t attrsUsed = 0; for (auto & i : lambda.formals->formals) { Bindings::iterator j = arg.attrs->find(i.name); if (j == arg.attrs->end()) { @@ -1147,7 +1208,7 @@ void ExprWith::eval(EvalState & state, Env & env, Value & v) Env & env2(state.allocEnv(1)); env2.up = &env; env2.prevWith = prevWith; - env2.haveWithAttrs = false; + env2.type = Env::HasWithExpr; env2.values[0] = (Value *) attrs; body->eval(state, env2, v); @@ -1253,15 +1314,15 @@ void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) } -void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos) +void EvalState::concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos) { nrListConcats++; Value * nonEmpty = 0; - unsigned int len = 0; - for (unsigned int n = 0; n < nrLists; ++n) { + size_t len = 0; + for (size_t n = 0; n < nrLists; ++n) { forceList(*lists[n], pos); - unsigned int l = lists[n]->listSize(); + auto l = lists[n]->listSize(); len += l; if (l) nonEmpty = lists[n]; } @@ -1273,9 +1334,10 @@ void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, co mkList(v, len); auto out = v.listElems(); - for (unsigned int n = 0, pos = 0; n < nrLists; ++n) { - unsigned int l = lists[n]->listSize(); - memcpy(out + pos, lists[n]->listElems(), l * sizeof(Value *)); + for (size_t n = 0, pos = 0; n < nrLists; ++n) { + auto l = lists[n]->listSize(); + if (l) + memcpy(out + pos, lists[n]->listElems(), l * sizeof(Value *)); pos += l; } } @@ -1368,7 +1430,7 @@ void EvalState::forceValueDeep(Value & v) } else if (v.isList()) { - for (unsigned int n = 0; n < v.listSize(); ++n) + for (size_t n = 0; n < v.listSize(); ++n) recurse(*v.listElems()[n]); } }; @@ -1520,7 +1582,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, if (v.isList()) { string result; - for (unsigned int n = 0; n < v.listSize(); ++n) { + for (size_t n = 0; n < v.listSize(); ++n) { result += coerceToString(pos, *v.listElems()[n], context, coerceMore, copyToStore); if (n < v.listSize() - 1 @@ -1546,7 +1608,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path) dstPath = srcToStore[path]; else { dstPath = settings.readOnlyMode - ? store->computeStorePathForPath(checkSourcePath(path)).first + ? 
store->computeStorePathForPath(baseNameOf(path), checkSourcePath(path)).first : store->addToStore(baseNameOf(path), checkSourcePath(path), true, htSHA256, defaultPathFilter, repair); srcToStore[path] = dstPath; printMsg(lvlChatty, format("copied source '%1%' -> '%2%'") @@ -1607,7 +1669,7 @@ bool EvalState::eqValues(Value & v1, Value & v2) case tList2: case tListN: if (v1.listSize() != v2.listSize()) return false; - for (unsigned int n = 0; n < v1.listSize(); ++n) + for (size_t n = 0; n < v1.listSize(); ++n) if (!eqValues(*v1.listElems()[n], *v2.listElems()[n])) return false; return true; @@ -1668,10 +1730,13 @@ void EvalState::printStats() printMsg(v, format(" time elapsed: %1%") % cpuTime); printMsg(v, format(" size of a value: %1%") % sizeof(Value)); printMsg(v, format(" size of an attr: %1%") % sizeof(Attr)); - printMsg(v, format(" environments allocated: %1% (%2% bytes)") % nrEnvs % bEnvs); - printMsg(v, format(" list elements: %1% (%2% bytes)") % nrListElems % bLists); + printMsg(v, format(" environments allocated count: %1%") % nrEnvs); + printMsg(v, format(" environments allocated bytes: %1%") % bEnvs); + printMsg(v, format(" list elements count: %1%") % nrListElems); + printMsg(v, format(" list elements bytes: %1%") % bLists); printMsg(v, format(" list concatenations: %1%") % nrListConcats); - printMsg(v, format(" values allocated: %1% (%2% bytes)") % nrValues % bValues); + printMsg(v, format(" values allocated count: %1%") % nrValues); + printMsg(v, format(" values allocated bytes: %1%") % bValues); printMsg(v, format(" sets allocated: %1% (%2% bytes)") % nrAttrsets % bAttrsets); printMsg(v, format(" right-biased unions: %1%") % nrOpUpdates); printMsg(v, format(" values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied); @@ -1695,26 +1760,26 @@ void EvalState::printStats() v = lvlInfo; printMsg(v, format("calls to %1% primops:") % primOpCalls.size()); - typedef std::multimap<unsigned int, Symbol> PrimOpCalls_; + typedef std::multimap<size_t, Symbol> PrimOpCalls_; PrimOpCalls_ primOpCalls_; for (auto & i : primOpCalls) - primOpCalls_.insert(std::pair<unsigned int, Symbol>(i.second, i.first)); + primOpCalls_.insert(std::pair<size_t, Symbol>(i.second, i.first)); for (auto i = primOpCalls_.rbegin(); i != primOpCalls_.rend(); ++i) printMsg(v, format("%1$10d %2%") % i->first % i->second); printMsg(v, format("calls to %1% functions:") % functionCalls.size()); - typedef std::multimap<unsigned int, ExprLambda *> FunctionCalls_; + typedef std::multimap<size_t, ExprLambda *> FunctionCalls_; FunctionCalls_ functionCalls_; for (auto & i : functionCalls) - functionCalls_.insert(std::pair<unsigned int, ExprLambda *>(i.second, i.first)); + functionCalls_.insert(std::pair<size_t, ExprLambda *>(i.second, i.first)); for (auto i = functionCalls_.rbegin(); i != functionCalls_.rend(); ++i) printMsg(v, format("%1$10d %2%") % i->first % i->second->showNamePos()); printMsg(v, format("evaluations of %1% attributes:") % attrSelects.size()); - typedef std::multimap<unsigned int, Pos> AttrSelects_; + typedef std::multimap<size_t, Pos> AttrSelects_; AttrSelects_ attrSelects_; for (auto & i : attrSelects) - attrSelects_.insert(std::pair<unsigned int, Pos>(i.second, i.first)); + attrSelects_.insert(std::pair<size_t, Pos>(i.second, i.first)); for (auto i = attrSelects_.rbegin(); i != attrSelects_.rend(); ++i) printMsg(v, format("%1$10d %2%") % i->first % i->second); @@ -1765,7 +1830,7 @@ size_t valueSize(Value & v) if (seen.find(v.listElems()) == seen.end()) { seen.insert(v.listElems()); sz += 
v.listSize() * sizeof(Value *); - for (unsigned int n = 0; n < v.listSize(); ++n) + for (size_t n = 0; n < v.listSize(); ++n) sz += doValue(*v.listElems()[n]); } break; @@ -1801,9 +1866,10 @@ size_t valueSize(Value & v) size_t sz = sizeof(Env) + sizeof(Value *) * env.size; - for (unsigned int i = 0; i < env.size; ++i) - if (env.values[i]) - sz += doValue(*env.values[i]); + if (env.type != Env::HasWithExpr) + for (size_t i = 0; i < env.size; ++i) + if (env.values[i]) + sz += doValue(*env.values[i]); if (env.up) sz += doEnv(*env.up); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index f0ab1435bff3..3725e45dad9b 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -7,6 +7,7 @@ #include "hash.hh" #include <map> +#include <unordered_map> namespace nix { @@ -34,8 +35,8 @@ struct Env { Env * up; unsigned short size; // used by ‘valueSize’ - unsigned short prevWith:15; // nr of levels up to next `with' environment - unsigned short haveWithAttrs:1; + unsigned short prevWith:14; // nr of levels up to next `with' environment + enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2; Value * values[0]; }; @@ -69,16 +70,17 @@ public: const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue, sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, sToString, - sRight, sWrong, sStructuredAttrs, sBuilder; + sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, + sOutputHash, sOutputHashAlgo, sOutputHashMode; Symbol sDerivationNix; /* If set, force copying files to the Nix store even if they already exist there. */ RepairFlag repair; - /* If set, don't allow access to files outside of the Nix search - path or to environment variables. */ - bool restricted; + /* The allowed filesystem paths in restricted or pure evaluation + mode. */ + std::experimental::optional<PathSet> allowedPaths; Value vEmptySet; @@ -99,6 +101,9 @@ private: std::map<std::string, std::pair<bool, std::string>> searchPathResolved; + /* Cache used by checkSourcePath(). */ + std::unordered_map<Path, Path> resolvedPaths; + public: EvalState(const Strings & _searchPath, ref<Store> store); @@ -112,6 +117,15 @@ public: void checkURI(const std::string & uri); + /* When using a diverted store and 'path' is in the Nix store, map + 'path' to the diverted location (e.g. /nix/store/foo is mapped + to /home/alice/my-nix/nix/store/foo). However, this is only + done if the context is not empty, since otherwise we're + probably trying to read from the actual /nix/store. This is + intended to distinguish between import-from-derivation and + sources stored in the actual /nix/store. */ + Path toRealPath(const Path & path, const PathSet & context); + /* Parse a Nix expression from the specified file. */ Expr * parseExprFromFile(const Path & path); Expr * parseExprFromFile(const Path & path, StaticEnv & staticEnv); @@ -201,10 +215,10 @@ private: void createBaseEnv(); - void addConstant(const string & name, Value & v); + Value * addConstant(const string & name, Value & v); - void addPrimOp(const string & name, - unsigned int arity, PrimOpFun primOp); + Value * addPrimOp(const string & name, + size_t arity, PrimOpFun primOp); public: @@ -238,18 +252,18 @@ public: /* Allocation primitives. 
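Several of the declarations below change from unsigned int to size_t. The point is that a 32-bit parameter silently truncates very large sizes, whereas taking size_t and checking explicitly (as the new allocEnv()/allocBindings() bounds checks do) turns the overflow into a clean error. A small demonstration of the hazard; the function names are invented for the example, and the truncation shown assumes a 64-bit platform with a 32-bit unsigned int:

#include <cstddef>
#include <iostream>
#include <limits>
#include <stdexcept>

// Old-style interface: a size above UINT_MAX is silently truncated.
static void reserveOld(unsigned int n)
{
    std::cout << "old interface reserves " << n << " slots" << std::endl;
}

// New-style interface: accept size_t and reject sizes that don't fit the
// narrow internal field, analogous to the checks added in this commit.
static void reserveNew(size_t n)
{
    if (n > std::numeric_limits<unsigned short>::max())
        throw std::runtime_error("size is too big");
    std::cout << "new interface reserves " << n << " slots" << std::endl;
}

int main()
{
    size_t big = (size_t(1) << 32) + 7;         // 4294967303 on a 64-bit platform
    reserveOld(static_cast<unsigned int>(big)); // prints 7: the high bits are gone
    try {
        reserveNew(big);
    } catch (std::exception & e) {
        std::cout << "rejected: " << e.what() << std::endl;
    }
}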
*/ Value * allocValue(); - Env & allocEnv(unsigned int size); + Env & allocEnv(size_t size); Value * allocAttr(Value & vAttrs, const Symbol & name); - Bindings * allocBindings(Bindings::size_t capacity); + Bindings * allocBindings(size_t capacity); - void mkList(Value & v, unsigned int length); - void mkAttrs(Value & v, unsigned int capacity); + void mkList(Value & v, size_t length); + void mkAttrs(Value & v, size_t capacity); void mkThunk_(Value & v, Expr * expr); void mkPos(Value & v, Pos * pos); - void concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos); + void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos); /* Print statistics. */ void printStats(); @@ -272,15 +286,15 @@ private: bool countCalls; - typedef std::map<Symbol, unsigned int> PrimOpCalls; + typedef std::map<Symbol, size_t> PrimOpCalls; PrimOpCalls primOpCalls; - typedef std::map<ExprLambda *, unsigned int> FunctionCalls; + typedef std::map<ExprLambda *, size_t> FunctionCalls; FunctionCalls functionCalls; void incrFunctionCall(ExprLambda * fun); - typedef std::map<Pos, unsigned int> AttrSelects; + typedef std::map<Pos, size_t> AttrSelects; AttrSelects attrSelects; friend struct ExprOpUpdate; diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index d5bc42352a26..d38ed2df3b18 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -1,6 +1,7 @@ #include "get-drvs.hh" #include "util.hh" #include "eval-inline.hh" +#include "derivations.hh" #include <cstring> #include <regex> @@ -15,6 +16,33 @@ DrvInfo::DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs) } +DrvInfo::DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs) + : state(&state), attrs(nullptr), attrPath("") +{ + auto spec = parseDrvPathWithOutputs(drvPathWithOutputs); + + drvPath = spec.first; + + auto drv = store->derivationFromPath(drvPath); + + name = storePathToName(drvPath); + + if (spec.second.size() > 1) + throw Error("building more than one derivation output is not supported, in '%s'", drvPathWithOutputs); + + outputName = + spec.second.empty() + ? 
get(drv.env, "outputName", "out") + : *spec.second.begin(); + + auto i = drv.outputs.find(outputName); + if (i == drv.outputs.end()) + throw Error("derivation '%s' does not have output '%s'", drvPath, outputName); + + outPath = i->second.path; +} + + string DrvInfo::queryName() const { if (name == "" && attrs) { diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index 32294e458751..4d9128e3f448 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -37,6 +37,7 @@ public: DrvInfo(EvalState & state) : state(&state) { }; DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs); + DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs); string queryName() const; string querySystem() const; diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc index 9380de3a66b3..8b1404595548 100644 --- a/src/libexpr/json-to-value.cc +++ b/src/libexpr/json-to-value.cc @@ -106,10 +106,16 @@ static void parseJSON(EvalState & state, const char * & s, Value & v) tmp_number += *s++; } - if (number_type == tFloat) - mkFloat(v, stod(tmp_number)); - else - mkInt(v, stoi(tmp_number)); + try { + if (number_type == tFloat) + mkFloat(v, stod(tmp_number)); + else + mkInt(v, stoi(tmp_number)); + } catch (std::invalid_argument e) { + throw JSONParseError("invalid JSON number"); + } catch (std::out_of_range e) { + throw JSONParseError("out-of-range JSON number"); + } } else if (strncmp(s, "true", 4) == 0) { diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index 828356bbf447..29ca327c1e4e 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -49,9 +49,10 @@ static void adjustLoc(YYLTYPE * loc, const char * s, size_t len) } -static Expr * unescapeStr(SymbolTable & symbols, const char * s) +static Expr * unescapeStr(SymbolTable & symbols, const char * s, size_t length) { string t; + t.reserve(length); char c; while ((c = *s++)) { if (c == '\\') { @@ -84,13 +85,14 @@ static Expr * unescapeStr(SymbolTable & symbols, const char * s) %} +ANY .|\n ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]* INT [0-9]+ FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)? PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/? HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/? SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\> -URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:\/\/[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+|channel\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ +URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ %% @@ -145,12 +147,12 @@ or { return OR_KW; } <INITIAL,INSIDE_DOLLAR_CURLY>\" { PUSH_STATE(STRING); return '"'; } -<STRING>([^\$\"\\]|\$[^\{\"\\]|\\.|\$\\.)*\$/\" | -<STRING>([^\$\"\\]|\$[^\{\"\\]|\\.|\$\\.)+ { +<STRING>([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})*\$/\" | +<STRING>([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})+ { /* It is impossible to match strings ending with '$' with one regex because trailing contexts are only valid at the end of a rule. (A sane but undocumented limitation.) */ - yylval->e = unescapeStr(data->symbols, yytext); + yylval->e = unescapeStr(data->symbols, yytext, yyleng); return STR; } <STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } @@ -177,8 +179,8 @@ or { return OR_KW; } yylval->e = new ExprIndStr("''"); return IND_STR; } -<IND_STRING>\'\'\\. 
{ - yylval->e = unescapeStr(data->symbols, yytext + 2); +<IND_STRING>\'\'\\{ANY} { + yylval->e = unescapeStr(data->symbols, yytext + 2, yyleng - 2); return IND_STR; } <IND_STRING>\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } @@ -207,11 +209,13 @@ or { return OR_KW; } \#[^\r\n]* /* single-line comments */ \/\*([^*]|\*+[^*/])*\*+\/ /* long comments */ -. return yytext[0]; +{ANY} { + /* Don't return a negative number, as this will cause + Bison to stop parsing without an error. */ + return (unsigned char) yytext[0]; + } } -<<EOF>> { data->atEnd = true; return 0; } - %% diff --git a/src/libexpr/names.cc b/src/libexpr/names.cc index 6d78d2116121..382088c78872 100644 --- a/src/libexpr/names.cc +++ b/src/libexpr/names.cc @@ -41,7 +41,7 @@ bool DrvName::matches(DrvName & n) } -static string nextComponent(string::const_iterator & p, +string nextComponent(string::const_iterator & p, const string::const_iterator end) { /* Skip any dots and dashes (component separators). */ diff --git a/src/libexpr/names.hh b/src/libexpr/names.hh index 9667fc96fd0f..13c3093e77b0 100644 --- a/src/libexpr/names.hh +++ b/src/libexpr/names.hh @@ -24,6 +24,8 @@ private: typedef list<DrvName> DrvNames; +string nextComponent(string::const_iterator & p, + const string::const_iterator end); int compareVersions(const string & v1, const string & v2); DrvNames drvNamesFromArgs(const Strings & opArgs); diff --git a/src/libexpr/nix-expr.pc.in b/src/libexpr/nix-expr.pc.in index 21b6c38dd133..79f3e2f4506e 100644 --- a/src/libexpr/nix-expr.pc.in +++ b/src/libexpr/nix-expr.pc.in @@ -7,4 +7,4 @@ Description: Nix Package Manager Version: @PACKAGE_VERSION@ Requires: nix-store bdw-gc Libs: -L${libdir} -lnixexpr -Cflags: -I${includedir}/nix +Cflags: -I${includedir}/nix -std=c++14 diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 7b0a127cd41c..63cbef1ddf84 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -10,7 +10,7 @@ namespace nix { /* Displaying abstract syntax trees. */ -std::ostream & operator << (std::ostream & str, Expr & e) +std::ostream & operator << (std::ostream & str, const Expr & e) { e.show(str); return str; @@ -58,48 +58,48 @@ std::ostream & operator << (std::ostream & str, const Symbol & sym) return str; } -void Expr::show(std::ostream & str) +void Expr::show(std::ostream & str) const { abort(); } -void ExprInt::show(std::ostream & str) +void ExprInt::show(std::ostream & str) const { str << n; } -void ExprFloat::show(std::ostream & str) +void ExprFloat::show(std::ostream & str) const { str << nf; } -void ExprString::show(std::ostream & str) +void ExprString::show(std::ostream & str) const { showString(str, s); } -void ExprPath::show(std::ostream & str) +void ExprPath::show(std::ostream & str) const { str << s; } -void ExprVar::show(std::ostream & str) +void ExprVar::show(std::ostream & str) const { str << name; } -void ExprSelect::show(std::ostream & str) +void ExprSelect::show(std::ostream & str) const { str << "(" << *e << ")." << showAttrPath(attrPath); if (def) str << " or (" << *def << ")"; } -void ExprOpHasAttr::show(std::ostream & str) +void ExprOpHasAttr::show(std::ostream & str) const { str << "((" << *e << ") ? 
" << showAttrPath(attrPath) << ")"; } -void ExprAttrs::show(std::ostream & str) +void ExprAttrs::show(std::ostream & str) const { if (recursive) str << "rec "; str << "{ "; @@ -113,7 +113,7 @@ void ExprAttrs::show(std::ostream & str) str << "}"; } -void ExprList::show(std::ostream & str) +void ExprList::show(std::ostream & str) const { str << "[ "; for (auto & i : elems) @@ -121,7 +121,7 @@ void ExprList::show(std::ostream & str) str << "]"; } -void ExprLambda::show(std::ostream & str) +void ExprLambda::show(std::ostream & str) const { str << "("; if (matchAttrs) { @@ -143,7 +143,7 @@ void ExprLambda::show(std::ostream & str) str << ": " << *body << ")"; } -void ExprLet::show(std::ostream & str) +void ExprLet::show(std::ostream & str) const { str << "(let "; for (auto & i : attrs->attrs) @@ -155,27 +155,27 @@ void ExprLet::show(std::ostream & str) str << "in " << *body << ")"; } -void ExprWith::show(std::ostream & str) +void ExprWith::show(std::ostream & str) const { str << "(with " << *attrs << "; " << *body << ")"; } -void ExprIf::show(std::ostream & str) +void ExprIf::show(std::ostream & str) const { str << "(if " << *cond << " then " << *then << " else " << *else_ << ")"; } -void ExprAssert::show(std::ostream & str) +void ExprAssert::show(std::ostream & str) const { str << "assert " << *cond << "; " << *body; } -void ExprOpNot::show(std::ostream & str) +void ExprOpNot::show(std::ostream & str) const { str << "(! " << *e << ")"; } -void ExprConcatStrings::show(std::ostream & str) +void ExprConcatStrings::show(std::ostream & str) const { bool first = true; str << "("; @@ -186,7 +186,7 @@ void ExprConcatStrings::show(std::ostream & str) str << ")"; } -void ExprPos::show(std::ostream & str) +void ExprPos::show(std::ostream & str) const { str << "__curPos"; } diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh index 30be79bb57a6..665a42987dc1 100644 --- a/src/libexpr/nixexpr.hh +++ b/src/libexpr/nixexpr.hh @@ -11,7 +11,6 @@ namespace nix { MakeError(EvalError, Error) MakeError(ParseError, Error) -MakeError(IncompleteParseError, ParseError) MakeError(AssertionError, EvalError) MakeError(ThrownError, AssertionError) MakeError(Abort, EvalError) @@ -76,17 +75,17 @@ string showAttrPath(const AttrPath & attrPath); struct Expr { virtual ~Expr() { }; - virtual void show(std::ostream & str); + virtual void show(std::ostream & str) const; virtual void bindVars(const StaticEnv & env); virtual void eval(EvalState & state, Env & env, Value & v); virtual Value * maybeThunk(EvalState & state, Env & env); virtual void setName(Symbol & name); }; -std::ostream & operator << (std::ostream & str, Expr & e); +std::ostream & operator << (std::ostream & str, const Expr & e); #define COMMON_METHODS \ - void show(std::ostream & str); \ + void show(std::ostream & str) const; \ void eval(EvalState & state, Env & env, Value & v); \ void bindVars(const StaticEnv & env); @@ -255,7 +254,7 @@ struct ExprWith : Expr { Pos pos; Expr * attrs, * body; - unsigned int prevWith; + size_t prevWith; ExprWith(const Pos & pos, Expr * attrs, Expr * body) : pos(pos), attrs(attrs), body(body) { }; COMMON_METHODS }; @@ -283,13 +282,13 @@ struct ExprOpNot : Expr }; #define MakeBinOp(name, s) \ - struct Expr##name : Expr \ + struct name : Expr \ { \ Pos pos; \ Expr * e1, * e2; \ - Expr##name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \ - Expr##name(const Pos & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \ - void show(std::ostream & str) \ + name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \ + name(const Pos & pos, 
Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \ + void show(std::ostream & str) const \ { \ str << "(" << *e1 << " " s " " << *e2 << ")"; \ } \ @@ -300,14 +299,14 @@ struct ExprOpNot : Expr void eval(EvalState & state, Env & env, Value & v); \ }; -MakeBinOp(App, "") -MakeBinOp(OpEq, "==") -MakeBinOp(OpNEq, "!=") -MakeBinOp(OpAnd, "&&") -MakeBinOp(OpOr, "||") -MakeBinOp(OpImpl, "->") -MakeBinOp(OpUpdate, "//") -MakeBinOp(OpConcatLists, "++") +MakeBinOp(ExprApp, "") +MakeBinOp(ExprOpEq, "==") +MakeBinOp(ExprOpNEq, "!=") +MakeBinOp(ExprOpAnd, "&&") +MakeBinOp(ExprOpOr, "||") +MakeBinOp(ExprOpImpl, "->") +MakeBinOp(ExprOpUpdate, "//") +MakeBinOp(ExprOpConcatLists, "++") struct ExprConcatStrings : Expr { diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index ef11dd609217..eee48887dc22 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -31,12 +31,10 @@ namespace nix { Path basePath; Symbol path; string error; - bool atEnd; Symbol sLetBody; ParseData(EvalState & state) : state(state) , symbols(state.symbols) - , atEnd(false) , sLetBody(symbols.create("<let-body>")) { }; }; @@ -136,8 +134,8 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Ex whitespace-only final lines are not taken into account. (So the " " in "\n ''" is ignored, but the " " in "\n foo''" is.) */ bool atStartOfLine = true; /* = seen only whitespace in the current line */ - unsigned int minIndent = 1000000; - unsigned int curIndent = 0; + size_t minIndent = 1000000; + size_t curIndent = 0; for (auto & i : es) { ExprIndStr * e = dynamic_cast<ExprIndStr *>(i); if (!e) { @@ -148,7 +146,7 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Ex } continue; } - for (unsigned int j = 0; j < e->s.size(); ++j) { + for (size_t j = 0; j < e->s.size(); ++j) { if (atStartOfLine) { if (e->s[j] == ' ') curIndent++; @@ -170,8 +168,8 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Ex /* Strip spaces from each line. */ vector<Expr *> * es2 = new vector<Expr *>; atStartOfLine = true; - unsigned int curDropped = 0; - unsigned int n = es.size(); + size_t curDropped = 0; + size_t n = es.size(); for (vector<Expr *>::iterator i = es.begin(); i != es.end(); ++i, --n) { ExprIndStr * e = dynamic_cast<ExprIndStr *>(*i); if (!e) { @@ -182,7 +180,7 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Ex } string s2; - for (unsigned int j = 0; j < e->s.size(); ++j) { + for (size_t j = 0; j < e->s.size(); ++j) { if (atStartOfLine) { if (e->s[j] == ' ') { if (curDropped++ >= minIndent) @@ -541,12 +539,7 @@ Expr * EvalState::parse(const char * text, int res = yyparse(scanner, &data); yylex_destroy(scanner); - if (res) { - if (data.atEnd) - throw IncompleteParseError(data.error); - else - throw ParseError(data.error); - } + if (res) throw ParseError(data.error); data.result->bindVars(staticEnv); diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index e3b5dfb420b4..9dab8ecb0464 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -39,7 +39,7 @@ std::pair<string, string> decodeContext(const string & s) size_t index = s.find("!", 1); return std::pair<string, string>(string(s, index + 1), string(s, 1, index - 1)); } else - return std::pair<string, string>(s.at(0) == '/' ? s: string(s, 1), ""); + return std::pair<string, string>(s.at(0) == '/' ? 
s : string(s, 1), ""); } @@ -49,24 +49,38 @@ InvalidPathError::InvalidPathError(const Path & path) : void EvalState::realiseContext(const PathSet & context) { PathSet drvs; + for (auto & i : context) { std::pair<string, string> decoded = decodeContext(i); Path ctx = decoded.first; assert(store->isStorePath(ctx)); if (!store->isValidPath(ctx)) throw InvalidPathError(ctx); - if (!decoded.second.empty() && nix::isDerivation(ctx)) + if (!decoded.second.empty() && nix::isDerivation(ctx)) { drvs.insert(decoded.first + "!" + decoded.second); + + /* Add the output of this derivation to the allowed + paths. */ + if (allowedPaths) { + auto drv = store->derivationFromPath(decoded.first); + DerivationOutputs::iterator i = drv.outputs.find(decoded.second); + if (i == drv.outputs.end()) + throw Error("derivation '%s' does not have an output named '%s'", decoded.first, decoded.second); + allowedPaths->insert(i->second.path); + } + } } - if (!drvs.empty()) { - if (!settings.enableImportFromDerivation) - throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); - /* For performance, prefetch all substitute info. */ - PathSet willBuild, willSubstitute, unknown; - unsigned long long downloadSize, narSize; - store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); - store->buildPaths(drvs); - } + + if (drvs.empty()) return; + + if (!settings.enableImportFromDerivation) + throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); + + /* For performance, prefetch all substitute info. */ + PathSet willBuild, willSubstitute, unknown; + unsigned long long downloadSize, narSize; + store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); + store->buildPaths(drvs); } @@ -84,10 +98,10 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args % path % e.path % pos); } - path = state.checkSourcePath(path); + Path realPath = state.checkSourcePath(state.toRealPath(path, context)); if (state.store->isStorePath(path) && state.store->isValidPath(path) && isDerivation(path)) { - Derivation drv = readDerivation(path); + Derivation drv = readDerivation(realPath); Value & w = *state.allocValue(); state.mkAttrs(w, 3 + drv.outputs.size()); Value * v2 = state.allocAttr(w, state.sDrvPath); @@ -114,7 +128,7 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args } else { state.forceAttrs(*args[0]); if (args[0]->attrs->empty()) - state.evalFile(path, v); + state.evalFile(realPath, v); else { Env * env = &state.allocEnv(args[0]->attrs->size()); env->up = &state.baseEnv; @@ -127,8 +141,8 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args env->values[displ++] = attr.value; } - printTalkative("evaluating file '%1%'", path); - Expr * e = state.parseExprFromFile(resolveExprPath(path), staticEnv); + printTalkative("evaluating file '%1%'", realPath); + Expr * e = state.parseExprFromFile(resolveExprPath(realPath), staticEnv); e->eval(state, *env, v); } @@ -141,7 +155,7 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args extern "C" typedef void (*ValueInitializer)(EvalState & state, Value & v); /* Load a ValueInitializer from a DSO and return whatever it initializes */ -static void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v) +void prim_importNative(EvalState & state, const Pos & pos, 
Value * * args, Value & v) { PathSet context; Path path = state.coerceToPath(pos, *args[0], context); @@ -179,7 +193,7 @@ static void prim_importNative(EvalState & state, const Pos & pos, Value * * args /* Execute a program and parse its output */ -static void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v) +void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v) { state.forceList(*args[0], pos); auto elems = args[0]->listElems(); @@ -257,7 +271,18 @@ static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Valu static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args, Value & v) { state.forceValue(*args[0]); - mkBool(v, args[0]->type == tLambda); + bool res; + switch (args[0]->type) { + case tLambda: + case tPrimOp: + case tPrimOpApp: + res = true; + break; + default: + res = false; + break; + } + mkBool(v, res); } @@ -439,7 +464,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v) { string name = state.forceStringNoCtx(*args[0], pos); - mkString(v, state.restricted ? "" : getEnv(name)); + mkString(v, settings.restrictEval || settings.pureEval ? "" : getEnv(name)); } @@ -539,7 +564,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * for (auto & i : args[0]->attrs->lexicographicOrder()) { if (i->name == state.sIgnoreNulls) continue; - string key = i->name; + const string & key = i->name; vomit("processing attribute '%1%'", key); auto handleHashMode = [&](const std::string & s) { @@ -575,7 +600,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * /* The `args' attribute is special: it supplies the command-line arguments to the builder. */ - if (key == "args") { + if (i->name == state.sArgs) { state.forceList(*i->value, pos); for (unsigned int n = 0; n < i->value->listSize(); ++n) { string s = state.coerceToString(posDrvName, *i->value->listElems()[n], context, true); @@ -598,15 +623,13 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * drv.builder = state.forceString(*i->value, context, posDrvName); else if (i->name == state.sSystem) drv.platform = state.forceStringNoCtx(*i->value, posDrvName); - else if (i->name == state.sName) - drvName = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHash") + else if (i->name == state.sOutputHash) outputHash = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHashAlgo") + else if (i->name == state.sOutputHashAlgo) outputHashAlgo = state.forceStringNoCtx(*i->value, posDrvName); - else if (key == "outputHashMode") + else if (i->name == state.sOutputHashMode) handleHashMode(state.forceStringNoCtx(*i->value, posDrvName)); - else if (key == "outputs") { + else if (i->name == state.sOutputs) { /* Require ‘outputs’ to be a list of strings. 
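The string comparisons in this hunk (key == "outputHash" and friends) are replaced by comparisons against symbols created once in EvalState (state.sOutputHash and so on). The payoff is that an interned symbol compares by identity rather than character by character. A toy version of the idea; SymbolTable here is a deliberately simplified stand-in, not the real nix::SymbolTable:

#include <iostream>
#include <string>
#include <unordered_set>

// Each distinct string is stored once; a Symbol is just a pointer to that
// storage, so equality is a pointer comparison instead of a string comparison.
class SymbolTable
{
    std::unordered_set<std::string> store;
public:
    using Symbol = const std::string *;
    Symbol create(const std::string & s) { return &*store.insert(s).first; }
};

int main()
{
    SymbolTable symbols;
    auto sOutputHash = symbols.create("outputHash");  // interned once up front
    auto attrName    = symbols.create("outputHash");  // what an attribute carries

    // O(1) identity compare, analogous to `i->name == state.sOutputHash` above.
    std::cout << (attrName == sOutputHash ? "match" : "no match") << std::endl;
}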
*/ state.forceList(*i->value, posDrvName); Strings ss; @@ -620,14 +643,10 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * drv.env.emplace(key, s); if (i->name == state.sBuilder) drv.builder = s; else if (i->name == state.sSystem) drv.platform = s; - else if (i->name == state.sName) { - drvName = s; - printMsg(lvlVomit, format("derivation name is '%1%'") % drvName); - } - else if (key == "outputHash") outputHash = s; - else if (key == "outputHashAlgo") outputHashAlgo = s; - else if (key == "outputHashMode") handleHashMode(s); - else if (key == "outputs") + else if (i->name == state.sOutputHash) outputHash = s; + else if (i->name == state.sOutputHashAlgo) outputHashAlgo = s; + else if (i->name == state.sOutputHashMode) handleHashMode(s); + else if (i->name == state.sOutputs) handleOutputs(tokenizeString<Strings>(s)); } @@ -863,7 +882,7 @@ static void prim_readFile(EvalState & state, const Pos & pos, Value * * args, Va throw EvalError(format("cannot read '%1%', since path '%2%' is not valid, at %3%") % path % e.path % pos); } - string s = readFile(state.checkSourcePath(path)); + string s = readFile(state.checkSourcePath(state.toRealPath(path, context))); if (s.find((char) 0) != string::npos) throw Error(format("the contents of the file '%1%' cannot be represented as a Nix string") % path); mkString(v, s.c_str()); @@ -1009,20 +1028,13 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu } -static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v) +static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_, + Value * filterFun, bool recursive, const Hash & expectedHash, Value & v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[1], context); - if (!context.empty()) - throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % pos); - - state.forceValue(*args[0]); - if (args[0]->type != tLambda) - throw TypeError(format("first argument in call to 'filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos); - - path = state.checkSourcePath(path); - - PathFilter filter = [&](const Path & path) { + const auto path = settings.pureEval && expectedHash ? + path_ : + state.checkSourcePath(path_); + PathFilter filter = filterFun ? ([&](const Path & path) { auto st = lstat(path); /* Call the filter function. The first argument is the path, @@ -1031,7 +1043,7 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args mkString(arg1, path); Value fun2; - state.callFunction(*args[0], arg1, fun2, noPos); + state.callFunction(*filterFun, arg1, fun2, noPos); Value arg2; mkString(arg2, @@ -1044,16 +1056,79 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args state.callFunction(fun2, arg2, res, noPos); return state.forceBool(res, pos); - }; + }) : defaultPathFilter; - Path dstPath = settings.readOnlyMode - ? state.store->computeStorePathForPath(path, true, htSHA256, filter).first - : state.store->addToStore(baseNameOf(path), path, true, htSHA256, filter, state.repair); + Path expectedStorePath; + if (expectedHash) { + expectedStorePath = + state.store->makeFixedOutputPath(recursive, expectedHash, name); + } + Path dstPath; + if (!expectedHash || !state.store->isValidPath(expectedStorePath)) { + dstPath = settings.readOnlyMode + ? 
state.store->computeStorePathForPath(name, path, recursive, htSHA256, filter).first + : state.store->addToStore(name, path, recursive, htSHA256, filter, state.repair); + if (expectedHash && expectedStorePath != dstPath) { + throw Error(format("store path mismatch in (possibly filtered) path added from '%1%'") % path); + } + } else + dstPath = expectedStorePath; mkString(v, dstPath, {dstPath}); } +static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + Path path = state.coerceToPath(pos, *args[1], context); + if (!context.empty()) + throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % pos); + + state.forceValue(*args[0]); + if (args[0]->type != tLambda) + throw TypeError(format("first argument in call to 'filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos); + + addPath(state, pos, baseNameOf(path), path, args[0], true, Hash(), v); +} + +static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceAttrs(*args[0], pos); + Path path; + string name; + Value * filterFun = nullptr; + auto recursive = true; + Hash expectedHash; + + for (auto & attr : *args[0]->attrs) { + const string & n(attr.name); + if (n == "path") { + PathSet context; + path = state.coerceToPath(*attr.pos, *attr.value, context); + if (!context.empty()) + throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % *attr.pos); + } else if (attr.name == state.sName) + name = state.forceStringNoCtx(*attr.value, *attr.pos); + else if (n == "filter") { + state.forceValue(*attr.value); + filterFun = attr.value; + } else if (n == "recursive") + recursive = state.forceBool(*attr.value, *attr.pos); + else if (n == "sha256") + expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256); + else + throw EvalError(format("unsupported argument '%1%' to 'addPath', at %2%") % attr.name % *attr.pos); + } + if (path.empty()) + throw EvalError(format("'path' required, at %1%") % pos); + if (name.empty()) + name = baseNameOf(path); + + addPath(state, pos, name, path, filterFun, recursive, expectedHash, v); +} + + /************************************************************* * Sets *************************************************************/ @@ -1068,8 +1143,11 @@ static void prim_attrNames(EvalState & state, const Pos & pos, Value * * args, V state.mkList(v, args[0]->attrs->size()); size_t n = 0; - for (auto & i : args[0]->attrs->lexicographicOrder()) - mkString(*(v.listElems()[n++] = state.allocValue()), i->name); + for (auto & i : *args[0]->attrs) + mkString(*(v.listElems()[n++] = state.allocValue()), i.name); + + std::sort(v.listElems(), v.listElems() + n, + [](Value * v1, Value * v2) { return strcmp(v1->string.s, v2->string.s) < 0; }); } @@ -1534,12 +1612,16 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V state.mkAttrs(v, 2); Value * vRight = state.allocAttr(v, state.sRight); - state.mkList(*vRight, right.size()); - memcpy(vRight->listElems(), right.data(), sizeof(Value *) * right.size()); + auto rsize = right.size(); + state.mkList(*vRight, rsize); + if (rsize) + memcpy(vRight->listElems(), right.data(), sizeof(Value *) * rsize); Value * vWrong = state.allocAttr(v, state.sWrong); - state.mkList(*vWrong, wrong.size()); - memcpy(vWrong->listElems(), wrong.data(), sizeof(Value *) * wrong.size()); + auto wsize = wrong.size(); + state.mkList(*vWrong, wsize); + if (wsize) + memcpy(vWrong->listElems(), 
wrong.data(), sizeof(Value *) * wsize); v.attrs->sort(); } @@ -1594,6 +1676,20 @@ static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & } } +static void prim_bitAnd(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) & state.forceInt(*args[1], pos)); +} + +static void prim_bitOr(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) | state.forceInt(*args[1], pos)); +} + +static void prim_bitXor(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) ^ state.forceInt(*args[1], pos)); +} static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v) { @@ -1653,6 +1749,14 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, } +static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + state.forceString(*args[0], context, pos); + mkBool(v, !context.empty()); +} + + /* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a builder without causing the derivation to be built (for instance, in the derivation that builds NARs in nix-push, when doing @@ -1838,21 +1942,32 @@ static void prim_replaceStrings(EvalState & state, const Pos & pos, Value * * ar auto s = state.forceString(*args[2], context, pos); string res; - for (size_t p = 0; p < s.size(); ) { + // Loops one past last character to handle the case where 'from' contains an empty string. + for (size_t p = 0; p <= s.size(); ) { bool found = false; auto i = from.begin(); auto j = to.begin(); for (; i != from.end(); ++i, ++j) if (s.compare(p, i->size(), *i) == 0) { found = true; - p += i->size(); res += j->first; + if (i->empty()) { + if (p < s.size()) + res += s[p]; + p++; + } else { + p += i->size(); + } for (auto& path : j->second) context.insert(path); j->second.clear(); break; } - if (!found) res += s[p++]; + if (!found) { + if (p < s.size()) + res += s[p]; + p++; + } } mkString(v, res, context); @@ -1883,6 +1998,26 @@ static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * a } +static void prim_splitVersion(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + string version = state.forceStringNoCtx(*args[0], pos); + auto iter = version.cbegin(); + Strings components; + while (iter != version.cend()) { + auto component = nextComponent(iter, version.cend()); + if (component.empty()) + break; + components.emplace_back(std::move(component)); + } + state.mkList(v, components.size()); + unsigned int n = 0; + for (auto & component : components) { + auto listElem = v.listElems()[n++] = state.allocValue(); + mkString(*listElem, std::move(component)); + } +} + + /************************************************************* * Networking *************************************************************/ @@ -1921,7 +2056,14 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, state.checkURI(url); + if (settings.pureEval && !expectedHash) + throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); + Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash); + + if (state.allowedPaths) + state.allowedPaths->insert(res); + mkString(v, res, PathSet({res})); } @@ -1973,11 +2115,24 @@ void EvalState::createBaseEnv() mkNull(v); addConstant("null", v); - mkInt(v, time(0)); - addConstant("__currentTime", v); + auto vThrow = addPrimOp("throw", 1, 
prim_throw); - mkString(v, settings.thisSystem); - addConstant("__currentSystem", v); + auto addPurityError = [&](const std::string & name) { + Value * v2 = allocValue(); + mkString(*v2, fmt("'%s' is not allowed in pure evaluation mode", name)); + mkApp(v, *vThrow, *v2); + addConstant(name, v); + }; + + if (!settings.pureEval) { + mkInt(v, time(0)); + addConstant("__currentTime", v); + } + + if (!settings.pureEval) { + mkString(v, settings.thisSystem); + addConstant("__currentSystem", v); + } mkString(v, nixVersion); addConstant("__nixVersion", v); @@ -1993,10 +2148,10 @@ void EvalState::createBaseEnv() addConstant("__langVersion", v); // Miscellaneous - addPrimOp("scopedImport", 2, prim_scopedImport); + auto vScopedImport = addPrimOp("scopedImport", 2, prim_scopedImport); Value * v2 = allocValue(); mkAttrs(*v2, 0); - mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2); + mkApp(v, *vScopedImport, *v2); forceValue(v); addConstant("import", v); if (settings.enableNativeCode) { @@ -2012,7 +2167,6 @@ void EvalState::createBaseEnv() addPrimOp("__isBool", 1, prim_isBool); addPrimOp("__genericClosure", 1, prim_genericClosure); addPrimOp("abort", 1, prim_abort); - addPrimOp("throw", 1, prim_throw); addPrimOp("__addErrorContext", 2, prim_addErrorContext); addPrimOp("__tryEval", 1, prim_tryEval); addPrimOp("__getEnv", 1, prim_getEnv); @@ -2027,7 +2181,10 @@ void EvalState::createBaseEnv() // Paths addPrimOp("__toPath", 1, prim_toPath); - addPrimOp("__storePath", 1, prim_storePath); + if (settings.pureEval) + addPurityError("__storePath"); + else + addPrimOp("__storePath", 1, prim_storePath); addPrimOp("__pathExists", 1, prim_pathExists); addPrimOp("baseNameOf", 1, prim_baseNameOf); addPrimOp("dirOf", 1, prim_dirOf); @@ -2041,6 +2198,7 @@ void EvalState::createBaseEnv() addPrimOp("__fromJSON", 1, prim_fromJSON); addPrimOp("__toFile", 2, prim_toFile); addPrimOp("__filterSource", 2, prim_filterSource); + addPrimOp("__path", 1, prim_path); // Sets addPrimOp("__attrNames", 1, prim_attrNames); @@ -2077,12 +2235,16 @@ void EvalState::createBaseEnv() addPrimOp("__sub", 2, prim_sub); addPrimOp("__mul", 2, prim_mul); addPrimOp("__div", 2, prim_div); + addPrimOp("__bitAnd", 2, prim_bitAnd); + addPrimOp("__bitOr", 2, prim_bitOr); + addPrimOp("__bitXor", 2, prim_bitXor); addPrimOp("__lessThan", 2, prim_lessThan); // String manipulation addPrimOp("toString", 1, prim_toString); addPrimOp("__substring", 3, prim_substring); addPrimOp("__stringLength", 1, prim_stringLength); + addPrimOp("__hasContext", 1, prim_hasContext); addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); addPrimOp("__hashString", 2, prim_hashString); @@ -2094,6 +2256,7 @@ void EvalState::createBaseEnv() // Versions addPrimOp("__parseDrvName", 1, prim_parseDrvName); addPrimOp("__compareVersions", 2, prim_compareVersions); + addPrimOp("__splitVersion", 1, prim_splitVersion); // Derivations addPrimOp("derivationStrict", 1, prim_derivationStrict); @@ -2105,7 +2268,7 @@ void EvalState::createBaseEnv() /* Add a wrapper around the derivation primop that computes the `drvPath' and `outPath' attributes lazily. 
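Editor's note for readers tracing this hunk from the language side: primops registered as "__name" surface as builtins.name. A minimal sketch of how the new builtins added here might be exercised (the results shown are my own reading of the semantics above, not text from the patch; the sha256 attribute is commented out because a real hash would be required):

    # bitwise integer operations (__bitAnd / __bitOr / __bitXor)
    builtins.bitAnd 5 3    # => 1
    builtins.bitOr  5 3    # => 7
    builtins.bitXor 5 3    # => 6

    # __splitVersion cuts a version string into the components used by compareVersions
    builtins.splitVersion "2.1.3"    # => [ "2" "1" "3" ]

    # __hasContext reports whether a string carries store-path context
    builtins.hasContext "plain text"    # => false

    # __getEnv now returns "" under restrict-eval as well as pure-eval
    builtins.getEnv "HOME"

    # __path: filterSource plus an explicit name, recursion flag and optional expected hash
    builtins.path {
      path = ./src;                  # required
      name = "my-source";            # optional; defaults to baseNameOf path
      filter = path: type: true;     # optional; omitted means defaultPathFilter
      recursive = true;              # optional; true is the default
      # sha256 = "<expected hash>";  # optional; if the resulting store path already
      #                              # exists, the source is not copied again
    }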
*/ - string path = settings.nixDataDir + "/nix/corepkgs/derivation.nix"; + string path = canonPath(settings.nixDataDir + "/nix/corepkgs/derivation.nix", true); sDerivationNix = symbols.create(path); evalFile(path, v); addConstant("derivation", v); diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh index 39d23b04a5ce..c790b30f6d0b 100644 --- a/src/libexpr/primops.hh +++ b/src/libexpr/primops.hh @@ -9,7 +9,18 @@ struct RegisterPrimOp { typedef std::vector<std::tuple<std::string, size_t, PrimOpFun>> PrimOps; static PrimOps * primOps; + /* You can register a constant by passing an arity of 0. fun + will get called during EvalState initialization, so there + may be primops not yet added and builtins is not yet sorted. */ RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun); }; +/* These primops are disabled without enableNativeCode, but plugins + may wish to use them in limited contexts without globally enabling + them. */ +/* Load a ValueInitializer from a DSO and return whatever it initializes */ +void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v); +/* Execute a program and parse its output */ +void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v); + } diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index bca68ed72091..8bb74dad639e 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -22,10 +22,15 @@ struct GitInfo uint64_t revCount = 0; }; +std::regex revRegex("^[0-9a-fA-F]{40}$"); + GitInfo exportGit(ref<Store> store, const std::string & uri, - std::experimental::optional<std::string> ref, const std::string & rev, + std::experimental::optional<std::string> ref, std::string rev, const std::string & name) { + if (settings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); + if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { bool clean = true; @@ -56,8 +61,9 @@ GitInfo exportGit(ref<Store> store, const std::string & uri, auto st = lstat(p); if (S_ISDIR(st.st_mode)) { - auto i = files.lower_bound(file); - return i != files.end() && hasPrefix(*i, file); + auto prefix = file + "/"; + auto i = files.lower_bound(prefix); + return i != files.end() && hasPrefix(*i, prefix); } return files.count(file); @@ -67,20 +73,20 @@ GitInfo exportGit(ref<Store> store, const std::string & uri, return gitInfo; } + + // clean working tree, but no ref or rev specified. Use 'HEAD'. + rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })); + ref = "HEAD"s; } - if (!ref) ref = "master"s; + if (!ref) ref = "HEAD"s; - if (rev != "") { - std::regex revRegex("^[0-9a-fA-F]{40}$"); - if (!std::regex_match(rev, revRegex)) - throw Error("invalid Git revision '%s'", rev); - } + if (rev != "" && !std::regex_match(rev, revRegex)) + throw Error("invalid Git revision '%s'", rev); Path cacheDir = getCacheDir() + "/nix/git"; if (!pathExists(cacheDir)) { - createDirs(cacheDir); runProgram("git", true, { "init", "--bare", cacheDir }); } @@ -88,32 +94,43 @@ GitInfo exportGit(ref<Store> store, const std::string & uri, Path localRefFile = cacheDir + "/refs/heads/" + localRef; - /* If the local ref is older than ‘tarball-ttl’ seconds, do a git - fetch to update the local ref to the remote ref. 
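For orientation, the same patch tightens the fetchers as seen from Nix expressions: the generic fetch() helper and exportGit above now refuse to run in pure evaluation mode without a sha256 or Git revision respectively, and fetchGit's ref now defaults to "HEAD" rather than "master". A hedged sketch (URLs and hash values are placeholders, not taken from the patch); fetchMercurial, further down, gains the same pure-mode requirement for rev:

    # pinned fetchGit, as required under pure evaluation
    builtins.fetchGit {
      url = "https://example.org/some/repo.git";          # placeholder URL
      ref = "release-1.0";                                 # optional; now defaults to "HEAD"
      rev = "0000000000000000000000000000000000000000";    # placeholder; must match revRegex
    }

    # fetchTarball likewise needs an expected hash in pure mode
    builtins.fetchTarball {
      url = "https://example.org/source.tar.gz";           # placeholder URL
      sha256 = "...";                                      # required when pure-eval is on
    }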
*/ + bool doFetch; time_t now = time(0); - struct stat st; - if (stat(localRefFile.c_str(), &st) != 0 || - st.st_mtime <= now - settings.tarballTtl) - { - if (rev == "" || - chomp(runProgram( - RunOptions("git", { "-C", cacheDir, "cat-file", "-t", rev }) - .killStderr(true)).second) != "commit") - { - Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri)); - - // FIXME: git stderr messes up our progress indicator, so - // we're using --quiet for now. Should process its stderr. - runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, *ref + ":" + localRef }); - - struct timeval times[2]; - times[0].tv_sec = now; - times[0].tv_usec = 0; - times[1].tv_sec = now; - times[1].tv_usec = 0; - - utimes(localRefFile.c_str(), times); + /* If a rev was specified, we need to fetch if it's not in the + repo. */ + if (rev != "") { + try { + runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev }); + doFetch = false; + } catch (ExecError & e) { + if (WIFEXITED(e.status)) { + doFetch = true; + } else { + throw; + } } + } else { + /* If the local ref is older than ‘tarball-ttl’ seconds, do a + git fetch to update the local ref to the remote ref. */ + struct stat st; + doFetch = stat(localRefFile.c_str(), &st) != 0 || + st.st_mtime + settings.tarballTtl <= now; + } + if (doFetch) + { + Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri)); + + // FIXME: git stderr messes up our progress indicator, so + // we're using --quiet for now. Should process its stderr. + runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, *ref + ":" + localRef }); + + struct timeval times[2]; + times[0].tv_sec = now; + times[0].tv_usec = 0; + times[1].tv_sec = now; + times[1].tv_usec = 0; + + utimes(localRefFile.c_str(), times); } // FIXME: check whether rev is an ancestor of ref. @@ -121,7 +138,7 @@ GitInfo exportGit(ref<Store> store, const std::string & uri, gitInfo.rev = rev != "" ? 
rev : chomp(readFile(localRefFile)); gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); - printTalkative("using revision %s of repo '%s'", uri, gitInfo.rev); + printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri); std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + gitInfo.rev).to_string(Base32, false); Path storeLink = cacheDir + "/" + storeLinkName + ".link"; @@ -216,6 +233,9 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev); mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount); v.attrs->sort(); + + if (state.allowedPaths) + state.allowedPaths->insert(gitInfo.storePath); } static RegisterPrimOp r("fetchGit", 1, prim_fetchGit); diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 7def7103bf3d..a75c5fc2ddff 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -27,6 +27,9 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$"); HgInfo exportMercurial(ref<Store> store, const std::string & uri, std::string rev, const std::string & name) { + if (settings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision"); + if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) { bool clean = runProgram("hg", true, { "status", "-R", uri, "--modified", "--added", "--removed" }) == ""; @@ -52,8 +55,9 @@ HgInfo exportMercurial(ref<Store> store, const std::string & uri, auto st = lstat(p); if (S_ISDIR(st.st_mode)) { - auto i = files.lower_bound(file); - return i != files.end() && hasPrefix(*i, file); + auto prefix = file + "/"; + auto i = files.lower_bound(prefix); + return i != files.end() && hasPrefix(*i, prefix); } return files.count(file); @@ -76,7 +80,7 @@ HgInfo exportMercurial(ref<Store> store, const std::string & uri, time_t now = time(0); struct stat st; if (stat(stampFile.c_str(), &st) != 0 || - st.st_mtime <= now - settings.tarballTtl) + st.st_mtime + settings.tarballTtl <= now) { /* Except that if this is a commit hash that we already have, we don't have to pull again. */ @@ -195,6 +199,9 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar mkString(*state.allocAttr(v, state.symbols.create("shortRev")), std::string(hgInfo.rev, 0, 12)); mkInt(*state.allocAttr(v, state.symbols.create("revCount")), hgInfo.revCount); v.attrs->sort(); + + if (state.allowedPaths) + state.allowedPaths->insert(hgInfo.storePath); } static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial); diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh index c2ee49dd32fb..44929f7eea06 100644 --- a/src/libexpr/symbol-table.hh +++ b/src/libexpr/symbol-table.hh @@ -69,7 +69,7 @@ public: return Symbol(&*res.first); } - unsigned int size() const + size_t size() const { return symbols.size(); } diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 9df516f062ef..66b41a158400 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -128,7 +128,7 @@ struct Value const char * path; Bindings * attrs; struct { - unsigned int size; + size_t size; Value * * elems; } bigList; Value * smallList[2]; @@ -166,7 +166,7 @@ struct Value return type == tList1 || type == tList2 ? smallList : bigList.elems; } - unsigned int listSize() const + size_t listSize() const { return type == tList1 ? 1 : type == tList2 ? 
2 : bigList.size; } diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index ea27aaa35e03..bcc05c2cdad6 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -34,6 +34,14 @@ MixCommonArgs::MixCommonArgs(const string & programName) warn(e.what()); } }); + + std::string cat = "config"; + settings.convertToArgs(*this, cat); + + // Backward compatibility hack: nix-env already had a --system flag. + if (programName == "nix-env") longFlags.erase("system"); + + hiddenCategories.insert(cat); } } diff --git a/src/libmain/nix-main.pc.in b/src/libmain/nix-main.pc.in index de1bdf706f72..38bc85c484eb 100644 --- a/src/libmain/nix-main.pc.in +++ b/src/libmain/nix-main.pc.in @@ -6,4 +6,4 @@ Name: Nix Description: Nix Package Manager Version: @PACKAGE_VERSION@ Libs: -L${libdir} -lnixmain -Cflags: -I${includedir}/nix +Cflags: -I${includedir}/nix -std=c++14 diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 0f599f388585..91a4eaf922a6 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -193,9 +193,6 @@ LegacyArgs::LegacyArgs(const std::string & programName, mkFlag(0, "readonly-mode", "do not write to the Nix store", &settings.readOnlyMode); - mkFlag(0, "show-trace", "show Nix expression stack trace in evaluation errors", - &settings.showTrace); - mkFlag(0, "no-gc-warning", "disable warning about not using '--add-root'", &gcWarning, false); @@ -265,7 +262,8 @@ void printVersion(const string & programName) void showManPage(const string & name) { restoreSignals(); - execlp("man", "man", name.c_str(), NULL); + setenv("MANPATH", settings.nixManDir.c_str(), 1); + execlp("man", "man", name.c_str(), nullptr); throw SysError(format("command 'man %1%' failed") % name.c_str()); } @@ -327,10 +325,10 @@ RunPager::RunPager() setenv("LESS", "FRSXMK", 1); restoreSignals(); if (pager) - execl("/bin/sh", "sh", "-c", pager, NULL); - execlp("pager", "pager", NULL); - execlp("less", "less", NULL); - execlp("more", "more", NULL); + execl("/bin/sh", "sh", "-c", pager, nullptr); + execlp("pager", "pager", nullptr); + execlp("less", "less", nullptr); + execlp("more", "more", nullptr); throw SysError(format("executing '%1%'") % pager); }); @@ -369,5 +367,6 @@ PrintFreed::~PrintFreed() % showBytes(results.bytesFreed); } +Exit::~Exit() { } } diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 9219dbed8325..8e4861232db5 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -17,10 +17,12 @@ public: int status; Exit() : status(0) { } Exit(int status) : status(status) { } + virtual ~Exit(); }; int handleExceptions(const string & programName, std::function<void()> fun); +/* Don't forget to call initPlugins() after settings are initialized! */ void initNix(); void parseCmdLine(int argc, char * * argv, diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc index cc0eea68fca3..13896aeecb6e 100644 --- a/src/libmain/stack.cc +++ b/src/libmain/stack.cc @@ -30,7 +30,7 @@ static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) if (diff < 0) diff = -diff; if (diff < 4096) { char msg[] = "error: stack overflow (possible infinite recursion)\n"; - [[gnu::unused]] int res = write(2, msg, strlen(msg)); + [[gnu::unused]] auto res = write(2, msg, strlen(msg)); _exit(1); // maybe abort instead? 
} } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 67607ab3d43a..2e9a13e564ca 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -73,6 +73,23 @@ Path BinaryCacheStore::narInfoFileFor(const Path & storePath) return storePathToHash(storePath) + ".narinfo"; } +void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo) +{ + auto narInfoFile = narInfoFileFor(narInfo->path); + + upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo"); + + auto hashPart = storePathToHash(narInfo->path); + + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo)); + } + + if (diskCache) + diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo)); +} + void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) { @@ -89,8 +106,6 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str % info.path % ref); } - auto narInfoFile = narInfoFileFor(info.path); - assert(nar->compare(0, narMagic.size(), narMagic) == 0); auto narInfo = make_ref<NarInfo>(info); @@ -114,47 +129,12 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str auto narAccessor = makeNarAccessor(nar); - if (accessor_) { - accessor_->nars.emplace(info.path, narAccessor); - accessor_->addToCache(info.path, *nar); - } - - std::function<void(const Path &, JSONPlaceholder &)> recurse; - - recurse = [&](const Path & path, JSONPlaceholder & res) { - auto st = narAccessor->stat(path); - - auto obj = res.object(); - - switch (st.type) { - case FSAccessor::Type::tRegular: - obj.attr("type", "regular"); - obj.attr("size", st.fileSize); - if (st.isExecutable) - obj.attr("executable", true); - break; - case FSAccessor::Type::tDirectory: - obj.attr("type", "directory"); - { - auto res2 = obj.object("entries"); - for (auto & name : narAccessor->readDirectory(path)) { - auto res3 = res2.placeholder(name); - recurse(path + "/" + name, res3); - } - } - break; - case FSAccessor::Type::tSymlink: - obj.attr("type", "symlink"); - obj.attr("target", narAccessor->readLink(path)); - break; - default: - abort(); - } - }; + if (accessor_) + accessor_->addToCache(info.path, *nar, narAccessor); { auto res = jsonRoot.placeholder("root"); - recurse("", res); + listNar(res, narAccessor, "", true); } } @@ -162,16 +142,14 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str } else { - if (accessor_) { - accessor_->nars.emplace(info.path, makeNarAccessor(nar)); - accessor_->addToCache(info.path, *nar); - } + if (accessor_) + accessor_->addToCache(info.path, *nar, makeNarAccessor(nar)); } /* Compress the NAR. 
*/ narInfo->compression = compression; auto now1 = std::chrono::steady_clock::now(); - auto narCompressed = compress(compression, *nar); + auto narCompressed = compress(compression, *nar, parallelCompression); auto now2 = std::chrono::steady_clock::now(); narInfo->fileHash = hashString(htSHA256, *narCompressed); narInfo->fileSize = narCompressed->size(); @@ -201,17 +179,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str /* Atomically write the NAR info file.*/ if (secretKey) narInfo->sign(*secretKey); - upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo"); - - auto hashPart = storePathToHash(narInfo->path); - - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo)); - } - - if (diskCache) - diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo)); + writeNarInfo(narInfo); stats.narInfoWrite++; } @@ -235,22 +203,18 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink) stats.narRead++; stats.narReadCompressedBytes += nar->size(); - /* Decompress the NAR. FIXME: would be nice to have the remote - side do this. */ - try { - nar = decompress(info->compression, *nar); - } catch (UnknownCompressionMethod &) { - throw Error(format("binary cache path '%s' uses unknown compression method '%s'") - % storePath % info->compression); - } + uint64_t narSize = 0; - stats.narReadBytes += nar->size(); + StringSource source(*nar); - printMsg(lvlTalkative, format("exporting path '%1%' (%2% bytes)") % storePath % nar->size()); + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + narSize += len; + }); - assert(nar->size() % 8 == 0); + decompress(info->compression, source, wrapperSink); - sink((unsigned char *) nar->c_str(), nar->size()); + stats.narReadBytes += narSize; } void BinaryCacheStore::queryPathInfoUncached(const Path & storePath, @@ -326,6 +290,22 @@ ref<FSAccessor> BinaryCacheStore::getFSAccessor() return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache); } +void BinaryCacheStore::addSignatures(const Path & storePath, const StringSet & sigs) +{ + /* Note: this is inherently racy since there is no locking on + binary caches. In particular, with S3 this unreliable, even + when addSignatures() is called sequentially on a path, because + S3 might return an outdated cached version. 
*/ + + auto narInfo = make_ref<NarInfo>((NarInfo &) *queryPathInfo(storePath)); + + narInfo->sigs.insert(sigs.begin(), sigs.end()); + + auto narInfoFile = narInfoFileFor(narInfo->path); + + writeNarInfo(narInfo); +} + std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path) { Path drvPath; diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index d3b0e0bd9332..e20b968442b7 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -19,6 +19,8 @@ public: const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"}; const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"}; + const Setting<bool> parallelCompression{this, false, "parallel-compression", + "enable multi-threading compression, available for xz only currently"}; private: @@ -59,6 +61,8 @@ private: std::string narInfoFileFor(const Path & storePath); + void writeNarInfo(ref<NarInfo> narInfo); + public: bool isValidPathUncached(const Path & path) override; @@ -119,8 +123,7 @@ public: ref<FSAccessor> getFSAccessor() override; - void addSignatures(const Path & storePath, const StringSet & sigs) override - { unsupported(); } + void addSignatures(const Path & storePath, const StringSet & sigs) override; std::shared_ptr<std::string> getBuildLog(const Path & path) override; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 061682377257..f70ab8108fd7 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -6,6 +6,7 @@ #include "archive.hh" #include "affinity.hh" #include "builtins.hh" +#include "download.hh" #include "finally.hh" #include "compression.hh" #include "json.hh" @@ -48,7 +49,9 @@ #include <sys/param.h> #include <sys/mount.h> #include <sys/syscall.h> +#if HAVE_SECCOMP #include <seccomp.h> +#endif #define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old)) #endif @@ -79,7 +82,7 @@ typedef std::shared_ptr<Goal> GoalPtr; typedef std::weak_ptr<Goal> WeakGoalPtr; struct CompareGoalPtrs { - bool operator() (const GoalPtr & a, const GoalPtr & b); + bool operator() (const GoalPtr & a, const GoalPtr & b) const; }; /* Set of goals. */ @@ -153,7 +156,7 @@ public: abort(); } - void trace(const format & f); + void trace(const FormatOrString & fs); string getName() { @@ -178,7 +181,7 @@ protected: }; -bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) { +bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const { string s1 = a->key(); string s2 = b->key(); return s1 < s2; @@ -414,9 +417,9 @@ void Goal::amDone(ExitCode result) } -void Goal::trace(const format & f) +void Goal::trace(const FormatOrString & fs) { - debug(format("%1%: %2%") % name % f); + debug("%1%: %2%", name, fs.s); } @@ -649,6 +652,11 @@ HookInstance::HookInstance() if (dup2(builderOut.writeSide.get(), 4) == -1) throw SysError("dupping builder's stdout/stderr"); + /* Hack: pass the read side of that fd to allow build-remote + to read SSH error messages. */ + if (dup2(builderOut.readSide.get(), 5) == -1) + throw SysError("dupping builder's stdout/stderr"); + Strings args = { baseNameOf(settings.buildHook), std::to_string(verbosity), @@ -834,9 +842,9 @@ private: BuildResult result; /* The current round, if we're building multiple times. 
*/ - unsigned int curRound = 1; + size_t curRound = 1; - unsigned int nrRounds; + size_t nrRounds; /* Path registration info from the previous round, if we're building multiple times. Since this contains the hash, it @@ -956,6 +964,8 @@ private: } void done(BuildResult::Status status, const string & msg = ""); + + PathSet exportReferences(PathSet storePaths); }; @@ -1123,11 +1133,6 @@ void DerivationGoal::haveDerivation() return; } - /* Reject doing a hash build of anything other than a fixed-output - derivation. */ - if (buildMode == bmHash && !drv->isFixedOutput()) - throw Error("cannot do a hash build of non-fixed-output derivation '%1%'", drvPath); - /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ @@ -1164,7 +1169,7 @@ void DerivationGoal::outputsSubstituted() return; } - unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); + auto nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); if (buildMode == bmNormal && nrInvalid == 0) { done(BuildResult::Substituted); return; @@ -1191,7 +1196,7 @@ void DerivationGoal::outputsSubstituted() for (auto & i : drv->inputSrcs) { if (worker.store.isValidPath(i)) continue; if (!settings.useSubstitutes) - throw Error(format("dependency of '%1%' of '%2%' does not exist, and substitution is disabled") + throw Error(format("dependency '%1%' of '%2%' does not exist, and substitution is disabled") % i % drvPath); addWaitee(worker.makeSubstitutionGoal(i)); } @@ -1319,9 +1324,7 @@ void DerivationGoal::inputsRealised() allPaths.insert(inputPaths.begin(), inputPaths.end()); /* Is this a fixed-output derivation? */ - fixedOutput = true; - for (auto & i : drv->outputs) - if (i.second.hash == "") fixedOutput = false; + fixedOutput = drv->isFixedOutput(); /* Don't repeat fixed-output derivations since they're already verified by their output hash.*/ @@ -1341,19 +1344,6 @@ void DerivationGoal::tryToBuild() { trace("trying to build"); - /* Check for the possibility that some other goal in this process - has locked the output since we checked in haveDerivation(). - (It can't happen between here and the lockPaths() call below - because we're not allowing multi-threading.) If so, put this - goal to sleep until another goal finishes, then try again. */ - for (auto & i : drv->outputs) - if (pathIsLockedByMe(worker.store.toRealPath(i.second.path))) { - debug(format("putting derivation '%1%' to sleep because '%2%' is locked by another goal") - % drvPath % i.second.path); - worker.waitForAnyGoal(shared_from_this()); - return; - } - /* Obtain locks on all output paths. The locks are automatically released when we exit this function or Nix crashes. If we can't acquire the lock, then continue; hopefully some other @@ -1475,7 +1465,7 @@ void replaceValidPath(const Path & storePath, const Path tmpPath) tmpPath (the replacement), so we have to move it out of the way first. We'd better not be interrupted here, because if we're repairing (say) Glibc, we end up with a broken system. 
*/ - Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % rand()).str(); + Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str(); if (pathExists(storePath)) rename(storePath.c_str(), oldPath.c_str()); if (rename(tmpPath.c_str(), storePath.c_str()) == -1) @@ -1608,8 +1598,8 @@ void DerivationGoal::buildDone() outputLocks.unlock(); } catch (BuildError & e) { - if (!hook) - printError(e.msg()); + printError(e.msg()); + outputLocks.unlock(); BuildResult::Status st = BuildResult::MiscFailure; @@ -1742,22 +1732,23 @@ int childEntry(void * arg) } -PathSet exportReferences(Store & store, PathSet storePaths) +PathSet DerivationGoal::exportReferences(PathSet storePaths) { PathSet paths; for (auto storePath : storePaths) { /* Check that the store path is valid. */ - if (!store.isInStore(storePath)) + if (!worker.store.isInStore(storePath)) throw BuildError(format("'exportReferencesGraph' contains a non-store path '%1%'") % storePath); - storePath = store.toStorePath(storePath); - if (!store.isValidPath(storePath)) - throw BuildError(format("'exportReferencesGraph' contains an invalid path '%1%'") - % storePath); - store.computeFSClosure(storePath, paths); + storePath = worker.store.toStorePath(storePath); + + if (!inputPaths.count(storePath)) + throw BuildError("cannot export references of path '%s' because it is not in the input closure of the derivation", storePath); + + worker.store.computeFSClosure(storePath, paths); } /* If there are derivations in the graph, then include their @@ -1768,15 +1759,28 @@ PathSet exportReferences(Store & store, PathSet storePaths) for (auto & j : paths2) { if (isDerivation(j)) { - Derivation drv = store.derivationFromPath(j); + Derivation drv = worker.store.derivationFromPath(j); for (auto & k : drv.outputs) - store.computeFSClosure(k.second.path, paths); + worker.store.computeFSClosure(k.second.path, paths); } } return paths; } +static std::once_flag dns_resolve_flag; + +static void preloadNSS() { + /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of + one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already + been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to + load its lookup libraries in the parent before any child gets a chance to. */ + std::call_once(dns_resolve_flag, []() { + DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid"); + request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning + try { getDownloader()->download(request); } catch (...) {} + }); +} void DerivationGoal::startBuilder() { @@ -1787,6 +1791,9 @@ void DerivationGoal::startBuilder() % drv->platform % settings.thisSystem % drvPath); } + if (drv->isBuiltin()) + preloadNSS(); + #if __APPLE__ additionalSandboxProfile = get(drv->env, "__sandboxProfile"); #endif @@ -1810,8 +1817,13 @@ void DerivationGoal::startBuilder() useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1"; } - if (worker.store.storeDir != worker.store.realStoreDir) - useChroot = true; + if (worker.store.storeDir != worker.store.realStoreDir) { + #if __linux__ + useChroot = true; + #else + throw Error("building using a diverted store is not supported on this platform"); + #endif + } /* If `build-users-group' is not empty, then we have to build as one of the members of that group. */ @@ -1873,7 +1885,7 @@ void DerivationGoal::startBuilder() /* Write closure info to <fileName>. 
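As a language-level aside on the exportReferences change above: the graph is requested through the exportReferencesGraph derivation attribute, and with the new check the listed path has to lie inside the derivation's input closure. A rough sketch of the conventional flat name/path-pair form (the builder, script and someInputPath are stand-ins, not from the patch):

    derivation {
      name = "closure-info";
      system = builtins.currentSystem;
      builder = "/bin/sh";                    # placeholder
      args = [ "-c" "cp closure $out" ];      # placeholder; the 'closure' file is written
                                              # into the build directory by Nix
      # flat list of <fileName> <storePath> pairs; a path outside the input closure
      # now fails with the BuildError added above
      exportReferencesGraph = [ "closure" someInputPath ];
    }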
*/ writeFile(tmpDir + "/" + fileName, worker.store.makeValidityRegistration( - exportReferences(worker.store, {storePath}), false, false)); + exportReferences({storePath}), false, false)); } } @@ -2375,7 +2387,7 @@ void DerivationGoal::writeStructuredAttrs() for (auto & p : *i) storePaths.insert(p.get<std::string>()); worker.store.pathInfoToJSON(jsonRoot, - exportReferences(worker.store, storePaths), false, true); + exportReferences(storePaths), false, true); } json[i.key()] = nlohmann::json::parse(str.str()); // urgh } @@ -2469,7 +2481,7 @@ void setupSeccomp() { #if __linux__ if (!settings.filterSyscalls) return; - +#if HAVE_SECCOMP scmp_filter_ctx ctx; if (!(ctx = seccomp_init(SCMP_ACT_ALLOW))) @@ -2487,6 +2499,10 @@ void setupSeccomp() seccomp_arch_add(ctx, SCMP_ARCH_X32) != 0) throw SysError("unable to add X32 seccomp architecture"); + if (settings.thisSystem == "aarch64-linux" && + seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) + printError("unsable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes."); + /* Prevent builders from creating setuid/setgid binaries. */ for (int perm : { S_ISUID, S_ISGID }) { if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1, @@ -2515,6 +2531,11 @@ void setupSeccomp() if (seccomp_load(ctx) != 0) throw SysError("unable to load seccomp BPF program"); +#else + throw Error( + "seccomp is not supported on this platform; " + "you can bypass this error by setting the option 'filter-syscalls' to false, but note that untrusted builds can then create setuid binaries!"); +#endif #endif } @@ -2682,8 +2703,8 @@ void DerivationGoal::runChild() } else { if (errno != EINVAL) throw SysError("mounting /dev/pts"); - doBind("/dev/pts", "/dev/pts"); - doBind("/dev/ptmx", "/dev/ptmx"); + doBind("/dev/pts", chrootRootDir + "/dev/pts"); + doBind("/dev/ptmx", chrootRootDir + "/dev/ptmx"); } } @@ -2928,8 +2949,15 @@ void DerivationGoal::runChild() if (drv->isBuiltin()) { try { logger = makeJSONLogger(*logger); + + BasicDerivation drv2(*drv); + for (auto & e : drv2.env) + e.second = rewriteStrings(e.second, inputRewrites); + if (drv->builder == "builtin:fetchurl") - builtinFetchurl(*drv, netrcData); + builtinFetchurl(drv2, netrcData); + else if (drv->builder == "builtin:buildenv") + builtinBuildenv(drv2); else throw Error(format("unsupported builtin function '%1%'") % string(drv->builder, 8)); _exit(0); @@ -2992,6 +3020,8 @@ void DerivationGoal::registerOutputs() bool runDiffHook = settings.runDiffHook; bool keepPreviousRound = settings.keepFailed || runDiffHook; + std::exception_ptr delayedException; + /* Check whether the output paths were created, and grep each output path to determine what other paths it references. Also make all output paths read-only. */ @@ -3066,7 +3096,7 @@ void DerivationGoal::registerOutputs() /* Check that fixed-output derivations produced the right outputs (i.e., the content hash should match the specified hash). */ - if (i.second.hash != "") { + if (fixedOutput) { bool recursive; Hash h; i.second.parseHashInfo(recursive, h); @@ -3082,27 +3112,34 @@ void DerivationGoal::registerOutputs() /* Check the hash. In hash mode, move the path produced by the derivation to its content-addressed location. */ Hash h2 = recursive ? 
hashPath(h.type, actualPath).first : hashFile(h.type, actualPath); - if (buildMode == bmHash) { - Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); - printError(format("build produced path '%1%' with %2% hash '%3%'") - % dest % printHashType(h.type) % printHash16or32(h2)); - if (worker.store.isValidPath(dest)) - return; + + Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); + + if (h != h2) { + + /* Throw an error after registering the path as + valid. */ + delayedException = std::make_exception_ptr( + BuildError("fixed-output derivation produced path '%s' with %s hash '%s' instead of the expected hash '%s'", + dest, printHashType(h.type), printHash16or32(h2), printHash16or32(h))); + Path actualDest = worker.store.toRealPath(dest); + + if (worker.store.isValidPath(dest)) + std::rethrow_exception(delayedException); + if (actualPath != actualDest) { PathLocks outputLocks({actualDest}); deletePath(actualDest); if (rename(actualPath.c_str(), actualDest.c_str()) == -1) throw SysError(format("moving '%1%' to '%2%'") % actualPath % dest); } + path = dest; actualPath = actualDest; - } else { - if (h != h2) - throw BuildError( - format("output path '%1%' has %2% hash '%3%' when '%4%' was expected") - % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h)); } + else + assert(path == dest); info.ca = makeFixedOutputCA(recursive, h2); } @@ -3212,6 +3249,8 @@ void DerivationGoal::registerOutputs() info.ultimate = true; worker.store.signPathInfo(info); + if (!info.references.empty()) info.ca.clear(); + infos.push_back(info); } @@ -3279,6 +3318,11 @@ void DerivationGoal::registerOutputs() paths referenced by each of them. If there are cycles in the outputs, this will fail. */ worker.store.registerValidPaths(infos); + + /* In case of a fixed-output derivation hash mismatch, throw an + exception now that we have registered the output as valid. */ + if (delayedException) + std::rethrow_exception(delayedException); } @@ -3394,7 +3438,7 @@ void DerivationGoal::flushLine() else { if (settings.verboseBuild && (settings.printRepeatedBuilds || curRound == 1)) - printError(filterANSIEscapes(currentLogLine, true)); + printError(currentLogLine); else { logTail.push_back(currentLogLine); if (logTail.size() > settings.logLines) logTail.pop_front(); @@ -3636,7 +3680,7 @@ void SubstitutionGoal::tryNext() /* Update the total expected download size. */ auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info); - maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, narInfo->narSize); + maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize); maintainExpectedDownload = narInfo && narInfo->fileSize @@ -3650,9 +3694,12 @@ void SubstitutionGoal::tryNext() /* Bail out early if this substituter lacks a valid signature. LocalStore::addToStore() also checks for this, but only after we've downloaded the path. 
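For context on the hunk above: a fixed-output derivation is one that sets the outputHash* attributes handled earlier in prim_derivationStrict. With this change a hash mismatch no longer aborts immediately; the output is moved to its actual content-addressed path, registered, and the error is thrown afterwards. A hedged sketch (builder, script and hash are placeholders):

    derivation {
      name = "example-src";
      system = builtins.currentSystem;
      builder = "/bin/sh";                   # placeholder
      args = [ "-c" "echo hello > $out" ];   # placeholder
      outputHashMode = "flat";               # or "recursive"
      outputHashAlgo = "sha256";
      outputHash = "...";                    # the expected hash; on mismatch the path is
                                             # kept at its real content address and the
                                             # BuildError above is raised after registration
    }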
*/ - if (worker.store.requireSigs && !info->checkSignatures(worker.store, worker.store.publicKeys)) { - printInfo(format("warning: substituter '%s' does not have a valid signature for path '%s'") - % sub->getUri() % storePath); + if (worker.store.requireSigs + && !sub->isTrusted + && !info->checkSignatures(worker.store, worker.store.getPublicKeys())) + { + printError("warning: substituter '%s' does not have a valid signature for path '%s'", + sub->getUri(), storePath); tryNext(); return; } @@ -3702,6 +3749,17 @@ void SubstitutionGoal::tryToRun() return; } + /* If the store path is already locked (probably by a + DerivationGoal), then put this goal to sleep. Note: we don't + acquire a lock here since that breaks addToStore(), so below we + handle an AlreadyLocked exception from addToStore(). The check + here is just an optimisation to prevent having to redo a + download due to a locked path. */ + if (pathIsLockedByMe(worker.store.toRealPath(storePath))) { + worker.waitForAWhile(shared_from_this()); + return; + } + maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions); worker.updateProgress(); @@ -3718,7 +3776,7 @@ void SubstitutionGoal::tryToRun() PushActivity pact(act.id); copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()), - storePath, repair); + storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs); promise.set_value(); } catch (...) { @@ -3741,8 +3799,14 @@ void SubstitutionGoal::finished() try { promise.get_future().get(); + } catch (AlreadyLocked & e) { + /* Probably a DerivationGoal is already building this store + path. Sleep for a while and try again. */ + state = &SubstitutionGoal::init; + worker.waitForAWhile(shared_from_this()); + return; } catch (Error & e) { - printInfo(e.msg()); + printError(e.msg()); /* Try the next substitute. */ state = &SubstitutionGoal::tryNext; @@ -4106,10 +4170,10 @@ void Worker::waitForInput() assert(goal); set<int> fds2(j->fds); + std::vector<unsigned char> buffer(4096); for (auto & k : fds2) { if (FD_ISSET(k, &fds)) { - unsigned char buffer[4096]; - ssize_t rd = read(k, buffer, sizeof(buffer)); + ssize_t rd = read(k, buffer.data(), buffer.size()); if (rd == -1) { if (errno != EINTR) throw SysError(format("reading from %1%") @@ -4121,7 +4185,7 @@ void Worker::waitForInput() } else { printMsg(lvlVomit, format("%1%: read %2% bytes") % goal->getName() % rd); - string data((char *) buffer, rd); + string data((char *) buffer.data(), rd); j->lastOutput = after; goal->handleChildOutput(k, data); } diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh index 0cc6ba31f658..0d2da873ece4 100644 --- a/src/libstore/builtins.hh +++ b/src/libstore/builtins.hh @@ -4,6 +4,8 @@ namespace nix { +// TODO: make pluggable. void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData); +void builtinBuildenv(const BasicDerivation & drv); } diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc new file mode 100644 index 000000000000..74e706664694 --- /dev/null +++ b/src/libstore/builtins/buildenv.cc @@ -0,0 +1,204 @@ +#include "builtins.hh" + +#include <sys/stat.h> +#include <sys/types.h> +#include <fcntl.h> +#include <algorithm> + +namespace nix { + +typedef std::map<Path,int> Priorities; + +// FIXME: change into local variables. 
+ +static Priorities priorities; + +static unsigned long symlinks; + +/* For each activated package, create symlinks */ +static void createLinks(const Path & srcDir, const Path & dstDir, int priority) +{ + DirEntries srcFiles; + + try { + srcFiles = readDirectory(srcDir); + } catch (SysError & e) { + if (e.errNo == ENOTDIR) { + printError("warning: not including '%s' in the user environment because it's not a directory", srcDir); + return; + } + throw; + } + + for (const auto & ent : srcFiles) { + if (ent.name[0] == '.') + /* not matched by glob */ + continue; + auto srcFile = srcDir + "/" + ent.name; + auto dstFile = dstDir + "/" + ent.name; + + struct stat srcSt; + try { + if (stat(srcFile.c_str(), &srcSt) == -1) + throw SysError("getting status of '%1%'", srcFile); + } catch (SysError & e) { + if (e.errNo == ENOENT || e.errNo == ENOTDIR) { + printError("warning: skipping dangling symlink '%s'", dstFile); + continue; + } + throw; + } + + /* The files below are special-cased to that they don't show up + * in user profiles, either because they are useless, or + * because they would cauase pointless collisions (e.g., each + * Python package brings its own + * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.) + */ + if (hasSuffix(srcFile, "/propagated-build-inputs") || + hasSuffix(srcFile, "/nix-support") || + hasSuffix(srcFile, "/perllocal.pod") || + hasSuffix(srcFile, "/info/dir") || + hasSuffix(srcFile, "/log")) + continue; + + else if (S_ISDIR(srcSt.st_mode)) { + struct stat dstSt; + auto res = lstat(dstFile.c_str(), &dstSt); + if (res == 0) { + if (S_ISDIR(dstSt.st_mode)) { + createLinks(srcFile, dstFile, priority); + continue; + } else if (S_ISLNK(dstSt.st_mode)) { + auto target = canonPath(dstFile, true); + if (!S_ISDIR(lstat(target).st_mode)) + throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target); + if (unlink(dstFile.c_str()) == -1) + throw SysError(format("unlinking '%1%'") % dstFile); + if (mkdir(dstFile.c_str(), 0755) == -1) + throw SysError(format("creating directory '%1%'")); + createLinks(target, dstFile, priorities[dstFile]); + createLinks(srcFile, dstFile, priority); + continue; + } + } else if (errno != ENOENT) + throw SysError(format("getting status of '%1%'") % dstFile); + } + + else { + struct stat dstSt; + auto res = lstat(dstFile.c_str(), &dstSt); + if (res == 0) { + if (S_ISLNK(dstSt.st_mode)) { + auto prevPriority = priorities[dstFile]; + if (prevPriority == priority) + throw Error( + "packages '%1%' and '%2%' have the same priority %3%; " + "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " + "to change the priority of one of the conflicting packages" + " (0 being the highest priority)", + srcFile, readLink(dstFile), priority); + if (prevPriority < priority) + continue; + if (unlink(dstFile.c_str()) == -1) + throw SysError(format("unlinking '%1%'") % dstFile); + } else if (S_ISDIR(dstSt.st_mode)) + throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile); + } else if (errno != ENOENT) + throw SysError(format("getting status of '%1%'") % dstFile); + } + + createSymlink(srcFile, dstFile); + priorities[dstFile] = priority; + symlinks++; + } +} + +typedef std::set<Path> FileProp; + +static FileProp done; +static FileProp postponed = FileProp{}; + +static Path out; + +static void addPkg(const Path & pkgDir, int priority) +{ + if (done.count(pkgDir)) return; + done.insert(pkgDir); + createLinks(pkgDir, out, priority); + + try { + for (const auto & p : tokenizeString<std::vector<string>>( + 
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n")) + if (!done.count(p)) + postponed.insert(p); + } catch (SysError & e) { + if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw; + } +} + +struct Package { + Path path; + bool active; + int priority; + Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {} +}; + +typedef std::vector<Package> Packages; + +void builtinBuildenv(const BasicDerivation & drv) +{ + auto getAttr = [&](const string & name) { + auto i = drv.env.find(name); + if (i == drv.env.end()) throw Error("attribute '%s' missing", name); + return i->second; + }; + + out = getAttr("out"); + createDirs(out); + + /* Convert the stuff we get from the environment back into a + * coherent data type. */ + Packages pkgs; + auto derivations = tokenizeString<Strings>(getAttr("derivations")); + while (!derivations.empty()) { + /* !!! We're trusting the caller to structure derivations env var correctly */ + auto active = derivations.front(); derivations.pop_front(); + auto priority = stoi(derivations.front()); derivations.pop_front(); + auto outputs = stoi(derivations.front()); derivations.pop_front(); + for (auto n = 0; n < outputs; n++) { + auto path = derivations.front(); derivations.pop_front(); + pkgs.emplace_back(path, active != "false", priority); + } + } + + /* Symlink to the packages that have been installed explicitly by the + * user. Process in priority order to reduce unnecessary + * symlink/unlink steps. + */ + std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) { + return a.priority < b.priority || (a.priority == b.priority && a.path < b.path); + }); + for (const auto & pkg : pkgs) + if (pkg.active) + addPkg(pkg.path, pkg.priority); + + /* Symlink to the packages that have been "propagated" by packages + * installed by the user (i.e., package X declares that it wants Y + * installed as well). We do these later because they have a lower + * priority in case of collisions. 
+ */ + auto priorityCounter = 1000; + while (!postponed.empty()) { + auto pkgDirs = postponed; + postponed = FileProp{}; + for (const auto & pkgDir : pkgDirs) + addPkg(pkgDir, priorityCounter++); + } + + printError("created %d symlinks in user environment", symlinks); + + createSymlink(getAttr("manifest"), out + "/manifest.nix"); +} + +} diff --git a/src/libstore/builtins.cc b/src/libstore/builtins/fetchurl.cc index 4ca4a838e3c4..4ca4a838e3c4 100644 --- a/src/libstore/builtins.cc +++ b/src/libstore/builtins/fetchurl.cc diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc index f56a6adab9c9..9ec8abd228e9 100644 --- a/src/libstore/crypto.cc +++ b/src/libstore/crypto.cc @@ -105,7 +105,7 @@ PublicKeys getDefaultPublicKeys() // FIXME: filter duplicates - for (auto s : settings.binaryCachePublicKeys.get()) { + for (auto s : settings.trustedPublicKeys.get()) { PublicKey key(s); publicKeys.emplace(key.name, key); } diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index a0a0d78b7d30..74b861281ee0 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -57,16 +57,8 @@ bool BasicDerivation::isBuiltin() const bool BasicDerivation::canBuildLocally() const { return platform == settings.thisSystem - || isBuiltin() -#if __linux__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux") - || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux") - || (platform == "armv5tel-linux" && (settings.thisSystem == "armv7l-linux" || settings.thisSystem == "armv6l-linux")) -#elif __FreeBSD__ - || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd") - || (platform == "i686-linux" && settings.thisSystem == "i686-freebsd") -#endif - ; + || settings.extraPlatforms.get().count(platform) > 0 + || isBuiltin(); } diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 70f9b1f5eacb..54f4dd218007 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -17,11 +17,13 @@ #include <curl/curl.h> -#include <queue> -#include <iostream> -#include <thread> +#include <algorithm> #include <cmath> +#include <cstring> +#include <iostream> +#include <queue> #include <random> +#include <thread> using namespace std::string_literals; @@ -91,6 +93,8 @@ struct CurlDownloader : public Downloader { if (!request.expectedETag.empty()) requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); + if (!request.mimeType.empty()) + requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str()); } ~DownloadItem() @@ -169,7 +173,11 @@ struct CurlDownloader : public Downloader int progressCallback(double dltotal, double dlnow) { - act.progress(dlnow, dltotal); + try { + act.progress(dlnow, dltotal); + } catch (nix::Interrupted &) { + assert(_isInterrupted); + } return _isInterrupted; } @@ -185,6 +193,23 @@ struct CurlDownloader : public Downloader return 0; } + size_t readOffset = 0; + int readCallback(char *buffer, size_t size, size_t nitems) + { + if (readOffset == request.data->length()) + return 0; + auto count = std::min(size * nitems, request.data->length() - readOffset); + assert(count); + memcpy(buffer, request.data->data() + readOffset, count); + readOffset += count; + return count; + } + + static int readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp) + { + return ((DownloadItem *) userp)->readCallback(buffer, size, nitems); + } + long lowSpeedTimeout = 300; void init() @@ -225,6 +250,13 @@ struct CurlDownloader : 
public Downloader if (request.head) curl_easy_setopt(req, CURLOPT_NOBODY, 1); + if (request.data) { + curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); + curl_easy_setopt(req, CURLOPT_READDATA, this); + curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); + } + if (request.verifyTLS) { if (settings.caFile != "") curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str()); @@ -265,7 +297,7 @@ struct CurlDownloader : public Downloader } if (code == CURLE_OK && - (httpStatus == 200 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) + (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) { result.cached = httpStatus == 304; done = true; @@ -303,6 +335,7 @@ struct CurlDownloader : public Downloader // Don't bother retrying on certain cURL errors either switch (code) { case CURLE_FAILED_INIT: + case CURLE_URL_MALFORMAT: case CURLE_NOT_BUILT_IN: case CURLE_REMOTE_ACCESS_DENIED: case CURLE_FILE_COULDNT_READ_FILE: @@ -311,10 +344,11 @@ struct CurlDownloader : public Downloader case CURLE_BAD_FUNCTION_ARGUMENT: case CURLE_INTERFACE_FAILED: case CURLE_UNKNOWN_OPTION: - err = Misc; - break; + case CURLE_SSL_CACERT_BADFILE: + err = Misc; + break; default: // Shut up warnings - break; + break; } } @@ -369,11 +403,13 @@ struct CurlDownloader : public Downloader curlm = curl_multi_init(); - #if LIBCURL_VERSION_NUM >= 0x072b00 // correct? + #if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0 curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX); #endif + #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0 curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, settings.binaryCachesParallelConnections.get()); + #endif enableHttp2 = settings.enableHttp2; @@ -533,7 +569,7 @@ struct CurlDownloader : public Downloader // FIXME: do this on a worker thread sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult { #ifdef ENABLE_S3 - S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable + S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable auto slash = request.uri.find('/', 5); if (slash == std::string::npos) throw nix::Error("bad S3 URI '%s'", request.uri); @@ -587,7 +623,7 @@ DownloadResult Downloader::download(const DownloadRequest & request) return enqueueDownload(request).get(); } -Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl) +Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) { auto url = resolveUri(url_); @@ -600,7 +636,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa if (expectedHash) { expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name); if (store->isValidPath(expectedStorePath)) - return expectedStorePath; + return store->toRealPath(expectedStorePath); } Path cacheDir = getCacheDir() + "/nix/tarballs"; @@ -617,7 +653,6 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa string expectedETag; - int ttl = settings.tarballTtl; bool skip = false; if (pathExists(fileLink) && pathExists(dataFile)) { @@ -687,17 +722,22 @@ Path Downloader::downloadCached(ref<Store> store, const 
string & url_, bool unpa Path tmpDir = createTempDir(); AutoDelete autoDelete(tmpDir, true); // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}); + runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir, "--strip-components", "1"}); unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, NoRepair); } replaceSymlink(unpackedStorePath, unpackedLink); storePath = unpackedStorePath; } - if (expectedStorePath != "" && storePath != expectedStorePath) - throw nix::Error("store path mismatch in file downloaded from '%s'", url); + if (expectedStorePath != "" && storePath != expectedStorePath) { + Hash gotHash = unpack + ? hashPath(expectedHash.type, store->toRealPath(storePath)).first + : hashFile(expectedHash.type, store->toRealPath(storePath)); + throw nix::Error("hash mismatch in file downloaded from '%s': got hash '%s' instead of the expected hash '%s'", + url, gotHash.to_string(), expectedHash.to_string()); + } - return storePath; + return store->toRealPath(storePath); } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index f2d65ad8d61d..7ade756fc356 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -2,6 +2,7 @@ #include "types.hh" #include "hash.hh" +#include "globals.hh" #include <string> #include <future> @@ -18,9 +19,11 @@ struct DownloadRequest unsigned int baseRetryTimeMs = 250; ActivityId parentAct; bool decompress = true; + std::shared_ptr<std::string> data; + std::string mimeType; DownloadRequest(const std::string & uri) - : uri(uri), parentAct(curActivity) { } + : uri(uri), parentAct(getCurActivity()) { } }; struct DownloadResult @@ -52,7 +55,7 @@ struct Downloader use the recorded ETag to verify if the server has a more recent version, and if so, download it to the Nix store. */ Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "", - const Hash & expectedHash = Hash(), string * effectiveUri = nullptr); + const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, int ttl = settings.tarballTtl); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh index a67e0775b978..f703e1d15404 100644 --- a/src/libstore/fs-accessor.hh +++ b/src/libstore/fs-accessor.hh @@ -13,9 +13,10 @@ public: struct Stat { - Type type; - uint64_t fileSize; // regular files only - bool isExecutable; // regular files only + Type type = tMissing; + uint64_t fileSize = 0; // regular files only + bool isExecutable = false; // regular files only + uint64_t narOffset = 0; // regular files only }; virtual Stat stat(const Path & path) = 0; diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index ab2c5ca0274c..ba49749d830a 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -59,7 +59,7 @@ static void makeSymlink(const Path & link, const Path & target) /* Create the new symlink. */ Path tempLink = (format("%1%.tmp-%2%-%3%") - % link % getpid() % rand()).str(); + % link % getpid() % random()).str(); createSymlink(target, tempLink); /* Atomically replace the old one. */ @@ -324,10 +324,8 @@ Roots LocalStore::findRootsNoTemp() { Roots roots; - /* Process direct roots in {gcroots,manifests,profiles}. */ + /* Process direct roots in {gcroots,profiles}. 
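   (For context: with the default state directory these are the symlink trees
   under /nix/var/nix/gcroots and /nix/var/nix/profiles; every symlink found
   there that points into the store keeps its target, and the target's closure,
   alive. For example, nix-build's ./result symlink is backed by an indirect
   root registered under gcroots/auto. The manifests directory is no longer
   scanned because the old manifest/nix-pull substitution mechanism was removed
   in this release.)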
*/ findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots); - if (pathExists(stateDir + "/manifests")) - findRoots(stateDir + "/manifests", DT_UNKNOWN, roots); findRoots(stateDir + "/profiles", DT_UNKNOWN, roots); /* Add additional roots returned by the program specified by the diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 4fa02f92085a..544566e0b573 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -6,6 +6,7 @@ #include <algorithm> #include <map> #include <thread> +#include <dlfcn.h> namespace nix { @@ -37,6 +38,7 @@ Settings::Settings() , nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR))) , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR))) , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR))) + , nixManDir(canonPath(NIX_MAN_DIR)) , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH)) { buildUsersGroup = getuid() == 0 ? "nixbld" : ""; @@ -116,17 +118,17 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::s args.mkFlag() .longName(name) .description("Enable sandboxing.") - .handler([=](std::vector<std::string> ss) { value = smEnabled; }) + .handler([=](std::vector<std::string> ss) { override(smEnabled); }) .category(category); args.mkFlag() .longName("no-" + name) .description("Disable sandboxing.") - .handler([=](std::vector<std::string> ss) { value = smDisabled; }) + .handler([=](std::vector<std::string> ss) { override(smDisabled); }) .category(category); args.mkFlag() .longName("relaxed-" + name) .description("Enable sandboxing, but allow builds to disable it.") - .handler([=](std::vector<std::string> ss) { value = smRelaxed; }) + .handler([=](std::vector<std::string> ss) { override(smRelaxed); }) .category(category); } @@ -137,4 +139,46 @@ void MaxBuildJobsSetting::set(const std::string & str) throw UsageError("configuration setting '%s' should be 'auto' or an integer", name); } + +void initPlugins() +{ + for (const auto & pluginFile : settings.pluginFiles.get()) { + Paths pluginFiles; + try { + auto ents = readDirectory(pluginFile); + for (const auto & ent : ents) + pluginFiles.emplace_back(pluginFile + "/" + ent.name); + } catch (SysError & e) { + if (e.errNo != ENOTDIR) + throw; + pluginFiles.emplace_back(pluginFile); + } + for (const auto & file : pluginFiles) { + /* handle is purposefully leaked as there may be state in the + DSO needed by the action of the plugin. 
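   In other words, dlclose() is deliberately never called: the plugin's global
   constructors run at dlopen() time and may hand long-lived objects (for
   instance settings passed to RegisterSetting) to the rest of Nix, and those
   objects' code and vtables live inside the shared object, so unmapping it
   would leave dangling references. Note also that an entry in plugin-files may
   name a directory, in which case every file in it is loaded (the
   readDirectory() branch above). A sketch of a minimal plugin appears after
   the globals.hh hunk below.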
*/ + void *handle = + dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL); + if (!handle) + throw Error("could not dynamically open plugin file '%s': %s", file, dlerror()); + } + } + /* We handle settings registrations here, since plugins can add settings */ + if (RegisterSetting::settingRegistrations) { + for (auto & registration : *RegisterSetting::settingRegistrations) + settings.addSetting(registration); + delete RegisterSetting::settingRegistrations; + } + settings.handleUnknownSettings(); +} + +RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations; + +RegisterSetting::RegisterSetting(AbstractSetting * s) +{ + if (!settingRegistrations) + settingRegistrations = new SettingRegistrations; + settingRegistrations->emplace_back(s); +} + + } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index a4aa842d70fd..9360096aae8c 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -29,7 +29,7 @@ struct CaseHackSetting : public BaseSetting<bool> void set(const std::string & str) override { BaseSetting<bool>::set(str); - nix::useCaseHack = true; + nix::useCaseHack = value; } }; @@ -82,6 +82,9 @@ public: /* The directory where the main programs are stored. */ Path nixBinDir; + /* The directory where the man pages are stored. */ + Path nixManDir; + /* File name of the socket the daemon listens to. */ Path nixDaemonSocketFile; @@ -138,6 +141,11 @@ public: Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders", "A semicolon-separated list of build machines, in the format of nix.machines."}; + Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes", + "Whether build machines should use their own substitutes for obtaining " + "build dependencies if possible, rather than waiting for this host to " + "upload them."}; + Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space", "Amount of reserved disk space for the garbage collector."}; @@ -150,7 +158,7 @@ public: Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering", "Whether to call sync() before registering a path as valid."}; - Setting<bool> useSubstitutes{this, true, "use-substitutes", + Setting<bool> useSubstitutes{this, true, "substitute", "Whether to use substitutes.", {"build-use-substitutes"}}; @@ -206,7 +214,8 @@ public: bool lockCPU; /* Whether to show a stack trace if Nix evaluation fails. 
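   (For context: promoting this from a plain bool to a Setting means it can now
   be enabled from nix.conf or with --option show-trace true on any command,
   whereas previously it could only be switched on by commands that expose the
   --show-trace flag.)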
*/ - bool showTrace = false; + Setting<bool> showTrace{this, false, "show-trace", + "Whether to show a stack trace on evaluation errors."}; Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", "Whether builtin functions that allow executing native code should be enabled."}; @@ -227,6 +236,9 @@ public: "Whether to restrict file system access to paths in $NIX_PATH, " "and network access to the URI prefixes listed in 'allowed-uris'."}; + Setting<bool> pureEval{this, false, "pure-eval", + "Whether to restrict file system and network access to files specified by cryptographic hash."}; + Setting<size_t> buildRepeat{this, 0, "repeat", "The number of times to repeat a build in order to verify determinism.", {"build-repeat"}}; @@ -259,10 +271,11 @@ public: Setting<bool> enforceDeterminism{this, true, "enforce-determinism", "Whether to fail if repeated builds produce different output."}; - Setting<Strings> binaryCachePublicKeys{this, + Setting<Strings> trustedPublicKeys{this, {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}, - "binary-cache-public-keys", - "Trusted public keys for secure substitution."}; + "trusted-public-keys", + "Trusted public keys for secure substitution.", + {"binary-cache-public-keys"}}; Setting<Strings> secretKeyFiles{this, {}, "secret-key-files", "Secret keys with which to sign local builds."}; @@ -277,8 +290,17 @@ public: Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl", "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."}; - Setting<std::string> signedBinaryCaches{this, "*", "signed-binary-caches", - "Obsolete."}; + Setting<bool> requireSigs{this, true, "require-sigs", + "Whether to check that any non-content-addressed path added to the " + "Nix store has a valid signature (that is, one signed using a key " + "listed in 'trusted-public-keys'."}; + + Setting<StringSet> extraPlatforms{this, + std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"} : StringSet{}, + "extra-platforms", + "Additional platforms that can be built on the local system. " + "These may be supported natively (e.g. armv7 on some aarch64 CPUs " + "or using hacks like qemu-user."}; Setting<Strings> substituters{this, nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(), @@ -298,6 +320,14 @@ public: Setting<Strings> trustedUsers{this, {"root"}, "trusted-users", "Which users or groups are trusted to ask the daemon to do unsafe things."}; + Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl", + "The TTL in seconds for negative lookups in the disk cache i.e binary cache lookups that " + "return an invalid path result"}; + + Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl", + "The TTL in seconds for positive lookups in the disk cache i.e binary cache lookups that " + "return a valid path result."}; + /* ?Who we trust to use the daemon in safe ways */ Setting<Strings> allowedUsers{this, {"*"}, "allowed-users", "Which users or groups are allowed to connect to the daemon."}; @@ -355,14 +385,28 @@ public: Setting<Strings> allowedUris{this, {}, "allowed-uris", "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; + + Setting<Paths> pluginFiles{this, {}, "plugin-files", + "Plugins to dynamically load at nix initialization time."}; }; // FIXME: don't use a global variable. 
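/* Illustrative sketch, not part of this change: together, the plugin-files
   setting above, initPlugins() and RegisterSetting (declared just below) form
   the plugin mechanism: nix.conf names shared objects, initPlugins() dlopen()s
   them once the configuration has been read, and any settings their static
   initializers queued via RegisterSetting are then attached to the global
   'settings' object. A hypothetical plugin could look roughly like this
   (file name and setting name are invented, and it assumes BaseSetting's
   (default, name, description) constructor as used elsewhere in libstore):

       // my-plugin.cc, built as my-plugin.so
       #include "globals.hh"

       using namespace nix;

       // Not attached to any Config yet; initPlugins() adds it to 'settings'
       // after this DSO has been dlopen()ed.
       static BaseSetting<std::string> greeting{"hello", "plugin-greeting",
           "An example option contributed by a plugin."};

       static RegisterSetting rs(&greeting);

   and would be enabled from nix.conf with a line such as

       plugin-files = /path/to/my-plugin.so
*/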
extern Settings settings; +/* This should be called after settings are initialized, but before + anything else */ +void initPlugins(); + extern const string nixVersion; +struct RegisterSetting +{ + typedef std::vector<AbstractSetting *> SettingRegistrations; + static SettingRegistrations * settingRegistrations; + RegisterSetting(AbstractSetting * s); +}; + } diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 057337685791..b9e9cd5daba5 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -38,7 +38,7 @@ public: try { BinaryCacheStore::init(); } catch (UploadToHTTP &) { - throw Error(format("'%s' does not appear to be a binary cache") % cacheUri); + throw Error("'%s' does not appear to be a binary cache", cacheUri); } diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority); } @@ -67,7 +67,14 @@ protected: const std::string & data, const std::string & mimeType) override { - throw UploadToHTTP("uploading to an HTTP binary cache is not supported"); + auto req = DownloadRequest(cacheUri + "/" + path); + req.data = std::make_shared<string>(data); // FIXME: inefficient + req.mimeType = mimeType; + try { + getDownloader()->download(req); + } catch (DownloadError & e) { + throw UploadToHTTP(format("uploading to HTTP binary cache at %1% not supported: %2%") % cacheUri % e.msg()); + } } void getFile(const std::string & path, diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index dfefdb9bc874..5dee25308f7f 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -16,6 +16,7 @@ struct LegacySSHStore : public Store const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"}; const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"}; const Setting<bool> compress{this, false, "compress", "whether to compress the connection"}; + const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"}; // Hack for getting remote build log output. const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"}; @@ -55,7 +56,7 @@ struct LegacySSHStore : public Store ref<Connection> openConnection() { auto conn = make_ref<Connection>(); - conn->sshConn = master.startCommand("nix-store --serve --write"); + conn->sshConn = master.startCommand(fmt("%s --serve --write", remoteProgram)); conn->to = FdSink(conn->sshConn->in.get()); conn->from = FdSource(conn->sshConn->out.get()); @@ -119,7 +120,7 @@ struct LegacySSHStore : public Store }); } - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override { @@ -130,7 +131,7 @@ struct LegacySSHStore : public Store conn->to << cmdImportPaths << 1; - conn->to(*nar); + copyNAR(source, conn->to); conn->to << exportMagic << info.path @@ -150,12 +151,7 @@ struct LegacySSHStore : public Store conn->to << cmdDumpStorePath << path; conn->to.flush(); - - /* FIXME: inefficient. 
*/ - ParseSink parseSink; /* null sink; just parse the NAR */ - TeeSource savedNAR(conn->from); - parseDump(parseSink, savedNAR); - sink(*savedNAR.data); + copyNAR(conn->from, sink); } PathSet queryAllValidPaths() override { unsupported(); } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 7afecc1cfc62..ef8c2811bd86 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -53,7 +53,6 @@ LocalStore::LocalStore(const Params & params) , trashDir(realStoreDir + "/trash") , tempRootsDir(stateDir + "/temproots") , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) - , publicKeys(getDefaultPublicKeys()) { auto state(_state.lock()); @@ -582,7 +581,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs) { - assert(info.ca == "" || info.isContentAddressed(*this)); + if (info.ca != "" && !info.isContentAddressed(*this)) + throw Error("cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", info.path); state.stmtRegisterValidPath.use() (info.path) @@ -964,21 +964,21 @@ void LocalStore::invalidatePath(State & state, const Path & path) } -void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, - RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) +const PublicKeys & LocalStore::getPublicKeys() { - assert(info.narHash); + auto state(_state.lock()); + if (!state->publicKeys) + state->publicKeys = std::make_unique<PublicKeys>(getDefaultPublicKeys()); + return *state->publicKeys; +} - Hash h = hashString(htSHA256, *nar); - if (h != info.narHash) - throw Error("hash mismatch importing path '%s'; expected hash '%s', got '%s'", - info.path, info.narHash.to_string(), h.to_string()); - if (nar->size() != info.narSize) - throw Error("size mismatch importing path '%s'; expected %s, got %s", - info.path, info.narSize, nar->size()); +void LocalStore::addToStore(const ValidPathInfo & info, Source & source, + RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) +{ + assert(info.narHash); - if (requireSigs && checkSigs && !info.checkSignatures(*this, publicKeys)) + if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys())) throw Error("cannot add path '%s' because it lacks a valid signature", info.path); addTempRoot(info.path); @@ -992,16 +992,34 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & /* Lock the output path. But don't lock if we're being called from a build hook (whose parent process already acquired a lock on this path). */ - Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS")); - if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end()) + if (!locksHeld.count(info.path)) outputLock.lockPaths({realPath}); if (repair || !isValidPath(info.path)) { deletePath(realPath); - StringSource source(*nar); - restorePath(realPath, source); + /* While restoring the path from the NAR, compute the hash + of the NAR. 
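   The point of the LambdaSource wrapper below is that the NAR is now streamed
   straight from 'source' into restorePath() while a HashSink observes every
   chunk, instead of first accumulating the whole NAR in a std::string and
   hashing that afterwards, so importing a large path no longer needs memory
   proportional to its size. Schematically (names here are illustrative):

       auto tee = [&](unsigned char * buf, size_t want) -> size_t {
           size_t got = upstream.read(buf, want);  // pull the next chunk
           hasher(buf, got);                       // hash the same bytes
           return got;                             // pass them on to the unpacker
       };

   The resulting hash and byte count are then checked against the narHash and
   narSize from the ValidPathInfo before the path is registered as valid.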
*/ + HashSink hashSink(htSHA256); + + LambdaSource wrapperSource([&](unsigned char * data, size_t len) -> size_t { + size_t n = source.read(data, len); + hashSink(data, n); + return n; + }); + + restorePath(realPath, wrapperSource); + + auto hashResult = hashSink.finish(); + + if (hashResult.first != info.narHash) + throw Error("hash mismatch importing path '%s'; expected hash '%s', got '%s'", + info.path, info.narHash.to_string(), hashResult.first.to_string()); + + if (hashResult.second != info.narSize) + throw Error("size mismatch importing path '%s'; expected %s, got %s", + info.path, info.narSize, hashResult.second); autoGC(); @@ -1216,7 +1234,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) /* Check the content hash (optionally - slow). */ printMsg(lvlTalkative, format("checking contents of '%1%'") % i); - HashResult current = hashPath(info->narHash.type, i); + HashResult current = hashPath(info->narHash.type, toRealPath(i)); if (info->narHash != nullHash && info->narHash != current.first) { printError(format("path '%1%' was modified! " diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 0a3841133e5d..1209a06356f7 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -19,7 +19,7 @@ namespace nix { /* Nix store and database schema version. Version 1 (or 0) was Nix <= 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is - Nix 1.0. Version 7 is Nix 1.3. Version 10 is 1.12. */ + Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */ const int nixSchemaVersion = 10; @@ -77,6 +77,8 @@ private: minFree but not much below availAfterGC, then there is no point in starting a new GC. */ uint64_t availAfterGC = std::numeric_limits<uint64_t>::max(); + + std::unique_ptr<PublicKeys> publicKeys; }; Sync<State, std::recursive_mutex> _state; @@ -97,13 +99,16 @@ public: private: Setting<bool> requireSigs{(Store*) this, - settings.signedBinaryCaches != "", // FIXME + settings.requireSigs, "require-sigs", "whether store paths should have a trusted signature on import"}; - PublicKeys publicKeys; + const PublicKeys & getPublicKeys(); public: + // Hack for build-remote.cc. + PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS")); + /* Initialise the local store, upgrading the schema if necessary. 
*/ LocalStore(const Params & params); @@ -140,7 +145,7 @@ public: void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) override; - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index 50c46ce6fe99..3799257f83ff 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -4,25 +4,28 @@ libstore_NAME = libnixstore libstore_DIR := $(d) -libstore_SOURCES := $(wildcard $(d)/*.cc) +libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) libstore_LIBS = libutil libformat libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread +ifneq ($(OS), FreeBSD) + libstore_LDFLAGS += -ldl +endif libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb $(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox))) ifeq ($(ENABLE_S3), 1) - libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core + libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core endif ifeq ($(OS), SunOS) libstore_LDFLAGS += -lsocket endif -ifeq ($(OS), Linux) +ifeq ($(HAVE_SECCOMP), 1) libstore_LDFLAGS += -lseccomp endif @@ -35,6 +38,7 @@ libstore_CXXFLAGS = \ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \ -DNIX_BIN_DIR=\"$(bindir)\" \ + -DNIX_MAN_DIR=\"$(mandir)\" \ -DSANDBOX_SHELL="\"$(sandbox_shell)\"" \ -DLSOF=\"$(lsof)\" diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 2afdeb021a93..b74480684f2a 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -1,21 +1,24 @@ #include "nar-accessor.hh" #include "archive.hh" +#include "json.hh" #include <map> #include <stack> #include <algorithm> +#include <nlohmann/json.hpp> + namespace nix { struct NarMember { - FSAccessor::Type type; + FSAccessor::Type type = FSAccessor::Type::tMissing; - bool isExecutable; + bool isExecutable = false; /* If this is a regular file, position of the contents of this file in the NAR. 
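   For a regular file this is the same byte offset that listNar() below emits
   as "narOffset" in the JSON listing, which is what makes makeLazyNarAccessor()
   work: given only the listing plus a way to read a byte range of the NAR
   (RemoteFSAccessor caches both, as the .ls and .nar files), readFile() can be
   answered without re-parsing the archive. A hypothetical listing for a small
   package might look like (sizes and offsets invented; the keys are the ones
   the JSON constructor and listNar() use):

       {
         "type": "directory",
         "entries": {
           "bin": {
             "type": "directory",
             "entries": {
               "hello": { "type": "regular", "size": 38912,
                          "executable": true, "narOffset": 424 }
             }
           },
           "share-link": { "type": "symlink", "target": "/nix/store/..." }
         }
       }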
*/ - size_t start, size; + size_t start = 0, size = 0; std::string target; @@ -23,83 +26,127 @@ struct NarMember std::map<std::string, NarMember> children; }; -struct NarIndexer : ParseSink, StringSource +struct NarAccessor : public FSAccessor { - NarMember root; - std::stack<NarMember*> parents; + std::shared_ptr<const std::string> nar; - std::string currentStart; - bool isExec = false; + GetNarBytes getNarBytes; - NarIndexer(const std::string & nar) : StringSource(nar) + NarMember root; + + struct NarIndexer : ParseSink, StringSource { - } + NarAccessor & acc; - void createMember(const Path & path, NarMember member) { - size_t level = std::count(path.begin(), path.end(), '/'); - while(parents.size() > level) { - parents.pop(); - } + std::stack<NarMember *> parents; - if(parents.empty()) { - root = std::move(member); - parents.push(&root); - } else { - if(parents.top()->type != FSAccessor::Type::tDirectory) { - throw Error(format("NAR file missing parent directory of path '%1%'") % path); + std::string currentStart; + bool isExec = false; + + NarIndexer(NarAccessor & acc, const std::string & nar) + : StringSource(nar), acc(acc) + { } + + void createMember(const Path & path, NarMember member) { + size_t level = std::count(path.begin(), path.end(), '/'); + while (parents.size() > level) parents.pop(); + + if (parents.empty()) { + acc.root = std::move(member); + parents.push(&acc.root); + } else { + if (parents.top()->type != FSAccessor::Type::tDirectory) + throw Error("NAR file missing parent directory of path '%s'", path); + auto result = parents.top()->children.emplace(baseNameOf(path), std::move(member)); + parents.push(&result.first->second); } - auto result = parents.top()->children.emplace(baseNameOf(path), std::move(member)); - parents.push(&result.first->second); } - } - void createDirectory(const Path & path) override - { - createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0 }); - } + void createDirectory(const Path & path) override + { + createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0}); + } - void createRegularFile(const Path & path) override - { - createMember(path, {FSAccessor::Type::tRegular, false, 0, 0 }); - } + void createRegularFile(const Path & path) override + { + createMember(path, {FSAccessor::Type::tRegular, false, 0, 0}); + } - void isExecutable() override - { - parents.top()->isExecutable = true; - } + void isExecutable() override + { + parents.top()->isExecutable = true; + } - void preallocateContents(unsigned long long size) override - { - currentStart = string(s, pos, 16); - assert(size <= std::numeric_limits<size_t>::max()); - parents.top()->size = (size_t)size; - parents.top()->start = pos; - } + void preallocateContents(unsigned long long size) override + { + currentStart = string(s, pos, 16); + assert(size <= std::numeric_limits<size_t>::max()); + parents.top()->size = (size_t)size; + parents.top()->start = pos; + } - void receiveContents(unsigned char * data, unsigned int len) override - { - // Sanity check - if (!currentStart.empty()) { - assert(len < 16 || currentStart == string((char *) data, 16)); - currentStart.clear(); + void receiveContents(unsigned char * data, unsigned int len) override + { + // Sanity check + if (!currentStart.empty()) { + assert(len < 16 || currentStart == string((char *) data, 16)); + currentStart.clear(); + } } + + void createSymlink(const Path & path, const string & target) override + { + createMember(path, + NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target}); + } + }; + + NarAccessor(ref<const 
std::string> nar) : nar(nar) + { + NarIndexer indexer(*this, *nar); + parseDump(indexer, indexer); } - void createSymlink(const Path & path, const string & target) override + NarAccessor(const std::string & listing, GetNarBytes getNarBytes) + : getNarBytes(getNarBytes) { - createMember(path, - NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target}); + using json = nlohmann::json; + + std::function<void(NarMember &, json &)> recurse; + + recurse = [&](NarMember & member, json & v) { + std::string type = v["type"]; + + if (type == "directory") { + member.type = FSAccessor::Type::tDirectory; + for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) { + std::string name = i.key(); + recurse(member.children[name], i.value()); + } + } else if (type == "regular") { + member.type = FSAccessor::Type::tRegular; + member.size = v["size"]; + member.isExecutable = v.value("executable", false); + member.start = v["narOffset"]; + } else if (type == "symlink") { + member.type = FSAccessor::Type::tSymlink; + member.target = v.value("target", ""); + } else return; + }; + + json v = json::parse(listing); + recurse(root, v); } - NarMember* find(const Path & path) + NarMember * find(const Path & path) { Path canon = path == "" ? "" : canonPath(path); - NarMember* current = &root; + NarMember * current = &root; auto end = path.end(); - for(auto it = path.begin(); it != end; ) { + for (auto it = path.begin(); it != end; ) { // because it != end, the remaining component is non-empty so we need // a directory - if(current->type != FSAccessor::Type::tDirectory) return nullptr; + if (current->type != FSAccessor::Type::tDirectory) return nullptr; // skip slash (canonPath above ensures that this is always a slash) assert(*it == '/'); @@ -108,7 +155,7 @@ struct NarIndexer : ParseSink, StringSource // lookup current component auto next = std::find(it, end, '/'); auto child = current->children.find(std::string(it, next)); - if(child == current->children.end()) return nullptr; + if (child == current->children.end()) return nullptr; current = &child->second; it = next; @@ -117,59 +164,50 @@ struct NarIndexer : ParseSink, StringSource return current; } - NarMember& at(const Path & path) { + NarMember & get(const Path & path) { auto result = find(path); - if(result == nullptr) { - throw Error(format("NAR file does not contain path '%1%'") % path); - } + if (result == nullptr) + throw Error("NAR file does not contain path '%1%'", path); return *result; } -}; - -struct NarAccessor : public FSAccessor -{ - ref<const std::string> nar; - NarIndexer indexer; - - NarAccessor(ref<const std::string> nar) : nar(nar), indexer(*nar) - { - parseDump(indexer, indexer); - } Stat stat(const Path & path) override { - auto i = indexer.find(path); + auto i = find(path); if (i == nullptr) return {FSAccessor::Type::tMissing, 0, false}; - return {i->type, i->size, i->isExecutable}; + return {i->type, i->size, i->isExecutable, i->start}; } StringSet readDirectory(const Path & path) override { - auto i = indexer.at(path); + auto i = get(path); if (i.type != FSAccessor::Type::tDirectory) throw Error(format("path '%1%' inside NAR file is not a directory") % path); StringSet res; - for(auto&& child : i.children) { + for (auto & child : i.children) res.insert(child.first); - } return res; } std::string readFile(const Path & path) override { - auto i = indexer.at(path); + auto i = get(path); if (i.type != FSAccessor::Type::tRegular) throw Error(format("path '%1%' inside NAR file is not a regular file") % path); + + if (getNarBytes) return 
getNarBytes(i.start, i.size); + + assert(nar); return std::string(*nar, i.start, i.size); } std::string readLink(const Path & path) override { - auto i = indexer.at(path); + auto i = get(path); if (i.type != FSAccessor::Type::tSymlink) throw Error(format("path '%1%' inside NAR file is not a symlink") % path); return i.target; @@ -181,4 +219,48 @@ ref<FSAccessor> makeNarAccessor(ref<const std::string> nar) return make_ref<NarAccessor>(nar); } +ref<FSAccessor> makeLazyNarAccessor(const std::string & listing, + GetNarBytes getNarBytes) +{ + return make_ref<NarAccessor>(listing, getNarBytes); +} + +void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor, + const Path & path, bool recurse) +{ + auto st = accessor->stat(path); + + auto obj = res.object(); + + switch (st.type) { + case FSAccessor::Type::tRegular: + obj.attr("type", "regular"); + obj.attr("size", st.fileSize); + if (st.isExecutable) + obj.attr("executable", true); + if (st.narOffset) + obj.attr("narOffset", st.narOffset); + break; + case FSAccessor::Type::tDirectory: + obj.attr("type", "directory"); + { + auto res2 = obj.object("entries"); + for (auto & name : accessor->readDirectory(path)) { + if (recurse) { + auto res3 = res2.placeholder(name); + listNar(res3, accessor, path + "/" + name, true); + } else + res2.object(name); + } + } + break; + case FSAccessor::Type::tSymlink: + obj.attr("type", "symlink"); + obj.attr("target", accessor->readLink(path)); + break; + default: + throw Error("path '%s' does not exist in NAR", path); + } +} + } diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh index 83c570be4c7b..2871199de16e 100644 --- a/src/libstore/nar-accessor.hh +++ b/src/libstore/nar-accessor.hh @@ -1,5 +1,7 @@ #pragma once +#include <functional> + #include "fs-accessor.hh" namespace nix { @@ -8,4 +10,21 @@ namespace nix { file. */ ref<FSAccessor> makeNarAccessor(ref<const std::string> nar); +/* Create a NAR accessor from a NAR listing (in the format produced by + listNar()). The callback getNarBytes(offset, length) is used by the + readFile() method of the accessor to get the contents of files + inside the NAR. */ +typedef std::function<std::string(uint64_t, uint64_t)> GetNarBytes; + +ref<FSAccessor> makeLazyNarAccessor( + const std::string & listing, + GetNarBytes getNarBytes); + +class JSONPlaceholder; + +/* Write a JSON representation of the contents of a NAR (except file + contents). */ +void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor, + const Path & path, bool recurse); + } diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 6e155e877803..35403e5df56f 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -1,6 +1,7 @@ #include "nar-info-disk-cache.hh" #include "sync.hh" #include "sqlite.hh" +#include "globals.hh" #include <sqlite3.h> @@ -47,10 +48,6 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { public: - /* How long negative and positive lookups are valid. */ - const int ttlNegative = 3600; - const int ttlPositive = 30 * 24 * 3600; - /* How often to purge expired entries from the cache. */ const int purgeInterval = 24 * 3600; @@ -116,8 +113,8 @@ public: SQLiteStmt(state->db, "delete from NARs where ((present = 0 and timestamp < ?) 
or (present = 1 and timestamp < ?))") .use() - (now - ttlNegative) - (now - ttlPositive) + (now - settings.ttlNegativeNarInfoCache) + (now - settings.ttlPositiveNarInfoCache) .exec(); debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db)); @@ -186,8 +183,8 @@ public: auto queryNAR(state->queryNAR.use() (cache.id) (hashPart) - (now - ttlNegative) - (now - ttlPositive)); + (now - settings.ttlNegativeNarInfoCache) + (now - settings.ttlPositiveNarInfoCache)); if (!queryNAR.next()) return {oUnknown, 0}; @@ -260,11 +257,8 @@ public: ref<NarInfoDiskCache> getNarInfoDiskCache() { - static Sync<std::shared_ptr<NarInfoDiskCache>> cache; - - auto cache_(cache.lock()); - if (!*cache_) *cache_ = std::make_shared<NarInfoDiskCacheImpl>(); - return ref<NarInfoDiskCache>(*cache_); + static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>(); + return cache; } } diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in index 3f1a2d83d2f2..5cf22faadcbe 100644 --- a/src/libstore/nix-store.pc.in +++ b/src/libstore/nix-store.pc.in @@ -5,5 +5,5 @@ includedir=@includedir@ Name: Nix Description: Nix Package Manager Version: @PACKAGE_VERSION@ -Libs: -L${libdir} -lnixstore -lnixutil -lnixformat -Cflags: -I${includedir}/nix +Libs: -L${libdir} -lnixstore -lnixutil +Cflags: -I${includedir}/nix -std=c++14 diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 891540ae4c1d..7840167d7772 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -213,7 +213,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : ""); Path tempLink = (format("%1%/.tmp-link-%2%-%3%") - % realStoreDir % getpid() % rand()).str(); + % realStoreDir % getpid() % random()).str(); if (link(linkPath.c_str(), tempLink.c_str()) == -1) { if (errno == EMLINK) { diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index 587f29598851..08d1efdbeb01 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -113,8 +113,10 @@ bool PathLocks::lockPaths(const PathSet & _paths, { auto lockedPaths(lockedPaths_.lock()); - if (lockedPaths->count(lockPath)) - throw Error("deadlock: trying to re-acquire self-held lock '%s'", lockPath); + if (lockedPaths->count(lockPath)) { + if (!wait) return false; + throw AlreadyLocked("deadlock: trying to re-acquire self-held lock '%s'", lockPath); + } lockedPaths->insert(lockPath); } diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 2a7de611446e..db51f950a320 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -2,10 +2,8 @@ #include "util.hh" - namespace nix { - /* Open (possibly create) a lock file and return the file descriptor. -1 is returned if create is false and the lock could not be opened because it doesn't exist. Any other error throws an exception. */ @@ -18,6 +16,7 @@ enum LockType { ltRead, ltWrite, ltNone }; bool lockFile(int fd, LockType lockType, bool wait); +MakeError(AlreadyLocked, Error); class PathLocks { @@ -38,9 +37,6 @@ public: void setDeletion(bool deletePaths); }; - -// FIXME: not thread-safe! 
bool pathIsLockedByMe(const Path & path); - } diff --git a/src/libstore/references.cc b/src/libstore/references.cc index ba9f18b9ca5e..5b7eb1f846af 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -13,7 +13,7 @@ namespace nix { static unsigned int refLength = 32; /* characters */ -static void search(const unsigned char * s, unsigned int len, +static void search(const unsigned char * s, size_t len, StringSet & hashes, StringSet & seen) { static bool initialised = false; @@ -25,7 +25,7 @@ static void search(const unsigned char * s, unsigned int len, initialised = true; } - for (unsigned int i = 0; i + refLength <= len; ) { + for (size_t i = 0; i + refLength <= len; ) { int j; bool match = true; for (j = refLength - 1; j >= 0; --j) @@ -73,7 +73,7 @@ void RefScanSink::operator () (const unsigned char * data, size_t len) search(data, len, hashes, seen); - unsigned int tailLen = len <= refLength ? len : refLength; + size_t tailLen = len <= refLength ? len : refLength; tail = string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) + string((const char *) data + len - tailLen, tailLen); diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc index ba9620a175bb..5233fb2c239b 100644 --- a/src/libstore/remote-fs-accessor.cc +++ b/src/libstore/remote-fs-accessor.cc @@ -1,5 +1,10 @@ #include "remote-fs-accessor.hh" #include "nar-accessor.hh" +#include "json.hh" + +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> namespace nix { @@ -11,20 +16,30 @@ RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path & cacheDir) createDirs(cacheDir); } -Path RemoteFSAccessor::makeCacheFile(const Path & storePath) +Path RemoteFSAccessor::makeCacheFile(const Path & storePath, const std::string & ext) { assert(cacheDir != ""); - return fmt("%s/%s.nar", cacheDir, storePathToHash(storePath)); + return fmt("%s/%s.%s", cacheDir, storePathToHash(storePath), ext); } -void RemoteFSAccessor::addToCache(const Path & storePath, const std::string & nar) +void RemoteFSAccessor::addToCache(const Path & storePath, const std::string & nar, + ref<FSAccessor> narAccessor) { - try { - if (cacheDir == "") return; - /* FIXME: do this asynchronously. */ - writeFile(makeCacheFile(storePath), nar); - } catch (...) { - ignoreException(); + nars.emplace(storePath, narAccessor); + + if (cacheDir != "") { + try { + std::ostringstream str; + JSONPlaceholder jsonRoot(str); + listNar(jsonRoot, narAccessor, "", true); + writeFile(makeCacheFile(storePath, "ls"), str.str()); + + /* FIXME: do this asynchronously. */ + writeFile(makeCacheFile(storePath, "nar"), nar); + + } catch (...) 
{ + ignoreException(); + } } } @@ -42,20 +57,49 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_) if (i != nars.end()) return {i->second, restPath}; StringSink sink; + std::string listing; + Path cacheFile; + + if (cacheDir != "" && pathExists(cacheFile = makeCacheFile(storePath, "nar"))) { + + try { + listing = nix::readFile(makeCacheFile(storePath, "ls")); + + auto narAccessor = makeLazyNarAccessor(listing, + [cacheFile](uint64_t offset, uint64_t length) { + + AutoCloseFD fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) + throw SysError("opening NAR cache file '%s'", cacheFile); + + if (lseek(fd.get(), offset, SEEK_SET) != (off_t) offset) + throw SysError("seeking in '%s'", cacheFile); + + std::string buf(length, 0); + readFull(fd.get(), (unsigned char *) buf.data(), length); + + return buf; + }); + + nars.emplace(storePath, narAccessor); + return {narAccessor, restPath}; + + } catch (SysError &) { } + + try { + *sink.s = nix::readFile(cacheFile); - try { - if (cacheDir != "") - *sink.s = nix::readFile(makeCacheFile(storePath)); - } catch (SysError &) { } + auto narAccessor = makeNarAccessor(sink.s); + nars.emplace(storePath, narAccessor); + return {narAccessor, restPath}; - if (sink.s->empty()) { - store->narFromPath(storePath, sink); - addToCache(storePath, *sink.s); + } catch (SysError &) { } } - auto accessor = makeNarAccessor(sink.s); - nars.emplace(storePath, accessor); - return {accessor, restPath}; + store->narFromPath(storePath, sink); + auto narAccessor = makeNarAccessor(sink.s); + addToCache(storePath, *sink.s, narAccessor); + return {narAccessor, restPath}; } FSAccessor::Stat RemoteFSAccessor::stat(const Path & path) diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh index 2a3fc01eff58..4afb3be95736 100644 --- a/src/libstore/remote-fs-accessor.hh +++ b/src/libstore/remote-fs-accessor.hh @@ -18,9 +18,10 @@ class RemoteFSAccessor : public FSAccessor friend class BinaryCacheStore; - Path makeCacheFile(const Path & storePath); + Path makeCacheFile(const Path & storePath, const std::string & ext); - void addToCache(const Path & storePath, const std::string & nar); + void addToCache(const Path & storePath, const std::string & nar, + ref<FSAccessor> narAccessor); public: diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 77b41b6bf8a8..080cef93d214 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -78,9 +78,22 @@ UDSRemoteStore::UDSRemoteStore(const Params & params) } +UDSRemoteStore::UDSRemoteStore(std::string socket_path, const Params & params) + : Store(params) + , LocalFSStore(params) + , RemoteStore(params) + , path(socket_path) +{ +} + + std::string UDSRemoteStore::getUri() { - return "daemon"; + if (path) { + return std::string("unix://") + *path; + } else { + return "daemon"; + } } @@ -98,7 +111,7 @@ ref<RemoteStore::Connection> UDSRemoteStore::openConnection() throw SysError("cannot create Unix domain socket"); closeOnExec(conn->fd.get()); - string socketPath = settings.nixDaemonSocketFile; + string socketPath = path ? 
*path : settings.nixDaemonSocketFile; struct sockaddr_un addr; addr.sun_family = AF_UNIX; @@ -364,7 +377,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart) } -void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, +void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) { auto conn(connections->get()); @@ -372,22 +385,21 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { conn->to << wopImportPaths; - StringSink sink; - sink << 1 // == path follows - ; - assert(nar->size() % 8 == 0); - sink((unsigned char *) nar->data(), nar->size()); - sink - << exportMagic - << info.path - << info.references - << info.deriver - << 0 // == no legacy signature - << 0 // == no path follows - ; - - StringSource source(*sink.s); - conn->processStderr(0, &source); + auto source2 = sinkToSource([&](Sink & sink) { + sink << 1 // == path follows + ; + copyNAR(source, sink); + sink + << exportMagic + << info.path + << info.references + << info.deriver + << 0 // == no legacy signature + << 0 // == no path follows + ; + }); + + conn->processStderr(0, source2.get()); auto importedPaths = readStorePaths<PathSet>(*this, conn->from); assert(importedPaths.size() <= 1); @@ -399,7 +411,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> << info.references << info.registrationTime << info.narSize << info.ultimate << info.sigs << info.ca << repair << !checkSigs; - conn->to(*nar); + copyNAR(source, conn->to); conn->processStderr(); } } @@ -721,5 +733,14 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) } } +static std::string uriScheme = "unix://"; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const Store::Params & params) + -> std::shared_ptr<Store> +{ + if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0; + return std::make_shared<UDSRemoteStore>(std::string(uri, uriScheme.size()), params); +}); } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 30c6beae6ff2..95fa59a2069d 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -58,7 +58,7 @@ public: void querySubstitutablePathInfos(const PathSet & paths, SubstitutablePathInfos & infos) override; - void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor) override; @@ -122,11 +122,12 @@ protected: ref<Pool<Connection>> connections; + virtual void setOptions(Connection & conn); + private: std::atomic_bool failed{false}; - void setOptions(Connection & conn); }; class UDSRemoteStore : public LocalFSStore, public RemoteStore @@ -134,6 +135,7 @@ class UDSRemoteStore : public LocalFSStore, public RemoteStore public: UDSRemoteStore(const Params & params); + UDSRemoteStore(std::string path, const Params & params); std::string getUri() override; @@ -145,6 +147,7 @@ private: }; ref<RemoteStore::Connection> openConnection() override; + std::experimental::optional<std::string> path; }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 6a0f19238add..103f141a1a11 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -10,10 +10,14 @@ #include 
"istringstream_nocopy.hh" #include <aws/core/Aws.h> +#include <aws/core/VersionConfig.h> +#include <aws/core/auth/AWSCredentialsProvider.h> +#include <aws/core/auth/AWSCredentialsProviderChain.h> #include <aws/core/client/ClientConfiguration.h> #include <aws/core/client/DefaultRetryStrategy.h> #include <aws/core/utils/logging/FormattedLogSystem.h> #include <aws/core/utils/logging/LogMacros.h> +#include <aws/core/utils/threading/Executor.h> #include <aws/s3/S3Client.h> #include <aws/s3/model/CreateBucketRequest.h> #include <aws/s3/model/GetBucketLocationRequest.h> @@ -21,6 +25,9 @@ #include <aws/s3/model/HeadObjectRequest.h> #include <aws/s3/model/ListObjectsRequest.h> #include <aws/s3/model/PutObjectRequest.h> +#include <aws/transfer/TransferManager.h> + +using namespace Aws::Transfer; namespace nix { @@ -77,9 +84,22 @@ static void initAWS() }); } -S3Helper::S3Helper(const string & region) +S3Helper::S3Helper(const std::string & profile, const std::string & region) : config(makeConfig(region)) - , client(make_ref<Aws::S3::S3Client>(*config, true, false)) + , client(make_ref<Aws::S3::S3Client>( + profile == "" + ? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>( + std::make_shared<Aws::Auth::DefaultAWSCredentialsProviderChain>()) + : std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>( + std::make_shared<Aws::Auth::ProfileConfigFileAWSCredentialsProvider>(profile.c_str())), + *config, + // FIXME: https://github.com/aws/aws-sdk-cpp/issues/759 +#if AWS_VERSION_MAJOR == 1 && AWS_VERSION_MINOR < 3 + false, +#else + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, +#endif + false)) { } @@ -148,10 +168,13 @@ S3Helper::DownloadResult S3Helper::getObject( struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { + const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."}; const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}}; const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"}; const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"}; const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"}; + const Setting<uint64_t> bufferSize{ + this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"}; std::string bucketName; @@ -163,7 +186,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const Params & params, const std::string & bucketName) : S3BinaryCacheStore(params) , bucketName(bucketName) - , s3Helper(region) + , s3Helper(profile, region) { diskCache = getNarInfoDiskCache(); } @@ -254,34 +277,76 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const std::string & mimeType, const std::string & contentEncoding) { - auto request = - Aws::S3::Model::PutObjectRequest() - .WithBucket(bucketName) - .WithKey(path); + auto stream = std::make_shared<istringstream_nocopy>(data); - request.SetContentType(mimeType); + auto maxThreads = std::thread::hardware_concurrency(); - if (contentEncoding != "") - request.SetContentEncoding(contentEncoding); + static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor> + executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads); - auto stream = std::make_shared<istringstream_nocopy>(data); + TransferManagerConfiguration transferConfig(executor.get()); - request.SetBody(stream); + transferConfig.s3Client 
= s3Helper.client; + transferConfig.bufferSize = bufferSize; - stats.put++; - stats.putBytes += data.size(); + if (contentEncoding != "") + transferConfig.createMultipartUploadTemplate.SetContentEncoding( + contentEncoding); + + transferConfig.uploadProgressCallback = + [&](const TransferManager *transferManager, + const std::shared_ptr<const TransferHandle> + &transferHandle) { + //FIXME: find a way to properly abort the multipart upload. + checkInterrupt(); + printTalkative("upload progress ('%s'): '%d' of '%d' bytes", + path, + transferHandle->GetBytesTransferred(), + transferHandle->GetBytesTotalSize()); + }; + + transferConfig.transferStatusUpdatedCallback = + [&](const TransferManager *, + const std::shared_ptr<const TransferHandle> + &transferHandle) { + switch (transferHandle->GetStatus()) { + case TransferStatus::COMPLETED: + printTalkative("upload of '%s' completed", path); + stats.put++; + stats.putBytes += data.size(); + break; + case TransferStatus::IN_PROGRESS: + break; + case TransferStatus::FAILED: + throw Error("AWS error: failed to upload 's3://%s/%s'", + bucketName, path); + break; + default: + throw Error("AWS error: transfer status of 's3://%s/%s' " + "in unexpected state", + bucketName, path); + }; + }; + + std::shared_ptr<TransferManager> transferManager = + TransferManager::Create(transferConfig); auto now1 = std::chrono::steady_clock::now(); - auto result = checkAws(format("AWS error uploading '%s'") % path, - s3Helper.client->PutObject(request)); + std::shared_ptr<TransferHandle> transferHandle = + transferManager->UploadFile(stream, bucketName, path, mimeType, + Aws::Map<Aws::String, Aws::String>()); + + transferHandle->WaitUntilFinished(); auto now2 = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count(); + auto duration = + std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1) + .count(); - printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") - % bucketName % path % data.size() % duration); + printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") % + bucketName % path % data.size() % duration); stats.putTimeMs += duration; } diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh index 08a7fbf96e98..4f996400343c 100644 --- a/src/libstore/s3.hh +++ b/src/libstore/s3.hh @@ -14,7 +14,7 @@ struct S3Helper ref<Aws::Client::ClientConfiguration> config; ref<Aws::S3::S3Client> client; - S3Helper(const std::string & region); + S3Helper(const std::string & profile, const std::string & region); ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region); diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb index c8436d9866c5..0299d1ee45d2 100644 --- a/src/libstore/sandbox-defaults.sb +++ b/src/libstore/sandbox-defaults.sb @@ -25,7 +25,11 @@ (allow mach-lookup (global-name "com.apple.system.opendirectoryd.libinfo")) ; Access to /tmp. -(allow file* process-exec (literal "/tmp") (subpath TMPDIR)) +; The network-outbound/network-inbound ones are for unix domain sockets, which +; we allow access to in TMPDIR (but if we allow them more broadly, you could in +; theory escape the sandbox) +(allow file* process-exec network-outbound network-inbound + (literal "/tmp") (subpath TMPDIR)) ; Some packages like to read the system version. 
(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist")) diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index b13001b06d57..42d40e71d8be 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -7,7 +7,7 @@ namespace nix { -[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f) +[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs) { int err = sqlite3_errcode(db); @@ -21,7 +21,7 @@ namespace nix { : fmt("SQLite database '%s' is busy", path)); } else - throw SQLiteError("%s: %s (in '%s')", f.str(), sqlite3_errstr(err), path); + throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(err), path); } SQLite::SQLite(const Path & path) diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 14a7a0dd8996..115679b84159 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -93,7 +93,7 @@ struct SQLiteTxn MakeError(SQLiteError, Error); MakeError(SQLiteBusy, SQLiteError); -[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f); +[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs); void handleSQLiteBusy(const SQLiteBusy & e); diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index bb536fadfd51..39205ae2ce12 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -51,21 +51,16 @@ private: std::string host; SSHMaster master; -}; - -class ForwardSource : public Source -{ - Source & readSource; - Sink & writeSink; -public: - ForwardSource(Source & readSource, Sink & writeSink) : readSource(readSource), writeSink(writeSink) {} - size_t read(unsigned char * data, size_t len) override + void setOptions(RemoteStore::Connection & conn) override { - auto res = readSource.read(data, len); - writeSink(data, len); - return res; - } + /* TODO Add a way to explicitly ask for some options to be + forwarded. One option: A way to query the daemon for its + settings, and then a series of params to SSHStore like + forward-cores or forward-overridden-cores that only + override the requested settings. 
+ */ + }; }; void SSHStore::narFromPath(const Path & path, Sink & sink) @@ -73,9 +68,7 @@ void SSHStore::narFromPath(const Path & path, Sink & sink) auto conn(connections->get()); conn->to << wopNarFromPath << path; conn->processStderr(); - ParseSink ps; - auto fwd = ForwardSource(conn->from, sink); - parseDump(ps, fwd); + copyNAR(conn->from, sink); } ref<FSAccessor> SSHStore::getFSAccessor() diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 7ff7a9bffc49..033c580936ad 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -49,6 +49,8 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string addCommonSSHOpts(args); if (socketPath != "") args.insert(args.end(), {"-S", socketPath}); + if (verbosity >= lvlChatty) + args.push_back("-v"); args.push_back(command); execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); @@ -93,6 +95,8 @@ Path SSHMaster::startMaster() , "-o", "LocalCommand=echo started" , "-o", "PermitLocalCommand=yes" }; + if (verbosity >= lvlChatty) + args.push_back("-v"); addCommonSSHOpts(args); execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c57e42fec00d..1a0d12ca78c2 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -222,11 +222,10 @@ Path Store::makeTextPath(const string & name, const Hash & hash, } -std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath, - bool recursive, HashType hashAlgo, PathFilter & filter) const +std::pair<Path, Hash> Store::computeStorePathForPath(const string & name, + const Path & srcPath, bool recursive, HashType hashAlgo, PathFilter & filter) const { Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath); - string name = baseNameOf(srcPath); Path dstPath = makeFixedOutputPath(recursive, h, name); return std::pair<Path, Hash>(dstPath, h); } @@ -389,8 +388,10 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti Sync<State> state_(State{paths.size(), PathSet()}); std::condition_variable wakeup; + ThreadPool pool; - for (auto & path : paths) + auto doQuery = [&](const Path & path ) { + checkInterrupt(); queryPathInfo(path, [path, &state_, &wakeup](ref<ValidPathInfo> info) { auto state(state_.lock()); @@ -411,6 +412,12 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti if (!--state->left) wakeup.notify_one(); }); + }; + + for (auto & path : paths) + pool.enqueue(std::bind(doQuery, path)); + + pool.process(); while (true) { auto state(state_.lock()); @@ -508,6 +515,8 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths std::shared_ptr<const ValidPathInfo>(info)); if (narInfo) { + if (!narInfo->url.empty()) + jsonPath.attr("url", narInfo->url); if (narInfo->fileHash) jsonPath.attr("downloadHash", narInfo->fileHash.to_string()); if (narInfo->fileSize) @@ -569,9 +578,9 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, auto dstUri = dstStore->getUri(); Activity act(*logger, lvlInfo, actCopyPath, - srcUri == "local" + srcUri == "local" || srcUri == "daemon" ? fmt("copying path '%s' to '%s'", storePath, dstUri) - : dstUri == "local" + : dstUri == "local" || dstUri == "daemon" ? 
fmt("copying path '%s' from '%s'", storePath, srcUri) : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri), {storePath, srcUri, dstUri}); @@ -581,32 +590,15 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, uint64_t total = 0; - auto progress = [&](size_t len) { - total += len; - act.progress(total, info->narSize); - }; - - struct MyStringSink : StringSink - { - typedef std::function<void(size_t)> Callback; - Callback callback; - MyStringSink(Callback callback) : callback(callback) { } - void operator () (const unsigned char * data, size_t len) override - { - StringSink::operator ()(data, len); - callback(len); - }; - }; - - MyStringSink sink(progress); - srcStore->narFromPath({storePath}, sink); - + // FIXME +#if 0 if (!info->narHash) { auto info2 = make_ref<ValidPathInfo>(*info); info2->narHash = hashString(htSHA256, *sink.s); if (!info->narSize) info2->narSize = sink.s->size(); info = info2; } +#endif if (info->ultimate) { auto info2 = make_ref<ValidPathInfo>(*info); @@ -614,7 +606,16 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, info = info2; } - dstStore->addToStore(*info, sink.s, repair, checkSigs); + auto source = sinkToSource([&](Sink & sink) { + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + total += len; + act.progress(total, info->narSize); + }); + srcStore->narFromPath({storePath}, wrapperSink); + }); + + dstStore->addToStore(*info, *source, repair, checkSigs); } @@ -756,7 +757,8 @@ bool ValidPathInfo::isContentAddressed(const Store & store) const else if (hasPrefix(ca, "fixed:")) { bool recursive = ca.compare(6, 2, "r:") == 0; Hash hash(std::string(ca, recursive ? 8 : 6)); - if (store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path) + if (references.empty() && + store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path) return true; else warn(); @@ -799,6 +801,21 @@ std::string makeFixedOutputCA(bool recursive, const Hash & hash) } +void Store::addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs, + std::shared_ptr<FSAccessor> accessor) +{ + addToStore(info, make_ref<std::string>(narSource.drain()), repair, checkSigs, accessor); +} + +void Store::addToStore(const ValidPathInfo & info, const ref<std::string> & nar, + RepairFlag repair, CheckSigsFlag checkSigs, + std::shared_ptr<FSAccessor> accessor) +{ + StringSource source(*nar); + addToStore(info, source, repair, checkSigs, accessor); +} + } @@ -830,7 +847,7 @@ ref<Store> openStore(const std::string & uri_, for (auto fun : *RegisterStoreImplementation::implementations) { auto store = fun(uri, params); if (store) { - store->warnUnknownSettings(); + store->handleUnknownSettings(); return ref<Store>(store); } } @@ -887,7 +904,11 @@ std::list<ref<Store>> getDefaultSubstituters() auto addStore = [&](const std::string & uri) { if (done.count(uri)) return; done.insert(uri); - stores.push_back(openStore(uri)); + try { + stores.push_back(openStore(uri)); + } catch (Error & e) { + printError("warning: %s", e.what()); + } }; for (auto uri : settings.substituters.get()) diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index d1e1b5d6f452..ea259f07e8ab 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -192,7 +192,7 @@ struct ValidPathInfo typedef list<ValidPathInfo> ValidPathInfos; -enum BuildMode { bmNormal, bmRepair, bmCheck, bmHash }; +enum BuildMode { bmNormal, bmRepair, bmCheck }; struct BuildResult @@ -248,6 +248,8 
@@ public: const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"}; + const Setting<bool> isTrusted{this, false, "trusted", "whether paths from this store can be used as substitutes even when they lack trusted signatures"}; + protected: struct State @@ -305,9 +307,9 @@ public: /* This is the preparatory part of addToStore(); it computes the store path to which srcPath is to be copied. Returns the store path and the cryptographic hash of the contents of srcPath. */ - std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath, - bool recursive = true, HashType hashAlgo = htSHA256, - PathFilter & filter = defaultPathFilter) const; + std::pair<Path, Hash> computeStorePathForPath(const string & name, + const Path & srcPath, bool recursive = true, + HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter) const; /* Preparatory part of addTextToStore(). @@ -397,9 +399,14 @@ public: virtual bool wantMassQuery() { return false; } /* Import a path into the store. */ + virtual void addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs, + std::shared_ptr<FSAccessor> accessor = 0); + + // FIXME: remove virtual void addToStore(const ValidPathInfo & info, const ref<std::string> & nar, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs, - std::shared_ptr<FSAccessor> accessor = 0) = 0; + std::shared_ptr<FSAccessor> accessor = 0); /* Copy the contents of a path to the store and register the validity the resulting path. The resulting path is returned. @@ -597,6 +604,11 @@ public: "nix-cache-info" file. Lower value means higher priority. */ virtual int getPriority() { return 0; } + virtual Path toRealPath(const Path & storePath) + { + return storePath; + } + protected: Stats stats; @@ -639,9 +651,10 @@ public: virtual Path getRealStoreDir() { return storeDir; } - Path toRealPath(const Path & storePath) + Path toRealPath(const Path & storePath) override { - return getRealStoreDir() + "/" + baseNameOf(storePath); + assert(isInStore(storePath)); + return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1); } std::shared_ptr<std::string> getBuildLog(const Path & path) override; @@ -699,6 +712,9 @@ void removeTempRoots(); * ‘daemon’: The Nix store accessed via a Unix domain socket connection to nix-daemon. + * ‘unix://<path>’: The Nix store accessed via a Unix domain socket + connection to nix-daemon, with the socket located at <path>. + * ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on whether the user has write access to the local Nix store/database. diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index f71229d8fdd6..154e2d20430c 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -40,14 +40,14 @@ static void dumpContents(const Path & path, size_t size, AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); if (!fd) throw SysError(format("opening file '%1%'") % path); - unsigned char buf[65536]; + std::vector<unsigned char> buf(65536); size_t left = size; while (left > 0) { - size_t n = left > sizeof(buf) ? 
sizeof(buf) : left; - readFull(fd.get(), buf, n); + auto n = std::min(left, buf.size()); + readFull(fd.get(), buf.data(), n); left -= n; - sink(buf, n); + sink(buf.data(), n); } writePadding(size, sink); @@ -146,14 +146,14 @@ static void parseContents(ParseSink & sink, Source & source, const Path & path) sink.preallocateContents(size); unsigned long long left = size; - unsigned char buf[65536]; + std::vector<unsigned char> buf(65536); while (left) { checkInterrupt(); - unsigned int n = sizeof(buf); - if ((unsigned long long) n > left) n = left; - source(buf, n); - sink.receiveContents(buf, n); + auto n = buf.size(); + if ((unsigned long long)n > left) n = left; + source(buf.data(), n); + sink.receiveContents(buf.data(), n); left -= n; } @@ -350,4 +350,21 @@ void restorePath(const Path & path, Source & source) } +void copyNAR(Source & source, Sink & sink) +{ + // FIXME: if 'source' is the output of dumpPath() followed by EOF, + // we should just forward all data directly without parsing. + + ParseSink parseSink; /* null sink; just parse the NAR */ + + LambdaSource wrapper([&](unsigned char * data, size_t len) { + auto n = source.read(data, len); + sink(data, n); + return n; + }); + + parseDump(parseSink, wrapper); +} + + } diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index 8a15e849c7b8..7a0e688e4201 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -74,6 +74,9 @@ void parseDump(ParseSink & sink, Source & source); void restorePath(const Path & path, Source & source); +/* Read a NAR from 'source' and write it to 'sink'. */ +void copyNAR(Source & source, Sink & sink); + // FIXME: global variables are bad m'kay. extern bool useCaseHack; diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 2b3dff3a5ea1..e1782f8c4bd9 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -1,17 +1,39 @@ #include "compression.hh" #include "util.hh" #include "finally.hh" +#include "logging.hh" #include <lzma.h> #include <bzlib.h> #include <cstdio> #include <cstring> +#if HAVE_BROTLI +#include <brotli/decode.h> +#include <brotli/encode.h> +#endif // HAVE_BROTLI + #include <iostream> namespace nix { -static ref<std::string> decompressXZ(const std::string & in) +static const size_t bufSize = 32 * 1024; + +static void decompressNone(Source & source, Sink & sink) +{ + std::vector<unsigned char> buf(bufSize); + while (true) { + size_t n; + try { + n = source.read(buf.data(), buf.size()); + } catch (EndOfFile &) { + break; + } + sink(buf.data(), n); + } +} + +static void decompressXZ(Source & source, Sink & sink) { lzma_stream strm(LZMA_STREAM_INIT); @@ -23,36 +45,44 @@ static ref<std::string> decompressXZ(const std::string & in) Finally free([&]() { lzma_end(&strm); }); lzma_action action = LZMA_RUN; - uint8_t outbuf[BUFSIZ]; - ref<std::string> res = make_ref<std::string>(); - strm.next_in = (uint8_t *) in.c_str(); - strm.avail_in = in.size(); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + std::vector<uint8_t> inbuf(bufSize), outbuf(bufSize); + strm.next_in = nullptr; + strm.avail_in = 0; + strm.next_out = outbuf.data(); + strm.avail_out = outbuf.size(); + bool eof = false; while (true) { checkInterrupt(); + if (strm.avail_in == 0 && !eof) { + strm.next_in = inbuf.data(); + try { + strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size()); + } catch (EndOfFile &) { + eof = true; + } + } + if (strm.avail_in == 0) action = LZMA_FINISH; lzma_ret ret = lzma_code(&strm, action); - if (strm.avail_out == 0 || ret == 
LZMA_STREAM_END) { - res->append((char *) outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + if (strm.avail_out < outbuf.size()) { + sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out); + strm.next_out = outbuf.data(); + strm.avail_out = outbuf.size(); } - if (ret == LZMA_STREAM_END) - return res; + if (ret == LZMA_STREAM_END) return; if (ret != LZMA_OK) throw CompressionError("error %d while decompressing xz file", ret); } } -static ref<std::string> decompressBzip2(const std::string & in) +static void decompressBzip2(Source & source, Sink & sink) { bz_stream strm; memset(&strm, 0, sizeof(strm)); @@ -63,62 +93,129 @@ static ref<std::string> decompressBzip2(const std::string & in) Finally free([&]() { BZ2_bzDecompressEnd(&strm); }); - char outbuf[BUFSIZ]; - ref<std::string> res = make_ref<std::string>(); - strm.next_in = (char *) in.c_str(); - strm.avail_in = in.size(); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + std::vector<char> inbuf(bufSize), outbuf(bufSize); + strm.next_in = nullptr; + strm.avail_in = 0; + strm.next_out = outbuf.data(); + strm.avail_out = outbuf.size(); + bool eof = false; while (true) { checkInterrupt(); + if (strm.avail_in == 0 && !eof) { + strm.next_in = inbuf.data(); + try { + strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size()); + } catch (EndOfFile &) { + eof = true; + } + } + int ret = BZ2_bzDecompress(&strm); - if (strm.avail_out == 0 || ret == BZ_STREAM_END) { - res->append(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); + if (strm.avail_in == 0 && strm.avail_out == outbuf.size() && eof) + throw CompressionError("bzip2 data ends prematurely"); + + if (strm.avail_out < outbuf.size()) { + sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out); + strm.next_out = outbuf.data(); + strm.avail_out = outbuf.size(); } - if (ret == BZ_STREAM_END) - return res; + if (ret == BZ_STREAM_END) return; if (ret != BZ_OK) throw CompressionError("error while decompressing bzip2 file"); - - if (strm.avail_in == 0) - throw CompressionError("bzip2 data ends prematurely"); } } -static ref<std::string> decompressBrotli(const std::string & in) +static void decompressBrotli(Source & source, Sink & sink) { - // FIXME: use libbrotli - return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in})); +#if !HAVE_BROTLI + RunOptions options(BROTLI, {"-d"}); + options.standardIn = &source; + options.standardOut = &sink; + runProgram2(options); +#else + auto *s = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + if (!s) + throw CompressionError("unable to initialize brotli decoder"); + + Finally free([s]() { BrotliDecoderDestroyInstance(s); }); + + std::vector<uint8_t> inbuf(bufSize), outbuf(bufSize); + const uint8_t * next_in = nullptr; + size_t avail_in = 0; + bool eof = false; + + while (true) { + checkInterrupt(); + + if (avail_in == 0 && !eof) { + next_in = inbuf.data(); + try { + avail_in = source.read((unsigned char *) next_in, inbuf.size()); + } catch (EndOfFile &) { + eof = true; + } + } + + uint8_t * next_out = outbuf.data(); + size_t avail_out = outbuf.size(); + + auto ret = BrotliDecoderDecompressStream(s, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr); + + switch (ret) { + case BROTLI_DECODER_RESULT_ERROR: + throw CompressionError("error while decompressing brotli file"); + case BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: + if (eof) + throw CompressionError("incomplete or corrupt 
brotli file"); + break; + case BROTLI_DECODER_RESULT_SUCCESS: + if (avail_in != 0) + throw CompressionError("unexpected input after brotli decompression"); + break; + case BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: + // I'm not sure if this can happen, but abort if this happens with empty buffer + if (avail_out == outbuf.size()) + throw CompressionError("brotli decompression requires larger buffer"); + break; + } + + // Always ensure we have full buffer for next invocation + if (avail_out < outbuf.size()) + sink((unsigned char *) outbuf.data(), outbuf.size() - avail_out); + + if (ret == BROTLI_DECODER_RESULT_SUCCESS) return; + } +#endif // HAVE_BROTLI } -ref<std::string> compress(const std::string & method, const std::string & in) +ref<std::string> decompress(const std::string & method, const std::string & in) { - StringSink ssink; - auto sink = makeCompressionSink(method, ssink); - (*sink)(in); - sink->finish(); - return ssink.s; + StringSource source(in); + StringSink sink; + decompress(method, source, sink); + return sink.s; } -ref<std::string> decompress(const std::string & method, const std::string & in) +void decompress(const std::string & method, Source & source, Sink & sink) { if (method == "none") - return make_ref<std::string>(in); + return decompressNone(source, sink); else if (method == "xz") - return decompressXZ(in); + return decompressXZ(source, sink); else if (method == "bzip2") - return decompressBzip2(in); + return decompressBzip2(source, sink); else if (method == "br") - return decompressBrotli(in); + return decompressBrotli(source, sink); else - throw UnknownCompressionMethod(format("unknown compression method '%s'") % method); + throw UnknownCompressionMethod("unknown compression method '%s'", method); } struct NoneSink : CompressionSink @@ -136,10 +233,9 @@ struct XzSink : CompressionSink lzma_stream strm = LZMA_STREAM_INIT; bool finished = false; - XzSink(Sink & nextSink) : nextSink(nextSink) - { - lzma_ret ret = lzma_easy_encoder( - &strm, 6, LZMA_CHECK_CRC64); + template <typename F> + XzSink(Sink & nextSink, F&& initEncoder) : nextSink(nextSink) { + lzma_ret ret = initEncoder(); if (ret != LZMA_OK) throw CompressionError("unable to initialise lzma encoder"); // FIXME: apply the x86 BCJ filter? @@ -147,6 +243,9 @@ struct XzSink : CompressionSink strm.next_out = outbuf; strm.avail_out = sizeof(outbuf); } + XzSink(Sink & nextSink) : XzSink(nextSink, [this]() { + return lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); + }) {} ~XzSink() { @@ -200,6 +299,27 @@ struct XzSink : CompressionSink } }; +#ifdef HAVE_LZMA_MT +struct ParallelXzSink : public XzSink +{ + ParallelXzSink(Sink &nextSink) : XzSink(nextSink, [this]() { + lzma_mt mt_options = {}; + mt_options.flags = 0; + mt_options.timeout = 300; // Using the same setting as the xz cmd line + mt_options.preset = LZMA_PRESET_DEFAULT; + mt_options.filters = NULL; + mt_options.check = LZMA_CHECK_CRC64; + mt_options.threads = lzma_cputhreads(); + mt_options.block_size = 0; + if (mt_options.threads == 0) + mt_options.threads = 1; + // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the + // number of threads. + return lzma_stream_encoder_mt(&strm, &mt_options); + }) {} +}; +#endif + struct BzipSink : CompressionSink { Sink & nextSink; @@ -249,7 +369,20 @@ struct BzipSink : CompressionSink void write(const unsigned char * data, size_t len) override { + /* Bzip2's 'avail_in' parameter is an unsigned int, so we need + to split the input into chunks of at most 4 GiB. 
*/ + while (len) { + auto n = std::min((size_t) std::numeric_limits<decltype(strm.avail_in)>::max(), len); + writeInternal(data, n); + data += n; + len -= n; + } + } + + void writeInternal(const unsigned char * data, size_t len) + { assert(!finished); + assert(len <= std::numeric_limits<decltype(strm.avail_in)>::max()); strm.next_in = (char *) data; strm.avail_in = len; @@ -270,36 +403,140 @@ struct BzipSink : CompressionSink } }; -struct BrotliSink : CompressionSink +struct LambdaCompressionSink : CompressionSink { Sink & nextSink; std::string data; + using CompressFnTy = std::function<std::string(const std::string&)>; + CompressFnTy compressFn; + LambdaCompressionSink(Sink& nextSink, CompressFnTy compressFn) + : nextSink(nextSink) + , compressFn(std::move(compressFn)) + { + }; + + void finish() override + { + flush(); + nextSink(compressFn(data)); + } + + void write(const unsigned char * data, size_t len) override + { + checkInterrupt(); + this->data.append((const char *) data, len); + } +}; + +struct BrotliCmdSink : LambdaCompressionSink +{ + BrotliCmdSink(Sink& nextSink) + : LambdaCompressionSink(nextSink, [](const std::string& data) { + return runProgram(BROTLI, true, {}, data); + }) + { + } +}; + +#if HAVE_BROTLI +struct BrotliSink : CompressionSink +{ + Sink & nextSink; + uint8_t outbuf[BUFSIZ]; + BrotliEncoderState *state; + bool finished = false; BrotliSink(Sink & nextSink) : nextSink(nextSink) { + state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); + if (!state) + throw CompressionError("unable to initialise brotli encoder"); } ~BrotliSink() { + BrotliEncoderDestroyInstance(state); } - // FIXME: use libbrotli - void finish() override { flush(); - nextSink(runProgram(BRO, true, {}, data)); + assert(!finished); + + const uint8_t *next_in = nullptr; + size_t avail_in = 0; + uint8_t *next_out = outbuf; + size_t avail_out = sizeof(outbuf); + while (!finished) { + checkInterrupt(); + + if (!BrotliEncoderCompressStream(state, + BROTLI_OPERATION_FINISH, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while finishing brotli file"); + + finished = BrotliEncoderIsFinished(state); + if (avail_out == 0 || finished) { + nextSink(outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + } } void write(const unsigned char * data, size_t len) override { - checkInterrupt(); - this->data.append((const char *) data, len); + // Don't feed brotli too much at once + const size_t CHUNK_SIZE = sizeof(outbuf) << 2; + while (len) { + size_t n = std::min(CHUNK_SIZE, len); + writeInternal(data, n); + data += n; + len -= n; + } + } + + void writeInternal(const unsigned char * data, size_t len) + { + assert(!finished); + + const uint8_t *next_in = data; + size_t avail_in = len; + uint8_t *next_out = outbuf; + size_t avail_out = sizeof(outbuf); + + while (avail_in > 0) { + checkInterrupt(); + + if (!BrotliEncoderCompressStream(state, + BROTLI_OPERATION_PROCESS, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while compressing brotli file"); + + if (avail_out < sizeof(outbuf) || avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + } } }; +#endif // HAVE_BROTLI -ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink) +ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel) { + if (parallel) { +#ifdef HAVE_LZMA_MT + if (method == 
"xz") + return make_ref<ParallelXzSink>(nextSink); +#endif + printMsg(lvlError, format("Warning: parallel compression requested but not supported for method '%1%', falling back to single-threaded compression") % method); + } + if (method == "none") return make_ref<NoneSink>(nextSink); else if (method == "xz") @@ -307,9 +544,22 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next else if (method == "bzip2") return make_ref<BzipSink>(nextSink); else if (method == "br") +#if HAVE_BROTLI return make_ref<BrotliSink>(nextSink); +#else + return make_ref<BrotliCmdSink>(nextSink); +#endif else throw UnknownCompressionMethod(format("unknown compression method '%s'") % method); } +ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel) +{ + StringSink ssink; + auto sink = makeCompressionSink(method, ssink, parallel); + (*sink)(in); + sink->finish(); + return ssink.s; +} + } diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index e3e6f5a99303..f7a3e3fbd32e 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -8,16 +8,18 @@ namespace nix { -ref<std::string> compress(const std::string & method, const std::string & in); - ref<std::string> decompress(const std::string & method, const std::string & in); +void decompress(const std::string & method, Source & source, Sink & sink); + +ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false); + struct CompressionSink : BufferedSink { virtual void finish() = 0; }; -ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink); +ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false); MakeError(UnknownCompressionMethod, Error); diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 14c4cca031bb..ce6858f0d65a 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -7,10 +7,12 @@ namespace nix { void Config::set(const std::string & name, const std::string & value) { auto i = _settings.find(name); - if (i == _settings.end()) - throw UsageError("unknown setting '%s'", name); - i->second.setting->set(value); - i->second.setting->overriden = true; + if (i == _settings.end()) { + extras.emplace(name, value); + } else { + i->second.setting->set(value); + i->second.setting->overriden = true; + } } void Config::addSetting(AbstractSetting * setting) @@ -21,34 +23,34 @@ void Config::addSetting(AbstractSetting * setting) bool set = false; - auto i = initials.find(setting->name); - if (i != initials.end()) { + auto i = extras.find(setting->name); + if (i != extras.end()) { setting->set(i->second); setting->overriden = true; - initials.erase(i); + extras.erase(i); set = true; } for (auto & alias : setting->aliases) { - auto i = initials.find(alias); - if (i != initials.end()) { + auto i = extras.find(alias); + if (i != extras.end()) { if (set) warn("setting '%s' is set, but it's an alias of '%s' which is also set", alias, setting->name); else { setting->set(i->second); setting->overriden = true; - initials.erase(i); + extras.erase(i); set = true; } } } } -void Config::warnUnknownSettings() +void Config::handleUnknownSettings() { - for (auto & i : initials) - warn("unknown setting '%s'", i.first); + for (auto & s : extras) + warn("unknown setting '%s'", s.first); } StringMap Config::getSettings(bool overridenOnly) @@ -60,7 +62,7 @@ StringMap Config::getSettings(bool overridenOnly) return res; } -void 
Config::applyConfigFile(const Path & path, bool fatal) +void Config::applyConfigFile(const Path & path) { try { string contents = readFile(path); @@ -80,7 +82,31 @@ void Config::applyConfigFile(const Path & path, bool fatal) vector<string> tokens = tokenizeString<vector<string> >(line); if (tokens.empty()) continue; - if (tokens.size() < 2 || tokens[1] != "=") + if (tokens.size() < 2) + throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); + + auto include = false; + auto ignoreMissing = false; + if (tokens[0] == "include") + include = true; + else if (tokens[0] == "!include") { + include = true; + ignoreMissing = true; + } + + if (include) { + if (tokens.size() != 2) + throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); + auto p = absPath(tokens[1], dirOf(path)); + if (pathExists(p)) { + applyConfigFile(p); + } else if (!ignoreMissing) { + throw Error("file '%1%' included from '%2%' not found", p, path); + } + continue; + } + + if (tokens[1] != "=") throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); string name = tokens[0]; @@ -88,12 +114,7 @@ void Config::applyConfigFile(const Path & path, bool fatal) vector<string>::iterator i = tokens.begin(); advance(i, 2); - try { - set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow - } catch (UsageError & e) { - if (fatal) throw; - warn("in configuration file '%s': %s", path, e.what()); - } + set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow }; } catch (SysError &) { } } @@ -152,7 +173,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category) .longName(name) .description(description) .arity(1) - .handler([=](std::vector<std::string> ss) { set(ss[0]); }) + .handler([=](std::vector<std::string> ss) { overriden = true; set(ss[0]); }) .category(category); } @@ -201,12 +222,12 @@ template<> void BaseSetting<bool>::convertToArg(Args & args, const std::string & args.mkFlag() .longName(name) .description(description) - .handler([=](std::vector<std::string> ss) { value = true; }) + .handler([=](std::vector<std::string> ss) { override(true); }) .category(category); args.mkFlag() .longName("no-" + name) .description(description) - .handler([=](std::vector<std::string> ss) { value = false; }) + .handler([=](std::vector<std::string> ss) { override(false); }) .category(category); } diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 99850c1cdfd5..d2e7faf17434 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -48,25 +48,25 @@ private: Settings _settings; - StringMap initials; + StringMap extras; public: Config(const StringMap & initials) - : initials(initials) + : extras(initials) { } void set(const std::string & name, const std::string & value); void addSetting(AbstractSetting * setting); - void warnUnknownSettings(); + void handleUnknownSettings(); StringMap getSettings(bool overridenOnly = false); const Settings & _getSettings() { return _settings; } - void applyConfigFile(const Path & path, bool fatal = false); + void applyConfigFile(const Path & path); void resetOverriden(); @@ -142,6 +142,12 @@ public: void set(const std::string & str) override; + virtual void override(const T & v) + { + overriden = true; + value = v; + } + std::string to_string() override; void convertToArg(Args & args, const std::string & category) override; diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index 11e3c9dca58a..9d82f13a5e38 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -82,7 +82,7 @@ 
static string printHash32(const Hash & hash) string s; s.reserve(len); - for (int n = len - 1; n >= 0; n--) { + for (int n = (int) len - 1; n >= 0; n--) { unsigned int b = n * 5; unsigned int i = b / 8; unsigned int j = b % 8; @@ -189,7 +189,9 @@ Hash::Hash(const std::string & s, HashType type) else if (size == base64Len()) { auto d = base64Decode(std::string(s, pos)); - assert(d.size() == hashSize); + if (d.size() != hashSize) + throw BadHash("invalid base-64 hash '%s'", s); + assert(hashSize); memcpy(hash, d.data(), hashSize); } @@ -255,12 +257,12 @@ Hash hashFile(HashType ht, const Path & path) AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); if (!fd) throw SysError(format("opening file '%1%'") % path); - unsigned char buf[8192]; + std::vector<unsigned char> buf(8192); ssize_t n; - while ((n = read(fd.get(), buf, sizeof(buf)))) { + while ((n = read(fd.get(), buf.data(), buf.size()))) { checkInterrupt(); if (n == -1) throw SysError(format("reading file '%1%'") % path); - update(ht, ctx, buf, n); + update(ht, ctx, buf.data(), n); } finish(ht, ctx, hash.hash); diff --git a/src/libutil/local.mk b/src/libutil/local.mk index 0721b21c2089..824f48fbfc9f 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -6,8 +6,8 @@ libutil_DIR := $(d) libutil_SOURCES := $(wildcard $(d)/*.cc) -libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) +libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) -lboost_context libutil_LIBS = libformat -libutil_CXXFLAGS = -DBRO=\"$(bro)\" +libutil_CXXFLAGS = -DBROTLI=\"$(brotli)\" diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 011155871122..799c6e1ae441 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -6,7 +6,16 @@ namespace nix { -thread_local ActivityId curActivity = 0; +static thread_local ActivityId curActivity = 0; + +ActivityId getCurActivity() +{ + return curActivity; +} +void setCurActivity(const ActivityId activityId) +{ + curActivity = activityId; +} Logger * logger = makeDefaultLogger(); @@ -44,11 +53,12 @@ public: prefix = std::string("<") + c + ">"; } - writeToStderr(prefix + (tty ? fs.s : filterANSIEscapes(fs.s)) + "\n"); + writeToStderr(prefix + filterANSIEscapes(fs.s, !tty) + "\n"); } void startActivity(ActivityId act, Verbosity lvl, ActivityType type, const std::string & s, const Fields & fields, ActivityId parent) + override { if (lvl <= verbosity && !s.empty()) log(lvl, s + "..."); @@ -220,4 +230,12 @@ bool handleJSONLogMessage(const std::string & msg, return true; } +Activity::~Activity() { + try { + logger.stopActivity(id); + } catch (...) 
{ + ignoreException(); + } +} + } diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 677aa4daec4d..678703102e9b 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -77,7 +77,8 @@ public: virtual void result(ActivityId act, ResultType type, const Fields & fields) { }; }; -extern thread_local ActivityId curActivity; +ActivityId getCurActivity(); +void setCurActivity(const ActivityId activityId); struct Activity { @@ -86,16 +87,15 @@ struct Activity const ActivityId id; Activity(Logger & logger, Verbosity lvl, ActivityType type, const std::string & s = "", - const Logger::Fields & fields = {}, ActivityId parent = curActivity); + const Logger::Fields & fields = {}, ActivityId parent = getCurActivity()); Activity(Logger & logger, ActivityType type, - const Logger::Fields & fields = {}, ActivityId parent = curActivity) + const Logger::Fields & fields = {}, ActivityId parent = getCurActivity()) : Activity(logger, lvlError, type, "", fields, parent) { }; Activity(const Activity & act) = delete; - ~Activity() - { logger.stopActivity(id); } + ~Activity(); void progress(uint64_t done = 0, uint64_t expected = 0, uint64_t running = 0, uint64_t failed = 0) const { result(resProgress, done, expected, running, failed); } @@ -122,8 +122,8 @@ struct Activity struct PushActivity { const ActivityId prevAct; - PushActivity(ActivityId act) : prevAct(curActivity) { curActivity = act; } - ~PushActivity() { curActivity = prevAct; } + PushActivity(ActivityId act) : prevAct(getCurActivity()) { setCurActivity(act); } + ~PushActivity() { setCurActivity(prevAct); } }; extern Logger * logger; diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh index 3cb5d50889d9..9b8290e634c9 100644 --- a/src/libutil/lru-cache.hh +++ b/src/libutil/lru-cache.hh @@ -2,6 +2,7 @@ #include <map> #include <list> +#include <experimental/optional> namespace nix { @@ -63,18 +64,17 @@ public: /* Look up an item in the cache. If it exists, it becomes the most recently used item. */ - // FIXME: use boost::optional? - Value * get(const Key & key) + std::experimental::optional<Value> get(const Key & key) { auto i = data.find(key); - if (i == data.end()) return 0; + if (i == data.end()) return {}; /* Move this item to the back of the LRU list. */ lru.erase(i->second.first.it); auto j = lru.insert(lru.end(), i); i->second.first.it = j; - return &i->second.second; + return i->second.second; } size_t size() diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh index e0ec66c01803..5ee0b88ef50f 100644 --- a/src/libutil/monitor-fd.hh +++ b/src/libutil/monitor-fd.hh @@ -21,13 +21,29 @@ public: MonitorFdHup(int fd) { thread = std::thread([fd]() { - /* Wait indefinitely until a POLLHUP occurs. */ - struct pollfd fds[1]; - fds[0].fd = fd; - fds[0].events = 0; - if (poll(fds, 1, -1) == -1) abort(); // can't happen - assert(fds[0].revents & POLLHUP); - triggerInterrupt(); + while (true) { + /* Wait indefinitely until a POLLHUP occurs. */ + struct pollfd fds[1]; + fds[0].fd = fd; + /* This shouldn't be necessary, but macOS doesn't seem to + like a zeroed out events field. + See rdar://37537852. + */ + fds[0].events = POLLHUP; + auto count = poll(fds, 1, -1); + if (count == -1) abort(); // can't happen + /* This shouldn't happen, but can on macOS due to a bug. + See rdar://37550628. + + This may eventually need a delay or further + coordination with the main thread if spinning proves + too harmful. 
+ */ + if (count == 0) continue; + assert(fds[0].revents & POLLHUP); + triggerInterrupt(); + break; + } }); }; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 950e6362a245..21803edd056a 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -5,6 +5,8 @@ #include <cerrno> #include <memory> +#include <boost/coroutine2/coroutine.hpp> + namespace nix { @@ -67,7 +69,8 @@ void FdSink::write(const unsigned char * data, size_t len) try { writeFull(fd, data, len); } catch (SysError & e) { - _good = true; + _good = false; + throw; } } @@ -87,6 +90,23 @@ void Source::operator () (unsigned char * data, size_t len) } +std::string Source::drain() +{ + std::string s; + std::vector<unsigned char> buf(8192); + while (true) { + size_t n; + try { + n = read(buf.data(), buf.size()); + s.append((char *) buf.data(), n); + } catch (EndOfFile &) { + break; + } + } + return s; +} + + size_t BufferedSource::read(unsigned char * data, size_t len) { if (!buffer) buffer = decltype(buffer)(new unsigned char[bufSize]); @@ -113,7 +133,7 @@ size_t FdSource::readUnbuffered(unsigned char * data, size_t len) ssize_t n; do { checkInterrupt(); - n = ::read(fd, (char *) data, bufSize); + n = ::read(fd, (char *) data, len); } while (n == -1 && errno == EINTR); if (n == -1) { _good = false; throw SysError("reading from file"); } if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); } @@ -137,6 +157,50 @@ size_t StringSource::read(unsigned char * data, size_t len) } +std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun) +{ + struct SinkToSource : Source + { + typedef boost::coroutines2::coroutine<std::string> coro_t; + + coro_t::pull_type coro; + + SinkToSource(std::function<void(Sink &)> fun) + : coro([&](coro_t::push_type & yield) { + LambdaSink sink([&](const unsigned char * data, size_t len) { + if (len) yield(std::string((const char *) data, len)); + }); + fun(sink); + }) + { + } + + std::string cur; + size_t pos = 0; + + size_t read(unsigned char * data, size_t len) override + { + if (!coro) + throw EndOfFile("coroutine has finished"); + + if (pos == cur.size()) { + if (!cur.empty()) coro(); + cur = coro.get(); + pos = 0; + } + + auto n = std::min(cur.size() - pos, len); + memcpy(data, (unsigned char *) cur.data() + pos, n); + pos += n; + + return n; + } + }; + + return std::make_unique<SinkToSource>(fun); +} + + void writePadding(size_t len, Sink & sink) { if (len % 8) { diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 2ea5b6354ee9..14b62fdb6774 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -56,11 +56,13 @@ struct Source void operator () (unsigned char * data, size_t len); /* Store up to ‘len’ in the buffer pointed to by ‘data’, and - return the number of bytes stored. If blocks until at least + return the number of bytes stored. It blocks until at least one byte is available. */ virtual size_t read(unsigned char * data, size_t len) = 0; virtual bool good() { return true; } + + std::string drain(); }; @@ -75,10 +77,12 @@ struct BufferedSource : Source size_t read(unsigned char * data, size_t len) override; - /* Underlying read call, to be overridden. */ - virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; bool hasData(); + +protected: + /* Underlying read call, to be overridden. 
*/ + virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; }; @@ -132,8 +136,9 @@ struct FdSource : BufferedSource return *this; } - size_t readUnbuffered(unsigned char * data, size_t len) override; bool good() override; +protected: + size_t readUnbuffered(unsigned char * data, size_t len) override; private: bool _good = true; }; @@ -175,6 +180,43 @@ struct TeeSource : Source }; +/* Convert a function into a sink. */ +struct LambdaSink : Sink +{ + typedef std::function<void(const unsigned char *, size_t)> lambda_t; + + lambda_t lambda; + + LambdaSink(const lambda_t & lambda) : lambda(lambda) { } + + virtual void operator () (const unsigned char * data, size_t len) + { + lambda(data, len); + } +}; + + +/* Convert a function into a source. */ +struct LambdaSource : Source +{ + typedef std::function<size_t(unsigned char *, size_t)> lambda_t; + + lambda_t lambda; + + LambdaSource(const lambda_t & lambda) : lambda(lambda) { } + + size_t read(unsigned char * data, size_t len) override + { + return lambda(data, len); + } +}; + + +/* Convert a function that feeds data into a Sink into a Source. The + Source executes the function as a coroutine. */ +std::unique_ptr<Source> sinkToSource(std::function<void(Sink &)> fun); + + void writePadding(size_t len, Sink & sink); void writeString(const unsigned char * buf, size_t len, Sink & sink); @@ -188,7 +230,7 @@ inline Sink & operator << (Sink & sink, uint64_t n) buf[4] = (n >> 32) & 0xff; buf[5] = (n >> 40) & 0xff; buf[6] = (n >> 48) & 0xff; - buf[7] = (n >> 56) & 0xff; + buf[7] = (unsigned char) (n >> 56) & 0xff; sink(buf, sizeof(buf)); return sink; } @@ -220,7 +262,7 @@ T readNum(Source & source) if (n > std::numeric_limits<T>::max()) throw SerialisationError("serialised integer %d is too large for type '%s'", n, typeid(T).name()); - return n; + return (T) n; } diff --git a/src/libutil/util.cc b/src/libutil/util.cc index f56153cd4a8a..15962236ec65 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -3,6 +3,7 @@ #include "affinity.hh" #include "sync.hh" #include "finally.hh" +#include "serialise.hh" #include <cctype> #include <cerrno> @@ -73,6 +74,13 @@ std::map<std::string, std::string> getEnv() } +void clearEnv() +{ + for (auto & name : getEnv()) + unsetenv(name.first.c_str()); +} + + Path absPath(Path path, Path dir) { if (path[0] != '/') { @@ -192,6 +200,12 @@ bool isInDir(const Path & path, const Path & dir) } +bool isDirOrInDir(const Path & path, const Path & dir) +{ + return path == dir or isInDir(path, dir); +} + + struct stat lstat(const Path & path) { struct stat st; @@ -216,18 +230,18 @@ bool pathExists(const Path & path) Path readLink(const Path & path) { checkInterrupt(); - struct stat st = lstat(path); - if (!S_ISLNK(st.st_mode)) - throw Error(format("'%1%' is not a symlink") % path); - auto bufSize = std::max(st.st_size, (off_t) PATH_MAX + 1); - char buf[bufSize]; - ssize_t rlsize = readlink(path.c_str(), buf, bufSize); - if (rlsize == -1) - throw SysError(format("reading symbolic link '%1%'") % path); - else if (rlsize > bufSize) - throw Error(format("symbolic link '%1%' size overflow %2% > %3%") - % path % rlsize % bufSize); - return string(buf, rlsize); + std::vector<char> buf; + for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) { + buf.resize(bufSize); + ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); + if (rlSize == -1) + if (errno == EINVAL) + throw Error("'%1%' is not a symlink", path); + else + throw SysError("reading symbolic link '%1%'", path); + else if (rlSize < bufSize) + 
return string(buf.data(), rlSize); + } } @@ -281,10 +295,10 @@ string readFile(int fd) if (fstat(fd, &st) == -1) throw SysError("statting file"); - auto buf = std::make_unique<unsigned char[]>(st.st_size); - readFull(fd, buf.get(), st.st_size); + std::vector<unsigned char> buf(st.st_size); + readFull(fd, buf.data(), st.st_size); - return string((char *) buf.get(), st.st_size); + return string((char *) buf.data(), st.st_size); } @@ -426,10 +440,10 @@ Path createTempDir(const Path & tmpRoot, const Path & prefix, static Lazy<Path> getHome2([]() { Path homeDir = getEnv("HOME"); if (homeDir.empty()) { - char buf[16384]; + std::vector<char> buf(16384); struct passwd pwbuf; struct passwd * pw; - if (getpwuid_r(getuid(), &pwbuf, buf, sizeof(buf), &pw) != 0 + if (getpwuid_r(getuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 || !pw || !pw->pw_dir || !pw->pw_dir[0]) throw Error("cannot determine user's home directory"); homeDir = pw->pw_dir; @@ -554,21 +568,44 @@ void writeFull(int fd, const string & s, bool allowInterrupts) } -string drainFD(int fd) +string drainFD(int fd, bool block) +{ + StringSink sink; + drainFD(fd, sink, block); + return std::move(*sink.s); +} + + +void drainFD(int fd, Sink & sink, bool block) { - string result; - unsigned char buffer[4096]; + int saved; + + Finally finally([&]() { + if (!block) { + if (fcntl(fd, F_SETFL, saved) == -1) + throw SysError("making file descriptor blocking"); + } + }); + + if (!block) { + saved = fcntl(fd, F_GETFL); + if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) + throw SysError("making file descriptor non-blocking"); + } + + std::vector<unsigned char> buf(4096); while (1) { checkInterrupt(); - ssize_t rd = read(fd, buffer, sizeof buffer); + ssize_t rd = read(fd, buf.data(), buf.size()); if (rd == -1) { + if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; if (errno != EINTR) throw SysError("reading from file"); } else if (rd == 0) break; - else result.append((char *) buffer, rd); + else sink(buf.data(), rd); } - return result; } @@ -908,20 +945,47 @@ string runProgram(Path program, bool searchPath, const Strings & args, return res.second; } -std::pair<int, std::string> runProgram(const RunOptions & options) +std::pair<int, std::string> runProgram(const RunOptions & options_) +{ + RunOptions options(options_); + StringSink sink; + options.standardOut = &sink; + + int status = 0; + + try { + runProgram2(options); + } catch (ExecError & e) { + status = e.status; + } + + return {status, std::move(*sink.s)}; +} + +void runProgram2(const RunOptions & options) { checkInterrupt(); + assert(!(options.standardIn && options.input)); + + std::unique_ptr<Source> source_; + Source * source = options.standardIn; + + if (options.input) { + source_ = std::make_unique<StringSource>(*options.input); + source = source_.get(); + } + /* Create a pipe. */ Pipe out, in; - out.create(); - if (options.input) in.create(); + if (options.standardOut) out.create(); + if (source) in.create(); /* Fork. 
*/ Pid pid = startProcess([&]() { - if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) throw SysError("dupping stdout"); - if (options.input && dup2(in.readSide.get(), STDIN_FILENO) == -1) + if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1) throw SysError("dupping stdin"); Strings args_(options.args); @@ -949,11 +1013,20 @@ std::pair<int, std::string> runProgram(const RunOptions & options) }); - if (options.input) { + if (source) { in.readSide = -1; writerThread = std::thread([&]() { try { - writeFull(in.writeSide.get(), *options.input); + std::vector<unsigned char> buf(8 * 1024); + while (true) { + size_t n; + try { + n = source->read(buf.data(), buf.size()); + } catch (EndOfFile &) { + break; + } + writeFull(in.writeSide.get(), buf.data(), n); + } promise.set_value(); } catch (...) { promise.set_exception(std::current_exception()); @@ -962,15 +1035,17 @@ std::pair<int, std::string> runProgram(const RunOptions & options) }); } - string result = drainFD(out.readSide.get()); + if (options.standardOut) + drainFD(out.readSide.get(), *options.standardOut); /* Wait for the child to finish. */ int status = pid.wait(); /* Wait for the writer thread to finish. */ - if (options.input) promise.get_future().get(); + if (source) promise.get_future().get(); - return {status, result}; + if (status) + throw ExecError(status, fmt("program '%1%' %2%", options.program, statusToString(status))); } @@ -1173,36 +1248,51 @@ void ignoreException() } -string filterANSIEscapes(const string & s, bool nixOnly) +std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width) { - string t, r; - enum { stTop, stEscape, stCSI } state = stTop; - for (auto c : s) { - if (state == stTop) { - if (c == '\e') { - state = stEscape; - r = c; - } else - t += c; - } else if (state == stEscape) { - r += c; - if (c == '[') - state = stCSI; - else { - t += r; - state = stTop; + std::string t, e; + size_t w = 0; + auto i = s.begin(); + + while (w < (size_t) width && i != s.end()) { + + if (*i == '\e') { + std::string e; + e += *i++; + char last = 0; + + if (i != s.end() && *i == '[') { + e += *i++; + // eat parameter bytes + while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; + // eat intermediate bytes + while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; + // eat final byte + if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; + } else { + if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; } - } else { - r += c; - if (c >= 0x40 && c <= 0x7e) { - if (nixOnly && (c != 'p' && c != 'q' && c != 's' && c != 'a' && c != 'b')) - t += r; - state = stTop; - r.clear(); + + if (!filterAll && last == 'm') + t += e; + } + + else if (*i == '\t') { + i++; t += ' '; w++; + while (w < (size_t) width && w % 8) { + t += ' '; w++; } } + + else if (*i == '\r') + // do nothing for now + i++; + + else { + t += *i++; w++; + } } - t += r; + return t; } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index a3494e09b09b..743d238611fc 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -25,6 +25,9 @@ namespace nix { +struct Sink; +struct Source; + /* Return an environment variable. */ string getEnv(const string & key, const string & def = ""); @@ -32,6 +35,9 @@ string getEnv(const string & key, const string & def = ""); /* Get the entire environment. */ std::map<std::string, std::string> getEnv(); +/* Clear the environment. 
*/ +void clearEnv(); + /* Return an absolutized path, resolving paths relative to the specified directory, or the current directory otherwise. The path is also canonicalised. */ @@ -53,10 +59,12 @@ Path dirOf(const Path & path); following the final `/'. */ string baseNameOf(const Path & path); -/* Check whether a given path is a descendant of the given - directory. */ +/* Check whether 'path' is a descendant of 'dir'. */ bool isInDir(const Path & path, const Path & dir); +/* Check whether 'path' is equal to 'dir' or a descendant of 'dir'. */ +bool isDirOrInDir(const Path & path, const Path & dir); + /* Get status of `path'. */ struct stat lstat(const Path & path); @@ -143,8 +151,9 @@ MakeError(EndOfFile, Error) /* Read a file descriptor until EOF occurs. */ -string drainFD(int fd); +string drainFD(int fd, bool block = true); +void drainFD(int fd, Sink & sink, bool block = true); /* Automatic cleanup of resources. */ @@ -251,6 +260,8 @@ struct RunOptions bool searchPath = true; Strings args; std::experimental::optional<std::string> input; + Source * standardIn = nullptr; + Sink * standardOut = nullptr; bool _killStderr = false; RunOptions(const Path & program, const Strings & args) @@ -261,6 +272,8 @@ struct RunOptions std::pair<int, std::string> runProgram(const RunOptions & options); +void runProgram2(const RunOptions & options); + class ExecError : public Error { @@ -386,10 +399,14 @@ void ignoreException(); #define ANSI_BLUE "\e[34;1m" -/* Filter out ANSI escape codes from the given string. If ‘nixOnly’ is - set, only filter escape codes generated by Nixpkgs' stdenv (used to - denote nesting etc.). */ -string filterANSIEscapes(const string & s, bool nixOnly = false); +/* Truncate a string to 'width' printable characters. If 'filterAll' + is true, all ANSI escape sequences are filtered out. Otherwise, + some escape sequences (such as colour setting) are copied but not + included in the character count. Also, tabs are expanded to + spaces. */ +std::string filterANSIEscapes(const std::string & s, + bool filterAll = false, + unsigned int width = std::numeric_limits<unsigned int>::max()); /* Base64 encoding/decoding. */ diff --git a/src/libutil/xml-writer.cc b/src/libutil/xml-writer.cc index 98bd058d18be..e5cc2e9fc719 100644 --- a/src/libutil/xml-writer.cc +++ b/src/libutil/xml-writer.cc @@ -28,7 +28,7 @@ void XMLWriter::close() } -void XMLWriter::indent_(unsigned int depth) +void XMLWriter::indent_(size_t depth) { if (!indent) return; output << string(depth * 2, ' '); @@ -75,7 +75,7 @@ void XMLWriter::writeAttrs(const XMLAttrs & attrs) { for (auto & i : attrs) { output << " " << i.first << "=\""; - for (unsigned int j = 0; j < i.second.size(); ++j) { + for (size_t j = 0; j < i.second.size(); ++j) { char c = i.second[j]; if (c == '"') output << """; else if (c == '<') output << "<"; diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh index 3cefe3712c08..b98b445265a2 100644 --- a/src/libutil/xml-writer.hh +++ b/src/libutil/xml-writer.hh @@ -44,7 +44,7 @@ public: private: void writeAttrs(const XMLAttrs & attrs); - void indent_(unsigned int depth); + void indent_(size_t depth); }; diff --git a/src/linenoise/ConvertUTF.cpp b/src/linenoise/ConvertUTF.cpp new file mode 100644 index 000000000000..f7e5915d5e8f --- /dev/null +++ b/src/linenoise/ConvertUTF.cpp @@ -0,0 +1,542 @@ +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. 
No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* --------------------------------------------------------------------- + + Conversions between UTF32, UTF-16, and UTF-8. Source code file. + Author: Mark E. Davis, 1994. + Rev History: Rick McGowan, fixes & updates May 2001. + Sept 2001: fixed const & error conditions per + mods suggested by S. Parent & A. Lillich. + June 2002: Tim Dodd added detection and handling of incomplete + source sequences, enhanced error detection, added casts + to eliminate compiler warnings. + July 2003: slight mods to back out aggressive FFFE detection. + Jan 2004: updated switches in from-UTF8 conversions. + Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions. + + See the header file "ConvertUTF.h" for complete documentation. + +------------------------------------------------------------------------ */ + +#include "ConvertUTF.h" +#ifdef CVTUTF_DEBUG +#include <stdio.h> +#endif + +namespace linenoise_ng { + +static const int halfShift = 10; /* used for shifting by 10 bits */ + +static const UTF32 halfBase = 0x0010000UL; +static const UTF32 halfMask = 0x3FFUL; + +#define UNI_SUR_HIGH_START (UTF32)0xD800 +#define UNI_SUR_HIGH_END (UTF32)0xDBFF +#define UNI_SUR_LOW_START (UTF32)0xDC00 +#define UNI_SUR_LOW_END (UTF32)0xDFFF +#define false 0 +#define true 1 + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF32toUTF16 ( + const UTF32** sourceStart, const UTF32* sourceEnd, + char16_t** targetStart, char16_t* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF32* source = *sourceStart; + char16_t* target = *targetStart; + while (source < sourceEnd) { + UTF32 ch; + if (target >= targetEnd) { + result = targetExhausted; break; + } + ch = *source++; + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { + if (flags == strictConversion) { + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } else { + *target++ = UNI_REPLACEMENT_CHAR; + } + } else { + *target++ = (UTF16)ch; /* normal case */ + } + } else if (ch > UNI_MAX_LEGAL_UTF32) { + if (flags == strictConversion) { + result = sourceIllegal; + } else { + *target++ = UNI_REPLACEMENT_CHAR; + } + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. */ + if (target + 1 >= targetEnd) { + --source; /* Back up source pointer! 
*/ + result = targetExhausted; break; + } + ch -= halfBase; + *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); + *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); + } + } + *sourceStart = source; + *targetStart = target; + return result; +} + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF16toUTF32 ( + const UTF16** sourceStart, const UTF16* sourceEnd, + UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF16* source = *sourceStart; + UTF32* target = *targetStart; + UTF32 ch, ch2; + while (source < sourceEnd) { + const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ + ch = *source++; + /* If we have a surrogate pair, convert to UTF32 first. */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { + /* If the 16 bits following the high surrogate are in the source buffer... */ + if (source < sourceEnd) { + ch2 = *source; + /* If it's a low surrogate, convert to UTF32. */ + if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { + ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + + (ch2 - UNI_SUR_LOW_START) + halfBase; + ++source; + } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } + } else { /* We don't have the 16 bits following the high surrogate. */ + --source; /* return to the high surrogate */ + result = sourceExhausted; + break; + } + } else if (flags == strictConversion) { + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } + } + if (target >= targetEnd) { + source = oldSource; /* Back up source pointer! */ + result = targetExhausted; break; + } + *target++ = ch; + } + *sourceStart = source; + *targetStart = target; +#ifdef CVTUTF_DEBUG +if (result == sourceIllegal) { + fprintf(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2); + fflush(stderr); +} +#endif + return result; +} + +/* --------------------------------------------------------------------- */ + +/* + * Index into the table below with the first byte of a UTF-8 sequence to + * get the number of trailing bytes that are supposed to follow it. + * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is + * left as-is for anyone who may want to do such conversion, which was + * allowed in earlier algorithms. + */ +static const char trailingBytesForUTF8[256] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, + 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 +}; + +/* + * Magic values subtracted from a buffer value during UTF8 conversion. + * This table contains as many values as there might be trailing bytes + * in a UTF-8 sequence. 
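The "magic values" in offsetsFromUTF8 fold the UTF-8 length marker and the 10xxxxxx tags of the continuation bytes into a single subtraction. A small worked illustration of that trick (a sketch for this write-up, not code from the patch; it assumes only standard headers):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Decode the 3-byte UTF-8 sequence E2 82 AC (U+20AC, EURO SIGN) the way
        // ConvertUTF8toUTF32 does: accumulate the raw bytes, shifting 6 bits per
        // trailing byte, then subtract the precomputed offset that strips the
        // length marker and the continuation-byte tags in one go.
        uint32_t ch = 0;
        ch += 0xE2; ch <<= 6;
        ch += 0x82; ch <<= 6;
        ch += 0xAC;
        ch -= 0x000E2080UL;   // offsetsFromUTF8[2], i.e. two trailing bytes
        assert(ch == 0x20AC);
        return 0;
    }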
+ */ +static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, + 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; + +/* + * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed + * into the first byte, depending on how many bytes follow. There are + * as many entries in this table as there are UTF-8 sequence types. + * (I.e., one byte sequence, two byte... etc.). Remember that sequencs + * for *legal* UTF-8 will be 4 or fewer bytes total. + */ +static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; + +/* --------------------------------------------------------------------- */ + +/* The interface converts a whole buffer to avoid function-call overhead. + * Constants have been gathered. Loops & conditionals have been removed as + * much as possible for efficiency, in favor of drop-through switches. + * (See "Note A" at the bottom of the file for equivalent code.) + * If your compiler supports it, the "isLegalUTF8" call can be turned + * into an inline function. + */ + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF16toUTF8 ( + const UTF16** sourceStart, const UTF16* sourceEnd, + UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF16* source = *sourceStart; + UTF8* target = *targetStart; + while (source < sourceEnd) { + UTF32 ch; + unsigned short bytesToWrite = 0; + const UTF32 byteMask = 0xBF; + const UTF32 byteMark = 0x80; + const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ + ch = *source++; + /* If we have a surrogate pair, convert to UTF32 first. */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { + /* If the 16 bits following the high surrogate are in the source buffer... */ + if (source < sourceEnd) { + UTF32 ch2 = *source; + /* If it's a low surrogate, convert to UTF32. */ + if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { + ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + + (ch2 - UNI_SUR_LOW_START) + halfBase; + ++source; + } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } + } else { /* We don't have the 16 bits following the high surrogate. */ + --source; /* return to the high surrogate */ + result = sourceExhausted; + break; + } + } else if (flags == strictConversion) { + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } + } + /* Figure out how many bytes the result will require */ + if (ch < (UTF32)0x80) { bytesToWrite = 1; + } else if (ch < (UTF32)0x800) { bytesToWrite = 2; + } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; + } else if (ch < (UTF32)0x110000) { bytesToWrite = 4; + } else { bytesToWrite = 3; + ch = UNI_REPLACEMENT_CHAR; + } + + target += bytesToWrite; + if (target > targetEnd) { + source = oldSource; /* Back up source pointer! */ + target -= bytesToWrite; result = targetExhausted; break; + } + switch (bytesToWrite) { /* note: everything falls through. 
*/ + case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 1: *--target = (UTF8)(ch | firstByteMark[bytesToWrite]); + } + target += bytesToWrite; + } + *sourceStart = source; + *targetStart = target; + return result; +} + +/* --------------------------------------------------------------------- */ + +/* + * Utility routine to tell whether a sequence of bytes is legal UTF-8. + * This must be called with the length pre-determined by the first byte. + * If not calling this from ConvertUTF8to*, then the length can be set by: + * length = trailingBytesForUTF8[*source]+1; + * and the sequence is illegal right away if there aren't that many bytes + * available. + * If presented with a length > 4, this returns false. The Unicode + * definition of UTF-8 goes up to 4-byte sequences. + */ + +static Boolean isLegalUTF8(const UTF8 *source, int length) { + UTF8 a; + const UTF8 *srcptr = source+length; + switch (length) { + default: return false; + /* Everything else falls through when "true"... */ + case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; + case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; + case 2: if ((a = (*--srcptr)) > 0xBF) return false; + + switch (*source) { + /* no fall-through in this inner switch */ + case 0xE0: if (a < 0xA0) return false; break; + case 0xED: if (a > 0x9F) return false; break; + case 0xF0: if (a < 0x90) return false; break; + case 0xF4: if (a > 0x8F) return false; break; + default: if (a < 0x80) return false; + } + + case 1: if (*source >= 0x80 && *source < 0xC2) return false; + } + if (*source > 0xF4) return false; + return true; +} + +/* --------------------------------------------------------------------- */ + +/* + * Exported function to return whether a UTF-8 sequence is legal or not. + * This is not used here; it's just exported. + */ +Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) { + int length = trailingBytesForUTF8[*source]+1; + if (source+length > sourceEnd) { + return false; + } + return isLegalUTF8(source, length); +} + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF8toUTF16 ( + const UTF8** sourceStart, const UTF8* sourceEnd, + UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF8* source = *sourceStart; + UTF16* target = *targetStart; + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + result = sourceExhausted; break; + } + /* Do this check whether lenient or strict */ + if (! isLegalUTF8(source, extraBytesToRead+1)) { + result = sourceIllegal; + break; + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (target >= targetEnd) { + source -= (extraBytesToRead+1); /* Back up source pointer! 
*/ + result = targetExhausted; break; + } + if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { + if (flags == strictConversion) { + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + result = sourceIllegal; + break; + } else { + *target++ = UNI_REPLACEMENT_CHAR; + } + } else { + *target++ = (UTF16)ch; /* normal case */ + } + } else if (ch > UNI_MAX_UTF16) { + if (flags == strictConversion) { + result = sourceIllegal; + source -= (extraBytesToRead+1); /* return to the start */ + break; /* Bail out; shouldn't continue */ + } else { + *target++ = UNI_REPLACEMENT_CHAR; + } + } else { + /* target is a character in range 0xFFFF - 0x10FFFF. */ + if (target + 1 >= targetEnd) { + source -= (extraBytesToRead+1); /* Back up source pointer! */ + result = targetExhausted; break; + } + ch -= halfBase; + *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); + *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); + } + } + *sourceStart = source; + *targetStart = target; + return result; +} + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF32toUTF8 ( + const UTF32** sourceStart, const UTF32* sourceEnd, + UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF32* source = *sourceStart; + UTF8* target = *targetStart; + while (source < sourceEnd) { + UTF32 ch; + unsigned short bytesToWrite = 0; + const UTF32 byteMask = 0xBF; + const UTF32 byteMark = 0x80; + ch = *source++; + if (flags == strictConversion ) { + /* UTF-16 surrogate values are illegal in UTF-32 */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { + --source; /* return to the illegal value itself */ + result = sourceIllegal; + break; + } + } + /* + * Figure out how many bytes the result will require. Turn any + * illegally large UTF32 things (> Plane 17) into replacement chars. + */ + if (ch < (UTF32)0x80) { bytesToWrite = 1; + } else if (ch < (UTF32)0x800) { bytesToWrite = 2; + } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; + } else if (ch <= UNI_MAX_LEGAL_UTF32) { bytesToWrite = 4; + } else { bytesToWrite = 3; + ch = UNI_REPLACEMENT_CHAR; + result = sourceIllegal; + } + + target += bytesToWrite; + if (target > targetEnd) { + --source; /* Back up source pointer! */ + target -= bytesToWrite; result = targetExhausted; break; + } + switch (bytesToWrite) { /* note: everything falls through. */ + case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; + case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); + } + target += bytesToWrite; + } + *sourceStart = source; + *targetStart = target; + return result; +} + +/* --------------------------------------------------------------------- */ + +ConversionResult ConvertUTF8toUTF32 ( + const UTF8** sourceStart, const UTF8* sourceEnd, + UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { + ConversionResult result = conversionOK; + const UTF8* source = *sourceStart; + UTF32* target = *targetStart; + while (source < sourceEnd) { + UTF32 ch = 0; + unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; + if (source + extraBytesToRead >= sourceEnd) { + result = sourceExhausted; break; + } + /* Do this check whether lenient or strict */ + if (! 
isLegalUTF8(source, extraBytesToRead+1)) { + result = sourceIllegal; + break; + } + /* + * The cases all fall through. See "Note A" below. + */ + switch (extraBytesToRead) { + case 5: ch += *source++; ch <<= 6; + case 4: ch += *source++; ch <<= 6; + case 3: ch += *source++; ch <<= 6; + case 2: ch += *source++; ch <<= 6; + case 1: ch += *source++; ch <<= 6; + case 0: ch += *source++; + } + ch -= offsetsFromUTF8[extraBytesToRead]; + + if (target >= targetEnd) { + source -= (extraBytesToRead+1); /* Back up the source pointer! */ + result = targetExhausted; break; + } + if (ch <= UNI_MAX_LEGAL_UTF32) { + /* + * UTF-16 surrogate values are illegal in UTF-32, and anything + * over Plane 17 (> 0x10FFFF) is illegal. + */ + if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { + if (flags == strictConversion) { + source -= (extraBytesToRead+1); /* return to the illegal value itself */ + result = sourceIllegal; + break; + } else { + *target++ = UNI_REPLACEMENT_CHAR; + } + } else { + *target++ = ch; + } + } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */ + result = sourceIllegal; + *target++ = UNI_REPLACEMENT_CHAR; + } + } + *sourceStart = source; + *targetStart = target; + return result; +} + +} + +/* --------------------------------------------------------------------- + + Note A. + The fall-through switches in UTF-8 reading code save a + temp variable, some decrements & conditionals. The switches + are equivalent to the following loop: + { + int tmpBytesToRead = extraBytesToRead+1; + do { + ch += *source++; + --tmpBytesToRead; + if (tmpBytesToRead) ch <<= 6; + } while (tmpBytesToRead > 0); + } + In UTF-8 writing code, the switches on "bytesToWrite" are + similarly unrolled loops. + + --------------------------------------------------------------------- */ diff --git a/src/linenoise/ConvertUTF.h b/src/linenoise/ConvertUTF.h new file mode 100755 index 000000000000..8a296235dcd9 --- /dev/null +++ b/src/linenoise/ConvertUTF.h @@ -0,0 +1,162 @@ +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + +/* --------------------------------------------------------------------- + + Conversions between UTF32, UTF-16, and UTF-8. Header file. + + Several funtions are included here, forming a complete set of + conversions between the three formats. UTF-7 is not included + here, but is handled in a separate source file. + + Each of these routines takes pointers to input buffers and output + buffers. The input buffers are const. + + Each routine converts the text between *sourceStart and sourceEnd, + putting the result into the buffer between *targetStart and + targetEnd. Note: the end pointers are *after* the last item: e.g. + *(sourceEnd - 1) is the last item. 
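The calling convention spelled out here, pointer-to-pointer source and target that are advanced past the last unit converted, with the end pointers one past the final element, looks like this from the caller's side. A usage sketch only, assuming the declarations below and linking against ConvertUTF.cpp:

    #include <string>
    #include <vector>
    #include "ConvertUTF.h"

    using namespace linenoise_ng;

    // Convert a UTF-8 std::string to UTF-16 code units; empty result on malformed input.
    static std::vector<UTF16> toUTF16(const std::string & s)
    {
        std::vector<UTF16> out(s.size() + 1);      // UTF-16 never needs more units than UTF-8 bytes
        const UTF8 * src = (const UTF8 *) s.data();
        UTF16 * dst = out.data();
        ConversionResult res = ConvertUTF8toUTF16(
            &src, src + s.size(),                  // source range; end is one past the last byte
            &dst, dst + out.size(),                // target range
            strictConversion);
        if (res != conversionOK) return {};
        out.resize(dst - out.data());              // dst was advanced past the last unit written
        return out;
    }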
+ + The return result indicates whether the conversion was successful, + and if not, whether the problem was in the source or target buffers. + (Only the first encountered problem is indicated.) + + After the conversion, *sourceStart and *targetStart are both + updated to point to the end of last text successfully converted in + the respective buffers. + + Input parameters: + sourceStart - pointer to a pointer to the source buffer. + The contents of this are modified on return so that + it points at the next thing to be converted. + targetStart - similarly, pointer to pointer to the target buffer. + sourceEnd, targetEnd - respectively pointers to the ends of the + two buffers, for overflow checking only. + + These conversion functions take a ConversionFlags argument. When this + flag is set to strict, both irregular sequences and isolated surrogates + will cause an error. When the flag is set to lenient, both irregular + sequences and isolated surrogates are converted. + + Whether the flag is strict or lenient, all illegal sequences will cause + an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>, + or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code + must check for illegal sequences. + + When the flag is set to lenient, characters over 0x10FFFF are converted + to the replacement character; otherwise (when the flag is set to strict) + they constitute an error. + + Output parameters: + The value "sourceIllegal" is returned from some routines if the input + sequence is malformed. When "sourceIllegal" is returned, the source + value will point to the illegal value that caused the problem. E.g., + in UTF-8 when a sequence is malformed, it points to the start of the + malformed sequence. + + Author: Mark E. Davis, 1994. + Rev History: Rick McGowan, fixes & updates May 2001. + Fixes & updates, Sept 2001. + +------------------------------------------------------------------------ */ + +/* --------------------------------------------------------------------- + The following 4 definitions are compiler-specific. + The C standard does not guarantee that wchar_t has at least + 16 bits, so wchar_t is no less portable than unsigned short! + All should be unsigned values to avoid sign extension during + bit mask & shift operations. +------------------------------------------------------------------------ */ + +#if 0 +typedef unsigned long UTF32; /* at least 32 bits */ +typedef unsigned short UTF16; /* at least 16 bits */ +typedef unsigned char UTF8; /* typically 8 bits */ +#endif + +#include <stdint.h> +#include <string> + +namespace linenoise_ng { + +typedef uint32_t UTF32; +typedef uint16_t UTF16; +typedef uint8_t UTF8; +typedef unsigned char Boolean; /* 0 or 1 */ + +/* Some fundamental constants */ +#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD +#define UNI_MAX_BMP (UTF32)0x0000FFFF +#define UNI_MAX_UTF16 (UTF32)0x0010FFFF +#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF +#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF + +typedef enum { + conversionOK, /* conversion successful */ + sourceExhausted, /* partial character in source, but hit end */ + targetExhausted, /* insuff. 
room in target for conversion */ + sourceIllegal /* source sequence is illegal/malformed */ +} ConversionResult; + +typedef enum { + strictConversion = 0, + lenientConversion +} ConversionFlags; + +// /* This is for C++ and does no harm in C */ +// #ifdef __cplusplus +// extern "C" { +// #endif + +ConversionResult ConvertUTF8toUTF16 ( + const UTF8** sourceStart, const UTF8* sourceEnd, + UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags); + +ConversionResult ConvertUTF16toUTF8 ( + const UTF16** sourceStart, const UTF16* sourceEnd, + UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags); + +ConversionResult ConvertUTF8toUTF32 ( + const UTF8** sourceStart, const UTF8* sourceEnd, + UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); + +ConversionResult ConvertUTF32toUTF8 ( + const UTF32** sourceStart, const UTF32* sourceEnd, + UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags); + +ConversionResult ConvertUTF16toUTF32 ( + const UTF16** sourceStart, const UTF16* sourceEnd, + UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); + +ConversionResult ConvertUTF32toUTF16 ( + const UTF32** sourceStart, const UTF32* sourceEnd, + char16_t** targetStart, char16_t* targetEnd, ConversionFlags flags); + +Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd); + +// #ifdef __cplusplus +// } +// #endif + +} + +/* --------------------------------------------------------------------- */ diff --git a/src/linenoise/LICENSE b/src/linenoise/LICENSE index 18e814865a54..b7c58c445860 100644 --- a/src/linenoise/LICENSE +++ b/src/linenoise/LICENSE @@ -1,25 +1,66 @@ -Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com> -Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com> +linenoise.cpp +============= + +Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com> +Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Redis nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +wcwidth.cpp +=========== + +Markus Kuhn -- 2007-05-26 (Unicode 5.0) + +Permission to use, copy, modify, and distribute this software +for any purpose and without fee is hereby granted. The author +disclaims all warranties with regard to this software. + + + +ConvertUTF.cpp +============== + +Copyright 2001-2004 Unicode, Inc. + +Disclaimer + +This source code is provided as is by Unicode, Inc. No claims are +made as to fitness for any particular purpose. No warranties of any +kind are expressed or implied. The recipient agrees to determine +applicability of information provided. If this file has been +purchased on magnetic or optical media from Unicode, Inc., the +sole remedy for any claim will be exchange of defective media +within 90 days of receipt. + +Limitations on Rights to Redistribute This Code + +Unicode, Inc. hereby grants the right to freely use the information +supplied in this file in the creation of products supporting the +Unicode Standard, and to make copies of this file in any form +for internal or external distribution as long as this notice +remains attached. diff --git a/src/linenoise/linenoise.c b/src/linenoise/linenoise.c deleted file mode 100644 index fce14a7c53a3..000000000000 --- a/src/linenoise/linenoise.c +++ /dev/null @@ -1,1199 +0,0 @@ -/* linenoise.c -- guerrilla line editing library against the idea that a - * line editing lib needs to be 20,000 lines of C code. - * - * You can find the latest source code at: - * - * http://github.com/antirez/linenoise - * - * Does a number of crazy assumptions that happen to be true in 99.9999% of - * the 2010 UNIX computers around. - * - * ------------------------------------------------------------------------ - * - * Copyright (c) 2010-2016, Salvatore Sanfilippo <antirez at gmail dot com> - * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com> - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * ------------------------------------------------------------------------ - * - * References: - * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html - * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html - * - * Todo list: - * - Filter bogus Ctrl+<char> combinations. - * - Win32 support - * - * Bloat: - * - History search like Ctrl+r in readline? - * - * List of escape sequences used by this program, we do everything just - * with three sequences. In order to be so cheap we may have some - * flickering effect with some slow terminal, but the lesser sequences - * the more compatible. - * - * EL (Erase Line) - * Sequence: ESC [ n K - * Effect: if n is 0 or missing, clear from cursor to end of line - * Effect: if n is 1, clear from beginning of line to cursor - * Effect: if n is 2, clear entire line - * - * CUF (CUrsor Forward) - * Sequence: ESC [ n C - * Effect: moves cursor forward n chars - * - * CUB (CUrsor Backward) - * Sequence: ESC [ n D - * Effect: moves cursor backward n chars - * - * The following is used to get the terminal width if getting - * the width with the TIOCGWINSZ ioctl fails - * - * DSR (Device Status Report) - * Sequence: ESC [ 6 n - * Effect: reports the current cusor position as ESC [ n ; m R - * where n is the row and m is the column - * - * When multi line mode is enabled, we also use an additional escape - * sequence. However multi line editing is disabled by default. - * - * CUU (Cursor Up) - * Sequence: ESC [ n A - * Effect: moves cursor up of n chars. - * - * CUD (Cursor Down) - * Sequence: ESC [ n B - * Effect: moves cursor down of n chars. - * - * When linenoiseClearScreen() is called, two additional escape sequences - * are used in order to clear the screen and position the cursor at home - * position. 
- * - * CUP (Cursor position) - * Sequence: ESC [ H - * Effect: moves the cursor to upper left corner - * - * ED (Erase display) - * Sequence: ESC [ 2 J - * Effect: clear the whole screen - * - */ - -#include <termios.h> -#include <unistd.h> -#include <stdlib.h> -#include <stdio.h> -#include <errno.h> -#include <string.h> -#include <stdlib.h> -#include <ctype.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <sys/ioctl.h> -#include <unistd.h> -#include "linenoise.h" - -#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 -#define LINENOISE_MAX_LINE 4096 -static char *unsupported_term[] = {"dumb","cons25","emacs",NULL}; -static linenoiseCompletionCallback *completionCallback = NULL; -static linenoiseHintsCallback *hintsCallback = NULL; -static linenoiseFreeHintsCallback *freeHintsCallback = NULL; - -static struct termios orig_termios; /* In order to restore at exit.*/ -static int rawmode = 0; /* For atexit() function to check if restore is needed*/ -static int mlmode = 0; /* Multi line mode. Default is single line. */ -static int atexit_registered = 0; /* Register atexit just 1 time. */ -static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN; -static int history_len = 0; -static char **history = NULL; - -/* The linenoiseState structure represents the state during line editing. - * We pass this state to functions implementing specific editing - * functionalities. */ -struct linenoiseState { - int ifd; /* Terminal stdin file descriptor. */ - int ofd; /* Terminal stdout file descriptor. */ - char *buf; /* Edited line buffer. */ - size_t buflen; /* Edited line buffer size. */ - const char *prompt; /* Prompt to display. */ - size_t plen; /* Prompt length. */ - size_t pos; /* Current cursor position. */ - size_t oldpos; /* Previous refresh cursor position. */ - size_t len; /* Current edited line length. */ - size_t cols; /* Number of columns in terminal. */ - size_t maxrows; /* Maximum num of rows used so far (multiline mode) */ - int history_index; /* The history index we are currently editing. */ -}; - -enum KEY_ACTION{ - KEY_NULL = 0, /* NULL */ - CTRL_A = 1, /* Ctrl+a */ - CTRL_B = 2, /* Ctrl-b */ - CTRL_C = 3, /* Ctrl-c */ - CTRL_D = 4, /* Ctrl-d */ - CTRL_E = 5, /* Ctrl-e */ - CTRL_F = 6, /* Ctrl-f */ - CTRL_H = 8, /* Ctrl-h */ - TAB = 9, /* Tab */ - CTRL_K = 11, /* Ctrl+k */ - CTRL_L = 12, /* Ctrl+l */ - ENTER = 13, /* Enter */ - CTRL_N = 14, /* Ctrl-n */ - CTRL_P = 16, /* Ctrl-p */ - CTRL_T = 20, /* Ctrl-t */ - CTRL_U = 21, /* Ctrl+u */ - CTRL_W = 23, /* Ctrl+w */ - ESC = 27, /* Escape */ - BACKSPACE = 127 /* Backspace */ -}; - -static void linenoiseAtExit(void); -int linenoiseHistoryAdd(const char *line); -static void refreshLine(struct linenoiseState *l); - -/* Debugging macro. */ -#if 0 -FILE *lndebug_fp = NULL; -#define lndebug(...) \ - do { \ - if (lndebug_fp == NULL) { \ - lndebug_fp = fopen("/tmp/lndebug.txt","a"); \ - fprintf(lndebug_fp, \ - "[%d %d %d] p: %d, rows: %d, rpos: %d, max: %d, oldmax: %d\n", \ - (int)l->len,(int)l->pos,(int)l->oldpos,plen,rows,rpos, \ - (int)l->maxrows,old_rows); \ - } \ - fprintf(lndebug_fp, ", " __VA_ARGS__); \ - fflush(lndebug_fp); \ - } while (0) -#else -#define lndebug(fmt, ...) -#endif - -/* ======================= Low level terminal handling ====================== */ - -/* Set if to use or not the multi line mode. */ -void linenoiseSetMultiLine(int ml) { - mlmode = ml; -} - -/* Return true if the terminal name is in the list of terminals we know are - * not able to understand basic escape sequences. 
*/ -static int isUnsupportedTerm(void) { - char *term = getenv("TERM"); - int j; - - if (term == NULL) return 0; - for (j = 0; unsupported_term[j]; j++) - if (!strcasecmp(term,unsupported_term[j])) return 1; - return 0; -} - -/* Raw mode: 1960 magic shit. */ -static int enableRawMode(int fd) { - struct termios raw; - - if (!isatty(STDIN_FILENO)) goto fatal; - if (!atexit_registered) { - atexit(linenoiseAtExit); - atexit_registered = 1; - } - if (tcgetattr(fd,&orig_termios) == -1) goto fatal; - - raw = orig_termios; /* modify the original mode */ - /* input modes: no break, no CR to NL, no parity check, no strip char, - * no start/stop output control. */ - raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); - /* output modes - disable post processing */ - raw.c_oflag &= ~(OPOST); - /* control modes - set 8 bit chars */ - raw.c_cflag |= (CS8); - /* local modes - choing off, canonical off, no extended functions, - * no signal chars (^Z,^C) */ - raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); - /* control chars - set return condition: min number of bytes and timer. - * We want read to return every single byte, without timeout. */ - raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ - - /* put terminal in raw mode after flushing */ - if (tcsetattr(fd,TCSAFLUSH,&raw) < 0) goto fatal; - rawmode = 1; - return 0; - -fatal: - errno = ENOTTY; - return -1; -} - -static void disableRawMode(int fd) { - /* Don't even check the return value as it's too late. */ - if (rawmode && tcsetattr(fd,TCSAFLUSH,&orig_termios) != -1) - rawmode = 0; -} - -/* Use the ESC [6n escape sequence to query the horizontal cursor position - * and return it. On error -1 is returned, on success the position of the - * cursor. */ -static int getCursorPosition(int ifd, int ofd) { - char buf[32]; - int cols, rows; - unsigned int i = 0; - - /* Report cursor location */ - if (write(ofd, "\x1b[6n", 4) != 4) return -1; - - /* Read the response: ESC [ rows ; cols R */ - while (i < sizeof(buf)-1) { - if (read(ifd,buf+i,1) != 1) break; - if (buf[i] == 'R') break; - i++; - } - buf[i] = '\0'; - - /* Parse it. */ - if (buf[0] != ESC || buf[1] != '[') return -1; - if (sscanf(buf+2,"%d;%d",&rows,&cols) != 2) return -1; - return cols; -} - -/* Try to get the number of columns in the current terminal, or assume 80 - * if it fails. */ -static int getColumns(int ifd, int ofd) { - struct winsize ws; - - if (ioctl(1, TIOCGWINSZ, &ws) == -1 || ws.ws_col == 0) { - /* ioctl() failed. Try to query the terminal itself. */ - int start, cols; - - /* Get the initial position so we can restore it later. */ - start = getCursorPosition(ifd,ofd); - if (start == -1) goto failed; - - /* Go to right margin and get position. */ - if (write(ofd,"\x1b[999C",6) != 6) goto failed; - cols = getCursorPosition(ifd,ofd); - if (cols == -1) goto failed; - - /* Restore position. */ - if (cols > start) { - char seq[32]; - snprintf(seq,32,"\x1b[%dD",cols-start); - if (write(ofd,seq,strlen(seq)) == -1) { - /* Can't recover... */ - } - } - return cols; - } else { - return ws.ws_col; - } - -failed: - return 80; -} - -/* Clear the screen. Used to handle ctrl+l */ -void linenoiseClearScreen(void) { - if (write(STDOUT_FILENO,"\x1b[H\x1b[2J",7) <= 0) { - /* nothing to do, just to avoid warning. */ - } -} - -/* Beep, used for completion when there is nothing to complete or when all - * the choices were already shown. 
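Everything the deleted implementation does to the screen is built from the few sequences listed in its header comment above: carriage return, EL ("erase to end of line") and CUF ("cursor forward"). A rough standalone sketch of that repaint idea, for illustration only (the function name and fixed buffer size are invented here; refreshSingleLine() further below is the real thing):

    #include <cstdio>
    #include <cstring>
    #include <unistd.h>

    // Repaint one line: prompt plus buffer, then park the cursor at column 'pos'.
    static void repaint(const char * prompt, const char * buf, int pos)
    {
        char out[256];
        int n = snprintf(out, sizeof(out),
            "\r"           // cursor to column 0
            "%s%s"         // prompt and current buffer content
            "\x1b[0K"      // EL: erase from cursor to end of line
            "\r\x1b[%dC",  // back to column 0, then CUF by (prompt + pos) columns
            prompt, buf, (int) strlen(prompt) + pos);
        if (n > 0) write(STDOUT_FILENO, out, n);
    }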
*/ -static void linenoiseBeep(void) { - fprintf(stderr, "\x7"); - fflush(stderr); -} - -/* ============================== Completion ================================ */ - -/* Free a list of completion option populated by linenoiseAddCompletion(). */ -static void freeCompletions(linenoiseCompletions *lc) { - size_t i; - for (i = 0; i < lc->len; i++) - free(lc->cvec[i]); - if (lc->cvec != NULL) - free(lc->cvec); -} - -/* This is an helper function for linenoiseEdit() and is called when the - * user types the <tab> key in order to complete the string currently in the - * input. - * - * The state of the editing is encapsulated into the pointed linenoiseState - * structure as described in the structure definition. */ -static int completeLine(struct linenoiseState *ls) { - linenoiseCompletions lc = { 0, NULL }; - int nread, nwritten; - char c = 0; - - completionCallback(ls->buf,&lc); - if (lc.len == 0) { - linenoiseBeep(); - } else { - size_t stop = 0, i = 0; - - while(!stop) { - /* Show completion or original buffer */ - if (i < lc.len) { - struct linenoiseState saved = *ls; - - ls->len = ls->pos = strlen(lc.cvec[i]); - ls->buf = lc.cvec[i]; - refreshLine(ls); - ls->len = saved.len; - ls->pos = saved.pos; - ls->buf = saved.buf; - } else { - refreshLine(ls); - } - - nread = read(ls->ifd,&c,1); - if (nread <= 0) { - freeCompletions(&lc); - return -1; - } - - switch(c) { - case 9: /* tab */ - i = (i+1) % (lc.len+1); - if (i == lc.len) linenoiseBeep(); - break; - case 27: /* escape */ - /* Re-show original buffer */ - if (i < lc.len) refreshLine(ls); - stop = 1; - break; - default: - /* Update buffer and return */ - if (i < lc.len) { - nwritten = snprintf(ls->buf,ls->buflen,"%s",lc.cvec[i]); - ls->len = ls->pos = nwritten; - } - stop = 1; - break; - } - } - } - - freeCompletions(&lc); - return c; /* Return last read character */ -} - -/* Register a callback function to be called for tab-completion. */ -void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) { - completionCallback = fn; -} - -/* Register a hits function to be called to show hits to the user at the - * right of the prompt. */ -void linenoiseSetHintsCallback(linenoiseHintsCallback *fn) { - hintsCallback = fn; -} - -/* Register a function to free the hints returned by the hints callback - * registered with linenoiseSetHintsCallback(). */ -void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *fn) { - freeHintsCallback = fn; -} - -/* This function is used by the callback function registered by the user - * in order to add completion options given the input string when the - * user typed <tab>. See the example.c source code for a very easy to - * understand example. */ -void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { - size_t len = strlen(str); - char *copy, **cvec; - - copy = malloc(len+1); - if (copy == NULL) return; - memcpy(copy,str,len+1); - cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1)); - if (cvec == NULL) { - free(copy); - return; - } - lc->cvec = cvec; - lc->cvec[lc->len++] = copy; -} - -/* =========================== Line editing ================================= */ - -/* We define a very simple "append buffer" structure, that is an heap - * allocated string where we can append to. This is useful in order to - * write all the escape sequences in a buffer and flush them to the standard - * output in a single call, to avoid flickering effects. 
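Completion is driven entirely by the callback registered with linenoiseSetCompletionCallback(); when the user hits Tab, completeLine() above invokes it, and the callback feeds candidates back through linenoiseAddCompletion(). A minimal usage sketch; the exact prototypes live in linenoise.h, which this hunk does not show, so treat them as an assumption:

    #include "linenoise.h"

    // Offer "help" and "history" whenever the current line starts with 'h'.
    static void myCompletion(const char * buf, linenoiseCompletions * lc)
    {
        if (buf[0] == 'h') {
            linenoiseAddCompletion(lc, "help");
            linenoiseAddCompletion(lc, "history");
        }
    }

    // During start-up, before the first linenoise() call:
    //     linenoiseSetCompletionCallback(myCompletion);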
*/ -struct abuf { - char *b; - int len; -}; - -static void abInit(struct abuf *ab) { - ab->b = NULL; - ab->len = 0; -} - -static void abAppend(struct abuf *ab, const char *s, int len) { - char *new = realloc(ab->b,ab->len+len); - - if (new == NULL) return; - memcpy(new+ab->len,s,len); - ab->b = new; - ab->len += len; -} - -static void abFree(struct abuf *ab) { - free(ab->b); -} - -/* Helper of refreshSingleLine() and refreshMultiLine() to show hints - * to the right of the prompt. */ -void refreshShowHints(struct abuf *ab, struct linenoiseState *l, int plen) { - char seq[64]; - if (hintsCallback && plen+l->len < l->cols) { - int color = -1, bold = 0; - char *hint = hintsCallback(l->buf,&color,&bold); - if (hint) { - int hintlen = strlen(hint); - int hintmaxlen = l->cols-(plen+l->len); - if (hintlen > hintmaxlen) hintlen = hintmaxlen; - if (bold == 1 && color == -1) color = 37; - if (color != -1 || bold != 0) - snprintf(seq,64,"\033[%d;%d;49m",bold,color); - abAppend(ab,seq,strlen(seq)); - abAppend(ab,hint,hintlen); - if (color != -1 || bold != 0) - abAppend(ab,"\033[0m",4); - /* Call the function to free the hint returned. */ - if (freeHintsCallback) freeHintsCallback(hint); - } - } -} - -/* Single line low level line refresh. - * - * Rewrite the currently edited line accordingly to the buffer content, - * cursor position, and number of columns of the terminal. */ -static void refreshSingleLine(struct linenoiseState *l) { - char seq[64]; - size_t plen = strlen(l->prompt); - int fd = l->ofd; - char *buf = l->buf; - size_t len = l->len; - size_t pos = l->pos; - struct abuf ab; - - while((plen+pos) >= l->cols) { - buf++; - len--; - pos--; - } - while (plen+len > l->cols) { - len--; - } - - abInit(&ab); - /* Cursor to left edge */ - snprintf(seq,64,"\r"); - abAppend(&ab,seq,strlen(seq)); - /* Write the prompt and the current buffer content */ - abAppend(&ab,l->prompt,strlen(l->prompt)); - abAppend(&ab,buf,len); - /* Show hits if any. */ - refreshShowHints(&ab,l,plen); - /* Erase to right */ - snprintf(seq,64,"\x1b[0K"); - abAppend(&ab,seq,strlen(seq)); - /* Move cursor to original position. */ - snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen)); - abAppend(&ab,seq,strlen(seq)); - if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ - abFree(&ab); -} - -/* Multi line low level line refresh. - * - * Rewrite the currently edited line accordingly to the buffer content, - * cursor position, and number of columns of the terminal. */ -static void refreshMultiLine(struct linenoiseState *l) { - char seq[64]; - int plen = strlen(l->prompt); - int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */ - int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */ - int rpos2; /* rpos after refresh. */ - int col; /* colum position, zero-based. */ - int old_rows = l->maxrows; - int fd = l->ofd, j; - struct abuf ab; - - /* Update maxrows if needed. */ - if (rows > (int)l->maxrows) l->maxrows = rows; - - /* First step: clear all the lines used before. To do so start by - * going to the last row. */ - abInit(&ab); - if (old_rows-rpos > 0) { - lndebug("go down %d", old_rows-rpos); - snprintf(seq,64,"\x1b[%dB", old_rows-rpos); - abAppend(&ab,seq,strlen(seq)); - } - - /* Now for every row clear it, go up. */ - for (j = 0; j < old_rows-1; j++) { - lndebug("clear+up"); - snprintf(seq,64,"\r\x1b[0K\x1b[1A"); - abAppend(&ab,seq,strlen(seq)); - } - - /* Clean the top line. 
*/ - lndebug("clear"); - snprintf(seq,64,"\r\x1b[0K"); - abAppend(&ab,seq,strlen(seq)); - - /* Write the prompt and the current buffer content */ - abAppend(&ab,l->prompt,strlen(l->prompt)); - abAppend(&ab,l->buf,l->len); - - /* Show hits if any. */ - refreshShowHints(&ab,l,plen); - - /* If we are at the very end of the screen with our prompt, we need to - * emit a newline and move the prompt to the first column. */ - if (l->pos && - l->pos == l->len && - (l->pos+plen) % l->cols == 0) - { - lndebug("<newline>"); - abAppend(&ab,"\n",1); - snprintf(seq,64,"\r"); - abAppend(&ab,seq,strlen(seq)); - rows++; - if (rows > (int)l->maxrows) l->maxrows = rows; - } - - /* Move cursor to right position. */ - rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ - lndebug("rpos2 %d", rpos2); - - /* Go up till we reach the expected positon. */ - if (rows-rpos2 > 0) { - lndebug("go-up %d", rows-rpos2); - snprintf(seq,64,"\x1b[%dA", rows-rpos2); - abAppend(&ab,seq,strlen(seq)); - } - - /* Set column. */ - col = (plen+(int)l->pos) % (int)l->cols; - lndebug("set col %d", 1+col); - if (col) - snprintf(seq,64,"\r\x1b[%dC", col); - else - snprintf(seq,64,"\r"); - abAppend(&ab,seq,strlen(seq)); - - lndebug("\n"); - l->oldpos = l->pos; - - if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ - abFree(&ab); -} - -/* Calls the two low level functions refreshSingleLine() or - * refreshMultiLine() according to the selected mode. */ -static void refreshLine(struct linenoiseState *l) { - if (mlmode) - refreshMultiLine(l); - else - refreshSingleLine(l); -} - -/* Insert the character 'c' at cursor current position. - * - * On error writing to the terminal -1 is returned, otherwise 0. */ -int linenoiseEditInsert(struct linenoiseState *l, char c) { - if (l->len < l->buflen) { - if (l->len == l->pos) { - l->buf[l->pos] = c; - l->pos++; - l->len++; - l->buf[l->len] = '\0'; - if ((!mlmode && l->plen+l->len < l->cols && !hintsCallback)) { - /* Avoid a full update of the line in the - * trivial case. */ - if (write(l->ofd,&c,1) == -1) return -1; - } else { - refreshLine(l); - } - } else { - memmove(l->buf+l->pos+1,l->buf+l->pos,l->len-l->pos); - l->buf[l->pos] = c; - l->len++; - l->pos++; - l->buf[l->len] = '\0'; - refreshLine(l); - } - } - return 0; -} - -/* Move cursor on the left. */ -void linenoiseEditMoveLeft(struct linenoiseState *l) { - if (l->pos > 0) { - l->pos--; - refreshLine(l); - } -} - -/* Move cursor on the right. */ -void linenoiseEditMoveRight(struct linenoiseState *l) { - if (l->pos != l->len) { - l->pos++; - refreshLine(l); - } -} - -/* Move cursor to the start of the line. */ -void linenoiseEditMoveHome(struct linenoiseState *l) { - if (l->pos != 0) { - l->pos = 0; - refreshLine(l); - } -} - -/* Move cursor to the end of the line. */ -void linenoiseEditMoveEnd(struct linenoiseState *l) { - if (l->pos != l->len) { - l->pos = l->len; - refreshLine(l); - } -} - -/* Substitute the currently edited line with the next or previous history - * entry as specified by 'dir'. */ -#define LINENOISE_HISTORY_NEXT 0 -#define LINENOISE_HISTORY_PREV 1 -void linenoiseEditHistoryNext(struct linenoiseState *l, int dir) { - if (history_len > 1) { - /* Update the current history entry before to - * overwrite it with the next one. */ - free(history[history_len - 1 - l->history_index]); - history[history_len - 1 - l->history_index] = strdup(l->buf); - /* Show the new entry */ - l->history_index += (dir == LINENOISE_HISTORY_PREV) ? 
1 : -1; - if (l->history_index < 0) { - l->history_index = 0; - return; - } else if (l->history_index >= history_len) { - l->history_index = history_len-1; - return; - } - strncpy(l->buf,history[history_len - 1 - l->history_index],l->buflen); - l->buf[l->buflen-1] = '\0'; - l->len = l->pos = strlen(l->buf); - refreshLine(l); - } -} - -/* Delete the character at the right of the cursor without altering the cursor - * position. Basically this is what happens with the "Delete" keyboard key. */ -void linenoiseEditDelete(struct linenoiseState *l) { - if (l->len > 0 && l->pos < l->len) { - memmove(l->buf+l->pos,l->buf+l->pos+1,l->len-l->pos-1); - l->len--; - l->buf[l->len] = '\0'; - refreshLine(l); - } -} - -/* Backspace implementation. */ -void linenoiseEditBackspace(struct linenoiseState *l) { - if (l->pos > 0 && l->len > 0) { - memmove(l->buf+l->pos-1,l->buf+l->pos,l->len-l->pos); - l->pos--; - l->len--; - l->buf[l->len] = '\0'; - refreshLine(l); - } -} - -/* Delete the previosu word, maintaining the cursor at the start of the - * current word. */ -void linenoiseEditDeletePrevWord(struct linenoiseState *l) { - size_t old_pos = l->pos; - size_t diff; - - while (l->pos > 0 && l->buf[l->pos-1] == ' ') - l->pos--; - while (l->pos > 0 && l->buf[l->pos-1] != ' ') - l->pos--; - diff = old_pos - l->pos; - memmove(l->buf+l->pos,l->buf+old_pos,l->len-old_pos+1); - l->len -= diff; - refreshLine(l); -} - -/* This function is the core of the line editing capability of linenoise. - * It expects 'fd' to be already in "raw mode" so that every key pressed - * will be returned ASAP to read(). - * - * The resulting string is put into 'buf' when the user type enter, or - * when ctrl+d is typed. - * - * The function returns the length of the current buffer. */ -static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, const char *prompt) -{ - struct linenoiseState l; - - /* Populate the linenoise state that we pass to functions implementing - * specific editing functionalities. */ - l.ifd = stdin_fd; - l.ofd = stdout_fd; - l.buf = buf; - l.buflen = buflen; - l.prompt = prompt; - l.plen = strlen(prompt); - l.oldpos = l.pos = 0; - l.len = 0; - l.cols = getColumns(stdin_fd, stdout_fd); - l.maxrows = 0; - l.history_index = 0; - - /* Buffer starts empty. */ - l.buf[0] = '\0'; - l.buflen--; /* Make sure there is always space for the nulterm */ - - /* The latest history entry is always our current buffer, that - * initially is just an empty string. */ - linenoiseHistoryAdd(""); - - if (write(l.ofd,prompt,l.plen) == -1) return -1; - while(1) { - char c; - int nread; - char seq[3]; - - nread = read(l.ifd,&c,1); - if (nread <= 0) return l.len; - - /* Only autocomplete when the callback is set. It returns < 0 when - * there was an error reading from fd. Otherwise it will return the - * character that should be handled next. */ - if (c == 9 && completionCallback != NULL) { - c = completeLine(&l); - /* Return on errors */ - if (c < 0) return l.len; - /* Read next character when 0 */ - if (c == 0) continue; - } - - switch(c) { - case ENTER: /* enter */ - history_len--; - free(history[history_len]); - if (mlmode) linenoiseEditMoveEnd(&l); - if (hintsCallback) { - /* Force a refresh without hints to leave the previous - * line as the user typed it after a newline. 
*/ - linenoiseHintsCallback *hc = hintsCallback; - hintsCallback = NULL; - refreshLine(&l); - hintsCallback = hc; - } - return (int)l.len; - case CTRL_C: /* ctrl-c */ - errno = EAGAIN; - return -1; - case BACKSPACE: /* backspace */ - case 8: /* ctrl-h */ - linenoiseEditBackspace(&l); - break; - case CTRL_D: /* ctrl-d, remove char at right of cursor, or if the - line is empty, act as end-of-file. */ - if (l.len > 0) { - linenoiseEditDelete(&l); - } else { - history_len--; - free(history[history_len]); - return -1; - } - break; - case CTRL_T: /* ctrl-t, swaps current character with previous. */ - if (l.pos > 0 && l.pos < l.len) { - int aux = buf[l.pos-1]; - buf[l.pos-1] = buf[l.pos]; - buf[l.pos] = aux; - if (l.pos != l.len-1) l.pos++; - refreshLine(&l); - } - break; - case CTRL_B: /* ctrl-b */ - linenoiseEditMoveLeft(&l); - break; - case CTRL_F: /* ctrl-f */ - linenoiseEditMoveRight(&l); - break; - case CTRL_P: /* ctrl-p */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); - break; - case CTRL_N: /* ctrl-n */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); - break; - case ESC: /* escape sequence */ - /* Read the next two bytes representing the escape sequence. - * Use two calls to handle slow terminals returning the two - * chars at different times. */ - if (read(l.ifd,seq,1) == -1) break; - if (read(l.ifd,seq+1,1) == -1) break; - - /* ESC [ sequences. */ - if (seq[0] == '[') { - if (seq[1] >= '0' && seq[1] <= '9') { - /* Extended escape, read additional byte. */ - if (read(l.ifd,seq+2,1) == -1) break; - if (seq[2] == '~') { - switch(seq[1]) { - case '3': /* Delete key. */ - linenoiseEditDelete(&l); - break; - } - } - } else { - switch(seq[1]) { - case 'A': /* Up */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); - break; - case 'B': /* Down */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); - break; - case 'C': /* Right */ - linenoiseEditMoveRight(&l); - break; - case 'D': /* Left */ - linenoiseEditMoveLeft(&l); - break; - case 'H': /* Home */ - linenoiseEditMoveHome(&l); - break; - case 'F': /* End*/ - linenoiseEditMoveEnd(&l); - break; - } - } - } - - /* ESC O sequences. */ - else if (seq[0] == 'O') { - switch(seq[1]) { - case 'H': /* Home */ - linenoiseEditMoveHome(&l); - break; - case 'F': /* End*/ - linenoiseEditMoveEnd(&l); - break; - } - } - break; - default: - if (linenoiseEditInsert(&l,c)) return -1; - break; - case CTRL_U: /* Ctrl+u, delete the whole line. */ - buf[0] = '\0'; - l.pos = l.len = 0; - refreshLine(&l); - break; - case CTRL_K: /* Ctrl+k, delete from current to end of line. */ - buf[l.pos] = '\0'; - l.len = l.pos; - refreshLine(&l); - break; - case CTRL_A: /* Ctrl+a, go to the start of the line */ - linenoiseEditMoveHome(&l); - break; - case CTRL_E: /* ctrl+e, go to the end of the line */ - linenoiseEditMoveEnd(&l); - break; - case CTRL_L: /* ctrl+l, clear screen */ - linenoiseClearScreen(); - refreshLine(&l); - break; - case CTRL_W: /* ctrl+w, delete previous word */ - linenoiseEditDeletePrevWord(&l); - break; - } - } - return l.len; -} - -/* This special mode is used by linenoise in order to print scan codes - * on screen for debugging / development purposes. It is implemented - * by the linenoise_example program using the --keycodes option. */ -void linenoisePrintKeyCodes(void) { - char quit[4]; - - printf("Linenoise key codes debugging mode.\n" - "Press keys to see scan codes. 
Type 'quit' at any time to exit.\n"); - if (enableRawMode(STDIN_FILENO) == -1) return; - memset(quit,' ',4); - while(1) { - char c; - int nread; - - nread = read(STDIN_FILENO,&c,1); - if (nread <= 0) continue; - memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */ - quit[sizeof(quit)-1] = c; /* Insert current char on the right. */ - if (memcmp(quit,"quit",sizeof(quit)) == 0) break; - - printf("'%c' %02x (%d) (type quit to exit)\n", - isprint(c) ? c : '?', (int)c, (int)c); - printf("\r"); /* Go left edge manually, we are in raw mode. */ - fflush(stdout); - } - disableRawMode(STDIN_FILENO); -} - -/* This function calls the line editing function linenoiseEdit() using - * the STDIN file descriptor set in raw mode. */ -static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) { - int count; - - if (buflen == 0) { - errno = EINVAL; - return -1; - } - - if (enableRawMode(STDIN_FILENO) == -1) return -1; - count = linenoiseEdit(STDIN_FILENO, STDOUT_FILENO, buf, buflen, prompt); - disableRawMode(STDIN_FILENO); - printf("\n"); - return count; -} - -/* This function is called when linenoise() is called with the standard - * input file descriptor not attached to a TTY. So for example when the - * program using linenoise is called in pipe or with a file redirected - * to its standard input. In this case, we want to be able to return the - * line regardless of its length (by default we are limited to 4k). */ -static char *linenoiseNoTTY(void) { - char *line = NULL; - size_t len = 0, maxlen = 0; - - while(1) { - if (len == maxlen) { - if (maxlen == 0) maxlen = 16; - maxlen *= 2; - char *oldval = line; - line = realloc(line,maxlen); - if (line == NULL) { - if (oldval) free(oldval); - return NULL; - } - } - int c = fgetc(stdin); - if (c == EOF || c == '\n') { - if (c == EOF && len == 0) { - free(line); - return NULL; - } else { - line[len] = '\0'; - return line; - } - } else { - line[len] = c; - len++; - } - } -} - -/* The high level function that is the main API of the linenoise library. - * This function checks if the terminal has basic capabilities, just checking - * for a blacklist of stupid terminals, and later either calls the line - * editing function or uses dummy fgets() so that you will be able to type - * something even in the most desperate of the conditions. */ -char *linenoise(const char *prompt) { - char buf[LINENOISE_MAX_LINE]; - int count; - - if (!isatty(STDIN_FILENO)) { - /* Not a tty: read from file / pipe. In this mode we don't want any - * limit to the line size, so we call a function to handle that. */ - return linenoiseNoTTY(); - } else if (isUnsupportedTerm()) { - size_t len; - - printf("%s",prompt); - fflush(stdout); - if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL; - len = strlen(buf); - while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) { - len--; - buf[len] = '\0'; - } - return strdup(buf); - } else { - count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt); - if (count == -1) return NULL; - return strdup(buf); - } -} - -/* This is just a wrapper the user may want to call in order to make sure - * the linenoise returned buffer is freed with the same allocator it was - * created with. Useful when the main program is using an alternative - * allocator. */ -void linenoiseFree(void *ptr) { - free(ptr); -} - -/* ================================ History ================================= */ - -/* Free the history, but does not reset it. Only used when we have to - * exit() to avoid memory leaks are reported by valgrind & co. 
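linenoise() and linenoiseFree() described above are the whole public surface needed for a simple read-eval loop. A minimal caller, shown purely to illustrate the API rather than as code from this patch (the prompt string is invented):

    #include <cstdio>
    #include "linenoise.h"

    int main()
    {
        char * line;
        // linenoise() returns a heap-allocated line, or NULL on EOF / Ctrl-C.
        while ((line = linenoise("nix> ")) != NULL) {
            printf("you typed: %s\n", line);
            linenoiseHistoryAdd(line);   // make it reachable with the Up arrow
            linenoiseFree(line);         // free with the library's own allocator
        }
        return 0;
    }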
*/ -static void freeHistory(void) { - if (history) { - int j; - - for (j = 0; j < history_len; j++) - free(history[j]); - free(history); - } -} - -/* At exit we'll try to fix the terminal to the initial conditions. */ -static void linenoiseAtExit(void) { - disableRawMode(STDIN_FILENO); - freeHistory(); -} - -/* This is the API call to add a new entry in the linenoise history. - * It uses a fixed array of char pointers that are shifted (memmoved) - * when the history max length is reached in order to remove the older - * entry and make room for the new one, so it is not exactly suitable for huge - * histories, but will work well for a few hundred of entries. - * - * Using a circular buffer is smarter, but a bit more complex to handle. */ -int linenoiseHistoryAdd(const char *line) { - char *linecopy; - - if (history_max_len == 0) return 0; - - /* Initialization on first call. */ - if (history == NULL) { - history = malloc(sizeof(char*)*history_max_len); - if (history == NULL) return 0; - memset(history,0,(sizeof(char*)*history_max_len)); - } - - /* Don't add duplicated lines. */ - if (history_len && !strcmp(history[history_len-1], line)) return 0; - - /* Add an heap allocated copy of the line in the history. - * If we reached the max length, remove the older line. */ - linecopy = strdup(line); - if (!linecopy) return 0; - if (history_len == history_max_len) { - free(history[0]); - memmove(history,history+1,sizeof(char*)*(history_max_len-1)); - history_len--; - } - history[history_len] = linecopy; - history_len++; - return 1; -} - -/* Set the maximum length for the history. This function can be called even - * if there is already some history, the function will make sure to retain - * just the latest 'len' elements if the new history length value is smaller - * than the amount of items already inside the history. */ -int linenoiseHistorySetMaxLen(int len) { - char **new; - - if (len < 1) return 0; - if (history) { - int tocopy = history_len; - - new = malloc(sizeof(char*)*len); - if (new == NULL) return 0; - - /* If we can't copy everything, free the elements we'll not use. */ - if (len < tocopy) { - int j; - - for (j = 0; j < tocopy-len; j++) free(history[j]); - tocopy = len; - } - memset(new,0,sizeof(char*)*len); - memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy); - free(history); - history = new; - } - history_max_len = len; - if (history_len > history_max_len) - history_len = history_max_len; - return 1; -} - -/* Save the history in the specified file. On success 0 is returned - * otherwise -1 is returned. */ -int linenoiseHistorySave(const char *filename) { - mode_t old_umask = umask(S_IXUSR|S_IRWXG|S_IRWXO); - FILE *fp; - int j; - - fp = fopen(filename,"w"); - umask(old_umask); - if (fp == NULL) return -1; - chmod(filename,S_IRUSR|S_IWUSR); - for (j = 0; j < history_len; j++) - fprintf(fp,"%s\n",history[j]); - fclose(fp); - return 0; -} - -/* Load the history from the specified file. If the file does not exist - * zero is returned and no operation is performed. - * - * If the file exists and the operation succeeded 0 is returned, otherwise - * on error -1 is returned. 
*/ -int linenoiseHistoryLoad(const char *filename) { - FILE *fp = fopen(filename,"r"); - char buf[LINENOISE_MAX_LINE]; - - if (fp == NULL) return -1; - - while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) { - char *p; - - p = strchr(buf,'\r'); - if (!p) p = strchr(buf,'\n'); - if (p) *p = '\0'; - linenoiseHistoryAdd(buf); - } - fclose(fp); - return 0; -} diff --git a/src/linenoise/linenoise.cpp b/src/linenoise/linenoise.cpp new file mode 100644 index 000000000000..c57505d2fa97 --- /dev/null +++ b/src/linenoise/linenoise.cpp @@ -0,0 +1,3450 @@ +/* linenoise.c -- guerrilla line editing library against the idea that a + * line editing lib needs to be 20,000 lines of C code. + * + * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com> + * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com> + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * line editing lib needs to be 20,000 lines of C code. + * + * You can find the latest source code at: + * + * http://github.com/antirez/linenoise + * + * Does a number of crazy assumptions that happen to be true in 99.9999% of + * the 2010 UNIX computers around. + * + * References: + * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html + * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html + * + * Todo list: + * - Switch to gets() if $TERM is something we can't support. + * - Filter bogus Ctrl+<char> combinations. + * - Win32 support + * + * Bloat: + * - Completion? + * - History search like Ctrl+r in readline? + * + * List of escape sequences used by this program, we do everything just + * with three sequences. In order to be so cheap we may have some + * flickering effect with some slow terminal, but the lesser sequences + * the more compatible. 
+ * + * CHA (Cursor Horizontal Absolute) + * Sequence: ESC [ n G + * Effect: moves cursor to column n (1 based) + * + * EL (Erase Line) + * Sequence: ESC [ n K + * Effect: if n is 0 or missing, clear from cursor to end of line + * Effect: if n is 1, clear from beginning of line to cursor + * Effect: if n is 2, clear entire line + * + * CUF (Cursor Forward) + * Sequence: ESC [ n C + * Effect: moves cursor forward of n chars + * + * The following are used to clear the screen: ESC [ H ESC [ 2 J + * This is actually composed of two sequences: + * + * cursorhome + * Sequence: ESC [ H + * Effect: moves the cursor to upper left corner + * + * ED2 (Clear entire screen) + * Sequence: ESC [ 2 J + * Effect: clear the whole screen + * + */ + +#ifdef _WIN32 + +#include <conio.h> +#include <windows.h> +#include <io.h> + +#if defined(_MSC_VER) && _MSC_VER < 1900 +#define snprintf _snprintf // Microsoft headers use underscores in some names +#endif + +#if !defined GNUC +#define strcasecmp _stricmp +#endif + +#define strdup _strdup +#define isatty _isatty +#define write _write +#define STDIN_FILENO 0 + +#else /* _WIN32 */ + +#include <signal.h> +#include <termios.h> +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/ioctl.h> +#include <cctype> +#include <wctype.h> + +#endif /* _WIN32 */ + +#include <stdio.h> +#include <errno.h> +#include <fcntl.h> + +#include "linenoise.h" +#include "ConvertUTF.h" + +#include <string> +#include <vector> +#include <memory> + +using std::string; +using std::vector; +using std::unique_ptr; +using namespace linenoise_ng; + +typedef unsigned char char8_t; + +static ConversionResult copyString8to32(char32_t* dst, size_t dstSize, + size_t& dstCount, const char* src) { + const UTF8* sourceStart = reinterpret_cast<const UTF8*>(src); + const UTF8* sourceEnd = sourceStart + strlen(src); + UTF32* targetStart = reinterpret_cast<UTF32*>(dst); + UTF32* targetEnd = targetStart + dstSize; + + ConversionResult res = ConvertUTF8toUTF32( + &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); + + if (res == conversionOK) { + dstCount = targetStart - reinterpret_cast<UTF32*>(dst); + + if (dstCount < dstSize) { + *targetStart = 0; + } + } + + return res; +} + +static ConversionResult copyString8to32(char32_t* dst, size_t dstSize, + size_t& dstCount, const char8_t* src) { + return copyString8to32(dst, dstSize, dstCount, + reinterpret_cast<const char*>(src)); +} + +static size_t strlen32(const char32_t* str) { + const char32_t* ptr = str; + + while (*ptr) { + ++ptr; + } + + return ptr - str; +} + +static size_t strlen8(const char8_t* str) { + return strlen(reinterpret_cast<const char*>(str)); +} + +static char8_t* strdup8(const char* src) { + return reinterpret_cast<char8_t*>(strdup(src)); +} + +#ifdef _WIN32 +static const int FOREGROUND_WHITE = + FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE; +static const int BACKGROUND_WHITE = + BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE; +static const int INTENSITY = FOREGROUND_INTENSITY | BACKGROUND_INTENSITY; + +class WinAttributes { + public: + WinAttributes() { + CONSOLE_SCREEN_BUFFER_INFO info; + GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &info); + _defaultAttribute = info.wAttributes & INTENSITY; + _defaultColor = info.wAttributes & FOREGROUND_WHITE; + _defaultBackground = info.wAttributes & BACKGROUND_WHITE; + + _consoleAttribute = _defaultAttribute; + _consoleColor = _defaultColor | _defaultBackground; + } + + public: + int _defaultAttribute; + int 
_defaultColor; + int _defaultBackground; + + int _consoleAttribute; + int _consoleColor; +}; + +static WinAttributes WIN_ATTR; + +static void copyString32to16(char16_t* dst, size_t dstSize, size_t* dstCount, + const char32_t* src, size_t srcSize) { + const UTF32* sourceStart = reinterpret_cast<const UTF32*>(src); + const UTF32* sourceEnd = sourceStart + srcSize; + char16_t* targetStart = reinterpret_cast<char16_t*>(dst); + char16_t* targetEnd = targetStart + dstSize; + + ConversionResult res = ConvertUTF32toUTF16( + &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); + + if (res == conversionOK) { + *dstCount = targetStart - reinterpret_cast<char16_t*>(dst); + + if (*dstCount < dstSize) { + *targetStart = 0; + } + } +} +#endif + +static void copyString32to8(char* dst, size_t dstSize, size_t* dstCount, + const char32_t* src, size_t srcSize) { + const UTF32* sourceStart = reinterpret_cast<const UTF32*>(src); + const UTF32* sourceEnd = sourceStart + srcSize; + UTF8* targetStart = reinterpret_cast<UTF8*>(dst); + UTF8* targetEnd = targetStart + dstSize; + + ConversionResult res = ConvertUTF32toUTF8( + &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); + + if (res == conversionOK) { + *dstCount = targetStart - reinterpret_cast<UTF8*>(dst); + + if (*dstCount < dstSize) { + *targetStart = 0; + } + } +} + +static void copyString32to8(char* dst, size_t dstLen, const char32_t* src) { + size_t dstCount = 0; + copyString32to8(dst, dstLen, &dstCount, src, strlen32(src)); +} + +static void copyString32(char32_t* dst, const char32_t* src, size_t len) { + while (0 < len && *src) { + *dst++ = *src++; + --len; + } + + *dst = 0; +} + +static int strncmp32(const char32_t* left, const char32_t* right, size_t len) { + while (0 < len && *left) { + if (*left != *right) { + return *left - *right; + } + + ++left; + ++right; + --len; + } + + return 0; +} + +#ifdef _WIN32 +#include <iostream> + +static size_t OutputWin(char16_t* text16, char32_t* text32, size_t len32) { + size_t count16 = 0; + + copyString32to16(text16, len32, &count16, text32, len32); + WriteConsoleW(GetStdHandle(STD_OUTPUT_HANDLE), text16, + static_cast<DWORD>(count16), nullptr, nullptr); + + return count16; +} + +static char32_t* HandleEsc(char32_t* p, char32_t* end) { + if (*p == '[') { + int code = 0; + + for (++p; p < end; ++p) { + char32_t c = *p; + + if ('0' <= c && c <= '9') { + code = code * 10 + (c - '0'); + } else if (c == 'm' || c == ';') { + switch (code) { + case 0: + WIN_ATTR._consoleAttribute = WIN_ATTR._defaultAttribute; + WIN_ATTR._consoleColor = + WIN_ATTR._defaultColor | WIN_ATTR._defaultBackground; + break; + + case 1: // BOLD + case 5: // BLINK + WIN_ATTR._consoleAttribute = + (WIN_ATTR._defaultAttribute ^ FOREGROUND_INTENSITY) & INTENSITY; + break; + + case 30: + WIN_ATTR._consoleColor = BACKGROUND_WHITE; + break; + + case 31: + WIN_ATTR._consoleColor = + FOREGROUND_RED | WIN_ATTR._defaultBackground; + break; + + case 32: + WIN_ATTR._consoleColor = + FOREGROUND_GREEN | WIN_ATTR._defaultBackground; + break; + + case 33: + WIN_ATTR._consoleColor = + FOREGROUND_RED | FOREGROUND_GREEN | WIN_ATTR._defaultBackground; + break; + + case 34: + WIN_ATTR._consoleColor = + FOREGROUND_BLUE | WIN_ATTR._defaultBackground; + break; + + case 35: + WIN_ATTR._consoleColor = + FOREGROUND_BLUE | FOREGROUND_RED | WIN_ATTR._defaultBackground; + break; + + case 36: + WIN_ATTR._consoleColor = FOREGROUND_BLUE | FOREGROUND_GREEN | + WIN_ATTR._defaultBackground; + break; + + case 37: + WIN_ATTR._consoleColor = 
FOREGROUND_GREEN | FOREGROUND_RED | + FOREGROUND_BLUE | + WIN_ATTR._defaultBackground; + break; + } + + code = 0; + } + + if (*p == 'm') { + ++p; + break; + } + } + } else { + ++p; + } + + auto handle = GetStdHandle(STD_OUTPUT_HANDLE); + SetConsoleTextAttribute(handle, + WIN_ATTR._consoleAttribute | WIN_ATTR._consoleColor); + + return p; +} + +static size_t WinWrite32(char16_t* text16, char32_t* text32, size_t len32) { + char32_t* p = text32; + char32_t* q = p; + char32_t* e = text32 + len32; + size_t count16 = 0; + + while (p < e) { + if (*p == 27) { + if (q < p) { + count16 += OutputWin(text16, q, p - q); + } + + q = p = HandleEsc(p + 1, e); + } else { + ++p; + } + } + + if (q < p) { + count16 += OutputWin(text16, q, p - q); + } + + return count16; +} +#endif + +static int write32(int fd, char32_t* text32, int len32) { +#ifdef _WIN32 + if (isatty(fd)) { + size_t len16 = 2 * len32 + 1; + unique_ptr<char16_t[]> text16(new char16_t[len16]); + size_t count16 = WinWrite32(text16.get(), text32, len32); + + return static_cast<int>(count16); + } else { + size_t len8 = 4 * len32 + 1; + unique_ptr<char[]> text8(new char[len8]); + size_t count8 = 0; + + copyString32to8(text8.get(), len8, &count8, text32, len32); + + return write(fd, text8.get(), static_cast<unsigned int>(count8)); + } +#else + size_t len8 = 4 * len32 + 1; + unique_ptr<char[]> text8(new char[len8]); + size_t count8 = 0; + + copyString32to8(text8.get(), len8, &count8, text32, len32); + + return write(fd, text8.get(), count8); +#endif +} + +class Utf32String { + public: + Utf32String() : _length(0), _data(nullptr) { + // note: parens intentional, _data must be properly initialized + _data = new char32_t[1](); + } + + explicit Utf32String(const char* src) : _length(0), _data(nullptr) { + size_t len = strlen(src); + // note: parens intentional, _data must be properly initialized + _data = new char32_t[len + 1](); + copyString8to32(_data, len + 1, _length, src); + } + + explicit Utf32String(const char8_t* src) : _length(0), _data(nullptr) { + size_t len = strlen(reinterpret_cast<const char*>(src)); + // note: parens intentional, _data must be properly initialized + _data = new char32_t[len + 1](); + copyString8to32(_data, len + 1, _length, src); + } + + explicit Utf32String(const char32_t* src) : _length(0), _data(nullptr) { + for (_length = 0; src[_length] != 0; ++_length) { + } + + // note: parens intentional, _data must be properly initialized + _data = new char32_t[_length + 1](); + memcpy(_data, src, _length * sizeof(char32_t)); + } + + explicit Utf32String(const char32_t* src, int len) : _length(len), _data(nullptr) { + // note: parens intentional, _data must be properly initialized + _data = new char32_t[len + 1](); + memcpy(_data, src, len * sizeof(char32_t)); + } + + explicit Utf32String(int len) : _length(0), _data(nullptr) { + // note: parens intentional, _data must be properly initialized + _data = new char32_t[len](); + } + + explicit Utf32String(const Utf32String& that) : _length(that._length), _data(nullptr) { + // note: parens intentional, _data must be properly initialized + _data = new char32_t[_length + 1](); + memcpy(_data, that._data, sizeof(char32_t) * _length); + } + + Utf32String& operator=(const Utf32String& that) { + if (this != &that) { + delete[] _data; + _data = new char32_t[that._length](); + _length = that._length; + memcpy(_data, that._data, sizeof(char32_t) * _length); + } + + return *this; + } + + ~Utf32String() { delete[] _data; } + + public: + char32_t* get() const { return _data; } + + size_t length() 
const { return _length; } + + size_t chars() const { return _length; } + + void initFromBuffer() { + for (_length = 0; _data[_length] != 0; ++_length) { + } + } + + const char32_t& operator[](size_t pos) const { return _data[pos]; } + + char32_t& operator[](size_t pos) { return _data[pos]; } + + private: + size_t _length; + char32_t* _data; +}; + +class Utf8String { + Utf8String(const Utf8String&) = delete; + Utf8String& operator=(const Utf8String&) = delete; + + public: + explicit Utf8String(const Utf32String& src) { + size_t len = src.length() * 4 + 1; + _data = new char[len]; + copyString32to8(_data, len, src.get()); + } + + ~Utf8String() { delete[] _data; } + + public: + char* get() const { return _data; } + + private: + char* _data; +}; + +struct linenoiseCompletions { + vector<Utf32String> completionStrings; +}; + +#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 +#define LINENOISE_MAX_LINE 4096 + +// make control-characters more readable +#define ctrlChar(upperCaseASCII) (upperCaseASCII - 0x40) + +/** + * Recompute widths of all characters in a char32_t buffer + * @param text input buffer of Unicode characters + * @param widths output buffer of character widths + * @param charCount number of characters in buffer + */ +namespace linenoise_ng { +int mk_wcwidth(char32_t ucs); +} + +static void recomputeCharacterWidths(const char32_t* text, char* widths, + int charCount) { + for (int i = 0; i < charCount; ++i) { + widths[i] = mk_wcwidth(text[i]); + } +} + +/** + * Calculate a new screen position given a starting position, screen width and + * character count + * @param x initial x position (zero-based) + * @param y initial y position (zero-based) + * @param screenColumns screen column count + * @param charCount character positions to advance + * @param xOut returned x position (zero-based) + * @param yOut returned y position (zero-based) + */ +static void calculateScreenPosition(int x, int y, int screenColumns, + int charCount, int& xOut, int& yOut) { + xOut = x; + yOut = y; + int charsRemaining = charCount; + while (charsRemaining > 0) { + int charsThisRow = (x + charsRemaining < screenColumns) ? 
charsRemaining + : screenColumns - x; + xOut = x + charsThisRow; + yOut = y; + charsRemaining -= charsThisRow; + x = 0; + ++y; + } + if (xOut == screenColumns) { // we have to special-case line wrap + xOut = 0; + ++yOut; + } +} + +/** + * Calculate a column width using mk_wcswidth() + * @param buf32 text to calculate + * @param len length of text to calculate + */ +namespace linenoise_ng { +int mk_wcswidth(const char32_t* pwcs, size_t n); +} + +static int calculateColumnPosition(char32_t* buf32, int len) { + int width = mk_wcswidth(reinterpret_cast<const char32_t*>(buf32), len); + if (width == -1) + return len; + else + return width; +} + +static bool isControlChar(char32_t testChar) { + return (testChar < ' ') || // C0 controls + (testChar >= 0x7F && testChar <= 0x9F); // DEL and C1 controls +} + +struct PromptBase { // a convenience struct for grouping prompt info + Utf32String promptText; // our copy of the prompt text, edited + char* promptCharWidths; // character widths from mk_wcwidth() + int promptChars; // chars in promptText + int promptBytes; // bytes in promptText + int promptExtraLines; // extra lines (beyond 1) occupied by prompt + int promptIndentation; // column offset to end of prompt + int promptLastLinePosition; // index into promptText where last line begins + int promptPreviousInputLen; // promptChars of previous input line, for + // clearing + int promptCursorRowOffset; // where the cursor is relative to the start of + // the prompt + int promptScreenColumns; // width of screen in columns + int promptPreviousLen; // help erasing + int promptErrorCode; // error code (invalid UTF-8) or zero + + PromptBase() : promptPreviousInputLen(0) {} + + bool write() { + if (write32(1, promptText.get(), promptBytes) == -1) return false; + + return true; + } +}; + +struct PromptInfo : public PromptBase { + PromptInfo(const char* textPtr, int columns) { + promptExtraLines = 0; + promptLastLinePosition = 0; + promptPreviousLen = 0; + promptScreenColumns = columns; + Utf32String tempUnicode(textPtr); + + // strip control characters from the prompt -- we do allow newline + char32_t* pIn = tempUnicode.get(); + char32_t* pOut = pIn; + + int len = 0; + int x = 0; + + bool const strip = (isatty(1) == 0); + + while (*pIn) { + char32_t c = *pIn; + if ('\n' == c || !isControlChar(c)) { + *pOut = c; + ++pOut; + ++pIn; + ++len; + if ('\n' == c || ++x >= promptScreenColumns) { + x = 0; + ++promptExtraLines; + promptLastLinePosition = len; + } + } else if (c == '\x1b') { + if (strip) { + // jump over control chars + ++pIn; + if (*pIn == '[') { + ++pIn; + while (*pIn && ((*pIn == ';') || ((*pIn >= '0' && *pIn <= '9')))) { + ++pIn; + } + if (*pIn == 'm') { + ++pIn; + } + } + } else { + // copy control chars + *pOut = *pIn; + ++pOut; + ++pIn; + if (*pIn == '[') { + *pOut = *pIn; + ++pOut; + ++pIn; + while (*pIn && ((*pIn == ';') || ((*pIn >= '0' && *pIn <= '9')))) { + *pOut = *pIn; + ++pOut; + ++pIn; + } + if (*pIn == 'm') { + *pOut = *pIn; + ++pOut; + ++pIn; + } + } + } + } else { + ++pIn; + } + } + *pOut = 0; + promptChars = len; + promptBytes = static_cast<int>(pOut - tempUnicode.get()); + promptText = tempUnicode; + + promptIndentation = len - promptLastLinePosition; + promptCursorRowOffset = promptExtraLines; + } +}; + +// Used with DynamicPrompt (history search) +// +static const Utf32String forwardSearchBasePrompt("(i-search)`"); +static const Utf32String reverseSearchBasePrompt("(reverse-i-search)`"); +static const Utf32String endSearchBasePrompt("': "); +static Utf32String + 
previousSearchText; // remembered across invocations of linenoise() + +// changing prompt for "(reverse-i-search)`text':" etc. +// +struct DynamicPrompt : public PromptBase { + Utf32String searchText; // text we are searching for + char* searchCharWidths; // character widths from mk_wcwidth() + int searchTextLen; // chars in searchText + int direction; // current search direction, 1=forward, -1=reverse + + DynamicPrompt(PromptBase& pi, int initialDirection) + : searchTextLen(0), direction(initialDirection) { + promptScreenColumns = pi.promptScreenColumns; + promptCursorRowOffset = 0; + Utf32String emptyString(1); + searchText = emptyString; + const Utf32String* basePrompt = + (direction > 0) ? &forwardSearchBasePrompt : &reverseSearchBasePrompt; + size_t promptStartLength = basePrompt->length(); + promptChars = + static_cast<int>(promptStartLength + endSearchBasePrompt.length()); + promptBytes = promptChars; + promptLastLinePosition = promptChars; // TODO fix this, we are asssuming + // that the history prompt won't wrap + // (!) + promptPreviousLen = promptChars; + Utf32String tempUnicode(promptChars + 1); + memcpy(tempUnicode.get(), basePrompt->get(), + sizeof(char32_t) * promptStartLength); + memcpy(&tempUnicode[promptStartLength], endSearchBasePrompt.get(), + sizeof(char32_t) * (endSearchBasePrompt.length() + 1)); + tempUnicode.initFromBuffer(); + promptText = tempUnicode; + calculateScreenPosition(0, 0, pi.promptScreenColumns, promptChars, + promptIndentation, promptExtraLines); + } + + void updateSearchPrompt(void) { + const Utf32String* basePrompt = + (direction > 0) ? &forwardSearchBasePrompt : &reverseSearchBasePrompt; + size_t promptStartLength = basePrompt->length(); + promptChars = static_cast<int>(promptStartLength + searchTextLen + + endSearchBasePrompt.length()); + promptBytes = promptChars; + Utf32String tempUnicode(promptChars + 1); + memcpy(tempUnicode.get(), basePrompt->get(), + sizeof(char32_t) * promptStartLength); + memcpy(&tempUnicode[promptStartLength], searchText.get(), + sizeof(char32_t) * searchTextLen); + size_t endIndex = promptStartLength + searchTextLen; + memcpy(&tempUnicode[endIndex], endSearchBasePrompt.get(), + sizeof(char32_t) * (endSearchBasePrompt.length() + 1)); + tempUnicode.initFromBuffer(); + promptText = tempUnicode; + } + + void updateSearchText(const char32_t* textPtr) { + Utf32String tempUnicode(textPtr); + searchTextLen = static_cast<int>(tempUnicode.chars()); + searchText = tempUnicode; + updateSearchPrompt(); + } +}; + +class KillRing { + static const int capacity = 10; + int size; + int index; + char indexToSlot[10]; + vector<Utf32String> theRing; + + public: + enum action { actionOther, actionKill, actionYank }; + action lastAction; + size_t lastYankSize; + + KillRing() : size(0), index(0), lastAction(actionOther) { + theRing.reserve(capacity); + } + + void kill(const char32_t* text, int textLen, bool forward) { + if (textLen == 0) { + return; + } + Utf32String killedText(text, textLen); + if (lastAction == actionKill && size > 0) { + int slot = indexToSlot[0]; + int currentLen = static_cast<int>(theRing[slot].length()); + int resultLen = currentLen + textLen; + Utf32String temp(resultLen + 1); + if (forward) { + memcpy(temp.get(), theRing[slot].get(), currentLen * sizeof(char32_t)); + memcpy(&temp[currentLen], killedText.get(), textLen * sizeof(char32_t)); + } else { + memcpy(temp.get(), killedText.get(), textLen * sizeof(char32_t)); + memcpy(&temp[textLen], theRing[slot].get(), + currentLen * sizeof(char32_t)); + } + temp[resultLen] = 0; 
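// Note (descriptive comment, not in the original patch): this is the
// "consecutive kill" branch of KillRing::kill() -- the freshly killed text has
// just been merged into the most recent ring entry (appended when killing
// forward, prepended when killing backward), and the merged string is stored
// back into that same slot below instead of consuming a new slot.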
+ temp.initFromBuffer(); + theRing[slot] = temp; + } else { + if (size < capacity) { + if (size > 0) { + memmove(&indexToSlot[1], &indexToSlot[0], size); + } + indexToSlot[0] = size; + size++; + theRing.push_back(killedText); + } else { + int slot = indexToSlot[capacity - 1]; + theRing[slot] = killedText; + memmove(&indexToSlot[1], &indexToSlot[0], capacity - 1); + indexToSlot[0] = slot; + } + index = 0; + } + } + + Utf32String* yank() { return (size > 0) ? &theRing[indexToSlot[index]] : 0; } + + Utf32String* yankPop() { + if (size == 0) { + return 0; + } + ++index; + if (index == size) { + index = 0; + } + return &theRing[indexToSlot[index]]; + } +}; + +class InputBuffer { + char32_t* buf32; // input buffer + char* charWidths; // character widths from mk_wcwidth() + int buflen; // buffer size in characters + int len; // length of text in input buffer + int pos; // character position in buffer ( 0 <= pos <= len ) + + void clearScreen(PromptBase& pi); + int incrementalHistorySearch(PromptBase& pi, int startChar); + int completeLine(PromptBase& pi); + void refreshLine(PromptBase& pi); + + public: + InputBuffer(char32_t* buffer, char* widthArray, int bufferLen) + : buf32(buffer), + charWidths(widthArray), + buflen(bufferLen - 1), + len(0), + pos(0) { + buf32[0] = 0; + } + void preloadBuffer(const char* preloadText) { + size_t ucharCount = 0; + copyString8to32(buf32, buflen + 1, ucharCount, preloadText); + recomputeCharacterWidths(buf32, charWidths, static_cast<int>(ucharCount)); + len = static_cast<int>(ucharCount); + pos = static_cast<int>(ucharCount); + } + int getInputLine(PromptBase& pi); + int length(void) const { return len; } +}; + +// Special codes for keyboard input: +// +// Between Windows and the various Linux "terminal" programs, there is some +// pretty diverse behavior in the "scan codes" and escape sequences we are +// presented with. So ... we'll translate them all into our own pidgin +// pseudocode, trying to stay out of the way of UTF-8 and international +// characters. Here's the general plan. +// +// "User input keystrokes" (key chords, whatever) will be encoded as a single +// value. +// The low 21 bits are reserved for Unicode characters. Popular function-type +// keys +// get their own codes in the range 0x10200000 to (if needed) 0x1FE00000, +// currently +// just arrow keys, Home, End and Delete. Keypresses with Ctrl get ORed with +// 0x20000000, with Alt get ORed with 0x40000000. So, Ctrl+Alt+Home is encoded +// as 0x20000000 + 0x40000000 + 0x10A00000 == 0x70A00000. To keep things +// complicated, +// the Alt key is equivalent to prefixing the keystroke with ESC, so ESC +// followed by +// D is treated the same as Alt + D ... we'll just use Emacs terminology and +// call +// this "Meta". So, we will encode both ESC followed by D and Alt held down +// while D +// is pressed the same, as Meta-D, encoded as 0x40000064. 
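// Illustrative sketch, not part of the patch: one way a caller could take the
// encoding described above apart. The helper names here are invented for the
// example; the bit masks are the CTRL/META values defined just below.
static inline bool keyHasCtrl(char32_t key) { return (key & 0x20000000) != 0; }  // CTRL bit
static inline bool keyHasMeta(char32_t key) { return (key & 0x40000000) != 0; }  // META bit
static inline char32_t keyWithoutModifiers(char32_t key) {
  return key & ~(char32_t)(0x20000000 | 0x40000000);  // plain Unicode char or special-key code
}
// e.g. Ctrl+Alt+Home => 0x20000000 | 0x40000000 | 0x10A00000 == 0x70A00000, and
// keyWithoutModifiers() recovers the Home key code (0x10A00000) from that value.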
+// +// Here are the definitions of our component constants: +// +// Maximum unsigned 32-bit value = 0xFFFFFFFF; // For reference, max 32-bit +// value +// Highest allocated Unicode char = 0x001FFFFF; // For reference, max +// Unicode value +static const int META = 0x40000000; // Meta key combination +static const int CTRL = 0x20000000; // Ctrl key combination +// static const int SPECIAL_KEY = 0x10000000; // Common bit for all special +// keys +static const int UP_ARROW_KEY = 0x10200000; // Special keys +static const int DOWN_ARROW_KEY = 0x10400000; +static const int RIGHT_ARROW_KEY = 0x10600000; +static const int LEFT_ARROW_KEY = 0x10800000; +static const int HOME_KEY = 0x10A00000; +static const int END_KEY = 0x10C00000; +static const int DELETE_KEY = 0x10E00000; +static const int PAGE_UP_KEY = 0x11000000; +static const int PAGE_DOWN_KEY = 0x11200000; + +static const char* unsupported_term[] = {"dumb", "cons25", "emacs", NULL}; +static linenoiseCompletionCallback* completionCallback = NULL; + +#ifdef _WIN32 +static HANDLE console_in, console_out; +static DWORD oldMode; +static WORD oldDisplayAttribute; +#else +static struct termios orig_termios; /* in order to restore at exit */ +#endif + +static KillRing killRing; + +static int rawmode = 0; /* for atexit() function to check if restore is needed*/ +static int atexit_registered = 0; /* register atexit just 1 time */ +static int historyMaxLen = LINENOISE_DEFAULT_HISTORY_MAX_LEN; +static int historyLen = 0; +static int historyIndex = 0; +static char8_t** history = NULL; + +// used to emulate Windows command prompt on down-arrow after a recall +// we use -2 as our "not set" value because we add 1 to the previous index on +// down-arrow, +// and zero is a valid index (so -1 is a valid "previous index") +static int historyPreviousIndex = -2; +static bool historyRecallMostRecent = false; + +static void linenoiseAtExit(void); + +static bool isUnsupportedTerm(void) { + char* term = getenv("TERM"); + if (term == NULL) return false; + for (int j = 0; unsupported_term[j]; ++j) + if (!strcasecmp(term, unsupported_term[j])) { + return true; + } + return false; +} + +static void beep() { + fprintf(stderr, "\x7"); // ctrl-G == bell/beep + fflush(stderr); +} + +void linenoiseHistoryFree(void) { + if (history) { + for (int j = 0; j < historyLen; ++j) free(history[j]); + historyLen = 0; + free(history); + history = 0; + } +} + +static int enableRawMode(void) { +#ifdef _WIN32 + if (!console_in) { + console_in = GetStdHandle(STD_INPUT_HANDLE); + console_out = GetStdHandle(STD_OUTPUT_HANDLE); + + GetConsoleMode(console_in, &oldMode); + SetConsoleMode(console_in, oldMode & + ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | + ENABLE_PROCESSED_INPUT)); + } + return 0; +#else + struct termios raw; + + if (!isatty(STDIN_FILENO)) goto fatal; + if (!atexit_registered) { + atexit(linenoiseAtExit); + atexit_registered = 1; + } + if (tcgetattr(0, &orig_termios) == -1) goto fatal; + + raw = orig_termios; /* modify the original mode */ + /* input modes: no break, no CR to NL, no parity check, no strip char, + * no start/stop output control. 
*/ + raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); + /* output modes - disable post processing */ + // this is wrong, we don't want raw output, it turns newlines into straight + // linefeeds + // raw.c_oflag &= ~(OPOST); + /* control modes - set 8 bit chars */ + raw.c_cflag |= (CS8); + /* local modes - echoing off, canonical off, no extended functions, + * no signal chars (^Z,^C) */ + raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); + /* control chars - set return condition: min number of bytes and timer. + * We want read to return every single byte, without timeout. */ + raw.c_cc[VMIN] = 1; + raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ + + /* put terminal in raw mode after flushing */ + if (tcsetattr(0, TCSADRAIN, &raw) < 0) goto fatal; + rawmode = 1; + return 0; + +fatal: + errno = ENOTTY; + return -1; +#endif +} + +static void disableRawMode(void) { +#ifdef _WIN32 + SetConsoleMode(console_in, oldMode); + console_in = 0; + console_out = 0; +#else + if (rawmode && tcsetattr(0, TCSADRAIN, &orig_termios) != -1) rawmode = 0; +#endif +} + +// At exit we'll try to fix the terminal to the initial conditions +static void linenoiseAtExit(void) { disableRawMode(); } + +static int getScreenColumns(void) { + int cols; +#ifdef _WIN32 + CONSOLE_SCREEN_BUFFER_INFO inf; + GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &inf); + cols = inf.dwSize.X; +#else + struct winsize ws; + cols = (ioctl(1, TIOCGWINSZ, &ws) == -1) ? 80 : ws.ws_col; +#endif + // cols is 0 in certain circumstances like inside debugger, which creates + // further issues + return (cols > 0) ? cols : 80; +} + +static int getScreenRows(void) { + int rows; +#ifdef _WIN32 + CONSOLE_SCREEN_BUFFER_INFO inf; + GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &inf); + rows = 1 + inf.srWindow.Bottom - inf.srWindow.Top; +#else + struct winsize ws; + rows = (ioctl(1, TIOCGWINSZ, &ws) == -1) ? 24 : ws.ws_row; +#endif + return (rows > 0) ? rows : 24; +} + +static void setDisplayAttribute(bool enhancedDisplay, bool error) { +#ifdef _WIN32 + if (enhancedDisplay) { + CONSOLE_SCREEN_BUFFER_INFO inf; + GetConsoleScreenBufferInfo(console_out, &inf); + oldDisplayAttribute = inf.wAttributes; + BYTE oldLowByte = oldDisplayAttribute & 0xFF; + BYTE newLowByte; + switch (oldLowByte) { + case 0x07: + // newLowByte = FOREGROUND_BLUE | FOREGROUND_INTENSITY; // too dim + // newLowByte = FOREGROUND_BLUE; // even dimmer + newLowByte = FOREGROUND_BLUE | + FOREGROUND_GREEN; // most similar to xterm appearance + break; + case 0x70: + newLowByte = BACKGROUND_BLUE | BACKGROUND_INTENSITY; + break; + default: + newLowByte = oldLowByte ^ 0xFF; // default to inverse video + break; + } + inf.wAttributes = (inf.wAttributes & 0xFF00) | newLowByte; + SetConsoleTextAttribute(console_out, inf.wAttributes); + } else { + SetConsoleTextAttribute(console_out, oldDisplayAttribute); + } +#else + if (enhancedDisplay) { + char const* p = (error ? "\x1b[1;31m" : "\x1b[1;34m"); + if (write(1, p, 7) == -1) + return; /* bright blue (visible with both B&W bg) */ + } else { + if (write(1, "\x1b[0m", 4) == -1) return; /* reset */ + } +#endif +} + +/** + * Display the dynamic incremental search prompt and the current user input + * line. 
+ * @param pi PromptBase struct holding information about the prompt and our + * screen position + * @param buf32 input buffer to be displayed + * @param len count of characters in the buffer + * @param pos current cursor position within the buffer (0 <= pos <= len) + */ +static void dynamicRefresh(PromptBase& pi, char32_t* buf32, int len, int pos) { + // calculate the position of the end of the prompt + int xEndOfPrompt, yEndOfPrompt; + calculateScreenPosition(0, 0, pi.promptScreenColumns, pi.promptChars, + xEndOfPrompt, yEndOfPrompt); + pi.promptIndentation = xEndOfPrompt; + + // calculate the position of the end of the input line + int xEndOfInput, yEndOfInput; + calculateScreenPosition(xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, + calculateColumnPosition(buf32, len), xEndOfInput, + yEndOfInput); + + // calculate the desired position of the cursor + int xCursorPos, yCursorPos; + calculateScreenPosition(xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, + calculateColumnPosition(buf32, pos), xCursorPos, + yCursorPos); + +#ifdef _WIN32 + // position at the start of the prompt, clear to end of previous input + CONSOLE_SCREEN_BUFFER_INFO inf; + GetConsoleScreenBufferInfo(console_out, &inf); + inf.dwCursorPosition.X = 0; + inf.dwCursorPosition.Y -= pi.promptCursorRowOffset /*- pi.promptExtraLines*/; + SetConsoleCursorPosition(console_out, inf.dwCursorPosition); + DWORD count; + FillConsoleOutputCharacterA(console_out, ' ', + pi.promptPreviousLen + pi.promptPreviousInputLen, + inf.dwCursorPosition, &count); + pi.promptPreviousLen = pi.promptIndentation; + pi.promptPreviousInputLen = len; + + // display the prompt + if (!pi.write()) return; + + // display the input line + if (write32(1, buf32, len) == -1) return; + + // position the cursor + GetConsoleScreenBufferInfo(console_out, &inf); + inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32 + inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos; + SetConsoleCursorPosition(console_out, inf.dwCursorPosition); +#else // _WIN32 + char seq[64]; + int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines; + if (cursorRowMovement > 0) { // move the cursor up as required + snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); + if (write(1, seq, strlen(seq)) == -1) return; + } + // position at the start of the prompt, clear to end of screen + snprintf(seq, sizeof seq, "\x1b[1G\x1b[J"); // 1-based on VT100 + if (write(1, seq, strlen(seq)) == -1) return; + + // display the prompt + if (!pi.write()) return; + + // display the input line + if (write32(1, buf32, len) == -1) return; + + // we have to generate our own newline on line wrap + if (xEndOfInput == 0 && yEndOfInput > 0) + if (write(1, "\n", 1) == -1) return; + + // position the cursor + cursorRowMovement = yEndOfInput - yCursorPos; + if (cursorRowMovement > 0) { // move the cursor up as required + snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); + if (write(1, seq, strlen(seq)) == -1) return; + } + // position the cursor within the line + snprintf(seq, sizeof seq, "\x1b[%dG", xCursorPos + 1); // 1-based on VT100 + if (write(1, seq, strlen(seq)) == -1) return; +#endif + + pi.promptCursorRowOffset = + pi.promptExtraLines + yCursorPos; // remember row for next pass +} + +/** + * Refresh the user's input line: the prompt is already onscreen and is not + * redrawn here + * @param pi PromptBase struct holding information about the prompt and our + * screen position + */ +void InputBuffer::refreshLine(PromptBase& pi) { + // check for a matching brace/bracket/paren, 
remember its position if found + int highlight = -1; + bool indicateError = false; + if (pos < len) { + /* this scans for a brace matching buf32[pos] to highlight */ + unsigned char part1, part2; + int scanDirection = 0; + if (strchr("}])", buf32[pos])) { + scanDirection = -1; /* backwards */ + if (buf32[pos] == '}') { + part1 = '}'; part2 = '{'; + } else if (buf32[pos] == ']') { + part1 = ']'; part2 = '['; + } else { + part1 = ')'; part2 = '('; + } + } + else if (strchr("{[(", buf32[pos])) { + scanDirection = 1; /* forwards */ + if (buf32[pos] == '{') { + //part1 = '{'; part2 = '}'; + part1 = '}'; part2 = '{'; + } else if (buf32[pos] == '[') { + //part1 = '['; part2 = ']'; + part1 = ']'; part2 = '['; + } else { + //part1 = '('; part2 = ')'; + part1 = ')'; part2 = '('; + } + } + + if (scanDirection) { + int unmatched = scanDirection; + int unmatchedOther = 0; + for (int i = pos + scanDirection; i >= 0 && i < len; i += scanDirection) { + /* TODO: the right thing when inside a string */ + if (strchr("}])", buf32[i])) { + if (buf32[i] == part1) { + --unmatched; + } else { + --unmatchedOther; + } + } else if (strchr("{[(", buf32[i])) { + if (buf32[i] == part2) { + ++unmatched; + } else { + ++unmatchedOther; + } + } +/* + if (strchr("}])", buf32[i])) + --unmatched; + else if (strchr("{[(", buf32[i])) + ++unmatched; +*/ + if (unmatched == 0) { + highlight = i; + indicateError = (unmatchedOther != 0); + break; + } + } + } + } + + // calculate the position of the end of the input line + int xEndOfInput, yEndOfInput; + calculateScreenPosition(pi.promptIndentation, 0, pi.promptScreenColumns, + calculateColumnPosition(buf32, len), xEndOfInput, + yEndOfInput); + + // calculate the desired position of the cursor + int xCursorPos, yCursorPos; + calculateScreenPosition(pi.promptIndentation, 0, pi.promptScreenColumns, + calculateColumnPosition(buf32, pos), xCursorPos, + yCursorPos); + +#ifdef _WIN32 + // position at the end of the prompt, clear to end of previous input + CONSOLE_SCREEN_BUFFER_INFO inf; + GetConsoleScreenBufferInfo(console_out, &inf); + inf.dwCursorPosition.X = pi.promptIndentation; // 0-based on Win32 + inf.dwCursorPosition.Y -= pi.promptCursorRowOffset - pi.promptExtraLines; + SetConsoleCursorPosition(console_out, inf.dwCursorPosition); + DWORD count; + if (len < pi.promptPreviousInputLen) + FillConsoleOutputCharacterA(console_out, ' ', pi.promptPreviousInputLen, + inf.dwCursorPosition, &count); + pi.promptPreviousInputLen = len; + + // display the input line + if (highlight == -1) { + if (write32(1, buf32, len) == -1) return; + } else { + if (write32(1, buf32, highlight) == -1) return; + setDisplayAttribute(true, indicateError); /* bright blue (visible with both B&W bg) */ + if (write32(1, &buf32[highlight], 1) == -1) return; + setDisplayAttribute(false, indicateError); + if (write32(1, buf32 + highlight + 1, len - highlight - 1) == -1) return; + } + + // position the cursor + GetConsoleScreenBufferInfo(console_out, &inf); + inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32 + inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos; + SetConsoleCursorPosition(console_out, inf.dwCursorPosition); +#else // _WIN32 + char seq[64]; + int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines; + if (cursorRowMovement > 0) { // move the cursor up as required + snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); + if (write(1, seq, strlen(seq)) == -1) return; + } + // position at the end of the prompt, clear to end of screen + snprintf(seq, sizeof seq, "\x1b[%dG\x1b[J", + 
pi.promptIndentation + 1); // 1-based on VT100 + if (write(1, seq, strlen(seq)) == -1) return; + + if (highlight == -1) { // write unhighlighted text + if (write32(1, buf32, len) == -1) return; + } else { // highlight the matching brace/bracket/parenthesis + if (write32(1, buf32, highlight) == -1) return; + setDisplayAttribute(true, indicateError); + if (write32(1, &buf32[highlight], 1) == -1) return; + setDisplayAttribute(false, indicateError); + if (write32(1, buf32 + highlight + 1, len - highlight - 1) == -1) return; + } + + // we have to generate our own newline on line wrap + if (xEndOfInput == 0 && yEndOfInput > 0) + if (write(1, "\n", 1) == -1) return; + + // position the cursor + cursorRowMovement = yEndOfInput - yCursorPos; + if (cursorRowMovement > 0) { // move the cursor up as required + snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); + if (write(1, seq, strlen(seq)) == -1) return; + } + // position the cursor within the line + snprintf(seq, sizeof seq, "\x1b[%dG", xCursorPos + 1); // 1-based on VT100 + if (write(1, seq, strlen(seq)) == -1) return; +#endif + + pi.promptCursorRowOffset = + pi.promptExtraLines + yCursorPos; // remember row for next pass +} + +#ifndef _WIN32 + +/** + * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode + * (char32_t) character it + * encodes + * + * @return char32_t Unicode character + */ +static char32_t readUnicodeCharacter(void) { + static char8_t utf8String[5]; + static size_t utf8Count = 0; + while (true) { + char8_t c; + + /* Continue reading if interrupted by signal. */ + ssize_t nread; + do { + nread = read(0, &c, 1); + } while ((nread == -1) && (errno == EINTR)); + + if (nread <= 0) return 0; + if (c <= 0x7F) { // short circuit ASCII + utf8Count = 0; + return c; + } else if (utf8Count < sizeof(utf8String) - 1) { + utf8String[utf8Count++] = c; + utf8String[utf8Count] = 0; + char32_t unicodeChar[2]; + size_t ucharCount; + ConversionResult res = + copyString8to32(unicodeChar, 2, ucharCount, utf8String); + if (res == conversionOK && ucharCount) { + utf8Count = 0; + return unicodeChar[0]; + } + } else { + utf8Count = + 0; // this shouldn't happen: got four bytes but no UTF-8 character + } + } +} + +namespace EscapeSequenceProcessing { // move these out of global namespace + +// This chunk of code does parsing of the escape sequences sent by various Linux +// terminals. +// +// It handles arrow keys, Home, End and Delete keys by interpreting the +// sequences sent by +// gnome terminal, xterm, rxvt, konsole, aterm and yakuake including the Alt and +// Ctrl key +// combinations that are understood by linenoise. +// +// The parsing uses tables, a bunch of intermediate dispatch routines and a +// doDispatch +// loop that reads the tables and sends control to "deeper" routines to continue +// the +// parsing. The starting call to doDispatch( c, initialDispatch ) will +// eventually return +// either a character (with optional CTRL and META bits set), or -1 if parsing +// fails, or +// zero if an attempt to read from the keyboard fails. +// +// This is rather sloppy escape sequence processing, since we're not paying +// attention to what the +// actual TERM is set to and are processing all key sequences for all terminals, +// but it works with +// the most common keystrokes on the most common terminals. It's intricate, but +// the nested 'if' +// statements required to do it directly would be worse. This way has the +// advantage of allowing +// changes and extensions without having to touch a lot of code. 
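// Illustrative sketch, not part of the patch: the table-driven dispatch idea in
// miniature, using the CharacterDispatch / CharacterDispatchRoutine types and
// the key-code constants defined elsewhere in this file. A hypothetical table
// handling only ESC [ A and ESC [ B pairs two candidate characters with two
// handlers plus the mandatory fallback (the routine list is one entry longer
// than the character list):
//
//   static char32_t miniUp(char32_t)   { return UP_ARROW_KEY; }
//   static char32_t miniDown(char32_t) { return DOWN_ARROW_KEY; }
//   static char32_t miniFail(char32_t) { beep(); return -1; }
//   static CharacterDispatchRoutine miniRoutines[] = {miniUp, miniDown, miniFail};
//   static CharacterDispatch miniDispatch = {2, "AB", miniRoutines};
//
// doDispatch(c, miniDispatch) then returns UP_ARROW_KEY for 'A', DOWN_ARROW_KEY
// for 'B', and falls through to the beep/failure routine for anything else.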
+ +// This is a typedef for the routine called by doDispatch(). It takes the +// current character +// as input, does any required processing including reading more characters and +// calling other +// dispatch routines, then eventually returns the final (possibly extended or +// special) character. +// +typedef char32_t (*CharacterDispatchRoutine)(char32_t); + +// This structure is used by doDispatch() to hold a list of characters to test +// for and +// a list of routines to call if the character matches. The dispatch routine +// list is one +// longer than the character list; the final entry is used if no character +// matches. +// +struct CharacterDispatch { + unsigned int len; // length of the chars list + const char* chars; // chars to test + CharacterDispatchRoutine* dispatch; // array of routines to call +}; + +// This dispatch routine is given a dispatch table and then farms work out to +// routines +// listed in the table based on the character it is called with. The dispatch +// routines can +// read more input characters to decide what should eventually be returned. +// Eventually, +// a called routine returns either a character or -1 to indicate parsing +// failure. +// +static char32_t doDispatch(char32_t c, CharacterDispatch& dispatchTable) { + for (unsigned int i = 0; i < dispatchTable.len; ++i) { + if (static_cast<unsigned char>(dispatchTable.chars[i]) == c) { + return dispatchTable.dispatch[i](c); + } + } + return dispatchTable.dispatch[dispatchTable.len](c); +} + +static char32_t thisKeyMetaCtrl = + 0; // holds pre-set Meta and/or Ctrl modifiers + +// Final dispatch routines -- return something +// +static char32_t normalKeyRoutine(char32_t c) { return thisKeyMetaCtrl | c; } +static char32_t upArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | UP_ARROW_KEY; +} +static char32_t downArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | DOWN_ARROW_KEY; +} +static char32_t rightArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | RIGHT_ARROW_KEY; +} +static char32_t leftArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | LEFT_ARROW_KEY; +} +static char32_t homeKeyRoutine(char32_t) { return thisKeyMetaCtrl | HOME_KEY; } +static char32_t endKeyRoutine(char32_t) { return thisKeyMetaCtrl | END_KEY; } +static char32_t pageUpKeyRoutine(char32_t) { + return thisKeyMetaCtrl | PAGE_UP_KEY; +} +static char32_t pageDownKeyRoutine(char32_t) { + return thisKeyMetaCtrl | PAGE_DOWN_KEY; +} +static char32_t deleteCharRoutine(char32_t) { + return thisKeyMetaCtrl | ctrlChar('H'); +} // key labeled Backspace +static char32_t deleteKeyRoutine(char32_t) { + return thisKeyMetaCtrl | DELETE_KEY; +} // key labeled Delete +static char32_t ctrlUpArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | CTRL | UP_ARROW_KEY; +} +static char32_t ctrlDownArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | CTRL | DOWN_ARROW_KEY; +} +static char32_t ctrlRightArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | CTRL | RIGHT_ARROW_KEY; +} +static char32_t ctrlLeftArrowKeyRoutine(char32_t) { + return thisKeyMetaCtrl | CTRL | LEFT_ARROW_KEY; +} +static char32_t escFailureRoutine(char32_t) { + beep(); + return -1; +} + +// Handle ESC [ 1 ; 3 (or 5) <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket1Semicolon3or5Routines[] = { + upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, + leftArrowKeyRoutine, escFailureRoutine}; +static CharacterDispatch escLeftBracket1Semicolon3or5Dispatch = { + 4, "ABCD", escLeftBracket1Semicolon3or5Routines}; + +// Handle ESC [ 1 
; <more stuff> escape sequences +// +static char32_t escLeftBracket1Semicolon3Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + thisKeyMetaCtrl |= META; + return doDispatch(c, escLeftBracket1Semicolon3or5Dispatch); +} +static char32_t escLeftBracket1Semicolon5Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + thisKeyMetaCtrl |= CTRL; + return doDispatch(c, escLeftBracket1Semicolon3or5Dispatch); +} +static CharacterDispatchRoutine escLeftBracket1SemicolonRoutines[] = { + escLeftBracket1Semicolon3Routine, escLeftBracket1Semicolon5Routine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket1SemicolonDispatch = { + 2, "35", escLeftBracket1SemicolonRoutines}; + +// Handle ESC [ 1 <more stuff> escape sequences +// +static char32_t escLeftBracket1SemicolonRoutine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket1SemicolonDispatch); +} +static CharacterDispatchRoutine escLeftBracket1Routines[] = { + homeKeyRoutine, escLeftBracket1SemicolonRoutine, escFailureRoutine}; +static CharacterDispatch escLeftBracket1Dispatch = {2, "~;", + escLeftBracket1Routines}; + +// Handle ESC [ 3 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket3Routines[] = {deleteKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket3Dispatch = {1, "~", + escLeftBracket3Routines}; + +// Handle ESC [ 4 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket4Routines[] = {endKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket4Dispatch = {1, "~", + escLeftBracket4Routines}; + +// Handle ESC [ 5 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket5Routines[] = {pageUpKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket5Dispatch = {1, "~", + escLeftBracket5Routines}; + +// Handle ESC [ 6 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket6Routines[] = {pageDownKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket6Dispatch = {1, "~", + escLeftBracket6Routines}; + +// Handle ESC [ 7 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket7Routines[] = {homeKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket7Dispatch = {1, "~", + escLeftBracket7Routines}; + +// Handle ESC [ 8 <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracket8Routines[] = {endKeyRoutine, + escFailureRoutine}; +static CharacterDispatch escLeftBracket8Dispatch = {1, "~", + escLeftBracket8Routines}; + +// Handle ESC [ <digit> escape sequences +// +static char32_t escLeftBracket0Routine(char32_t c) { + return escFailureRoutine(c); +} +static char32_t escLeftBracket1Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket1Dispatch); +} +static char32_t escLeftBracket2Routine(char32_t c) { + return escFailureRoutine(c); // Insert key, unused +} +static char32_t escLeftBracket3Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket3Dispatch); +} +static char32_t escLeftBracket4Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket4Dispatch); +} +static char32_t escLeftBracket5Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket5Dispatch); +} +static 
char32_t escLeftBracket6Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket6Dispatch); +} +static char32_t escLeftBracket7Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket7Dispatch); +} +static char32_t escLeftBracket8Routine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracket8Dispatch); +} +static char32_t escLeftBracket9Routine(char32_t c) { + return escFailureRoutine(c); +} + +// Handle ESC [ <more stuff> escape sequences +// +static CharacterDispatchRoutine escLeftBracketRoutines[] = { + upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, + leftArrowKeyRoutine, homeKeyRoutine, endKeyRoutine, + escLeftBracket0Routine, escLeftBracket1Routine, escLeftBracket2Routine, + escLeftBracket3Routine, escLeftBracket4Routine, escLeftBracket5Routine, + escLeftBracket6Routine, escLeftBracket7Routine, escLeftBracket8Routine, + escLeftBracket9Routine, escFailureRoutine}; +static CharacterDispatch escLeftBracketDispatch = {16, "ABCDHF0123456789", + escLeftBracketRoutines}; + +// Handle ESC O <char> escape sequences +// +static CharacterDispatchRoutine escORoutines[] = { + upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, + leftArrowKeyRoutine, homeKeyRoutine, endKeyRoutine, + ctrlUpArrowKeyRoutine, ctrlDownArrowKeyRoutine, ctrlRightArrowKeyRoutine, + ctrlLeftArrowKeyRoutine, escFailureRoutine}; +static CharacterDispatch escODispatch = {10, "ABCDHFabcd", escORoutines}; + +// Initial ESC dispatch -- could be a Meta prefix or the start of an escape +// sequence +// +static char32_t escLeftBracketRoutine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escLeftBracketDispatch); +} +static char32_t escORoutine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escODispatch); +} +static char32_t setMetaRoutine(char32_t c); // need forward reference +static CharacterDispatchRoutine escRoutines[] = {escLeftBracketRoutine, + escORoutine, setMetaRoutine}; +static CharacterDispatch escDispatch = {2, "[O", escRoutines}; + +// Initial dispatch -- we are not in the middle of anything yet +// +static char32_t escRoutine(char32_t c) { + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escDispatch); +} +static CharacterDispatchRoutine initialRoutines[] = { + escRoutine, deleteCharRoutine, normalKeyRoutine}; +static CharacterDispatch initialDispatch = {2, "\x1B\x7F", initialRoutines}; + +// Special handling for the ESC key because it does double duty +// +static char32_t setMetaRoutine(char32_t c) { + thisKeyMetaCtrl = META; + if (c == 0x1B) { // another ESC, stay in ESC processing mode + c = readUnicodeCharacter(); + if (c == 0) return 0; + return doDispatch(c, escDispatch); + } + return doDispatch(c, initialDispatch); +} + +} // namespace EscapeSequenceProcessing // move these out of global namespace + +#endif // #ifndef _WIN32 + +// linenoiseReadChar -- read a keystroke or keychord from the keyboard, and +// translate it +// into an encoded "keystroke". When convenient, extended keys are translated +// into their +// simpler Emacs keystrokes, so an unmodified "left arrow" becomes Ctrl-B. +// +// A return value of zero means "no input available", and a return value of -1 +// means "invalid key". 
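// Illustrative sketch, not part of the patch: how a caller might interpret the
// encoded value produced by linenoiseReadChar() below, using the META/CTRL bits
// and the special-key codes defined earlier in this file:
//
//   char32_t c = linenoiseReadChar();
//   if (c == 0) {
//     // no input available (read failed or returned nothing)
//   } else if (c == static_cast<char32_t>(-1)) {
//     // escape-sequence parsing failed: invalid key
//   } else {
//     bool meta = (c & META) != 0;        // Alt held down, or ESC-prefixed
//     bool ctrl = (c & CTRL) != 0;
//     char32_t key = c & ~(META | CTRL);  // Unicode char or a code such as UP_ARROW_KEY
//   }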
+// +static char32_t linenoiseReadChar(void) { +#ifdef _WIN32 + + INPUT_RECORD rec; + DWORD count; + int modifierKeys = 0; + bool escSeen = false; + while (true) { + ReadConsoleInputW(console_in, &rec, 1, &count); +#if 0 // helper for debugging keystrokes, display info in the debug "Output" + // window in the debugger + { + if ( rec.EventType == KEY_EVENT ) { + //if ( rec.Event.KeyEvent.uChar.UnicodeChar ) { + char buf[1024]; + sprintf( + buf, + "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X, " + "virtual scancode 0x%04X, key %s%s%s%s%s\n", + rec.Event.KeyEvent.uChar.UnicodeChar, + rec.Event.KeyEvent.wRepeatCount, + rec.Event.KeyEvent.wVirtualKeyCode, + rec.Event.KeyEvent.wVirtualScanCode, + rec.Event.KeyEvent.bKeyDown ? "down" : "up", + (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ? + " L-Ctrl" : "", + (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ? + " R-Ctrl" : "", + (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ? + " L-Alt" : "", + (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ? + " R-Alt" : "" + ); + OutputDebugStringA( buf ); + //} + } + } +#endif + if (rec.EventType != KEY_EVENT) { + continue; + } + // Windows provides for entry of characters that are not on your keyboard by + // sending the + // Unicode characters as a "key up" with virtual keycode 0x12 (VK_MENU == + // Alt key) ... + // accept these characters, otherwise only process characters on "key down" + if (!rec.Event.KeyEvent.bKeyDown && + rec.Event.KeyEvent.wVirtualKeyCode != VK_MENU) { + continue; + } + modifierKeys = 0; + // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't + // treat this + // combination as either CTRL or META we just turn off those two bits, so it + // is still + // possible to combine CTRL and/or META with an AltGr key by using + // right-Ctrl and/or + // left-Alt + if ((rec.Event.KeyEvent.dwControlKeyState & + (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) == + (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) { + rec.Event.KeyEvent.dwControlKeyState &= + ~(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED); + } + if (rec.Event.KeyEvent.dwControlKeyState & + (RIGHT_CTRL_PRESSED | LEFT_CTRL_PRESSED)) { + modifierKeys |= CTRL; + } + if (rec.Event.KeyEvent.dwControlKeyState & + (RIGHT_ALT_PRESSED | LEFT_ALT_PRESSED)) { + modifierKeys |= META; + } + if (escSeen) { + modifierKeys |= META; + } + if (rec.Event.KeyEvent.uChar.UnicodeChar == 0) { + switch (rec.Event.KeyEvent.wVirtualKeyCode) { + case VK_LEFT: + return modifierKeys | LEFT_ARROW_KEY; + case VK_RIGHT: + return modifierKeys | RIGHT_ARROW_KEY; + case VK_UP: + return modifierKeys | UP_ARROW_KEY; + case VK_DOWN: + return modifierKeys | DOWN_ARROW_KEY; + case VK_DELETE: + return modifierKeys | DELETE_KEY; + case VK_HOME: + return modifierKeys | HOME_KEY; + case VK_END: + return modifierKeys | END_KEY; + case VK_PRIOR: + return modifierKeys | PAGE_UP_KEY; + case VK_NEXT: + return modifierKeys | PAGE_DOWN_KEY; + default: + continue; // in raw mode, ReadConsoleInput shows shift, ctrl ... + } // ... 
ignore them + } else if (rec.Event.KeyEvent.uChar.UnicodeChar == + ctrlChar('[')) { // ESC, set flag for later + escSeen = true; + continue; + } else { + // we got a real character, return it + return modifierKeys | rec.Event.KeyEvent.uChar.UnicodeChar; + } + } + +#else + char32_t c; + c = readUnicodeCharacter(); + if (c == 0) return 0; + +// If _DEBUG_LINUX_KEYBOARD is set, then ctrl-^ puts us into a keyboard +// debugging mode +// where we print out decimal and decoded values for whatever the "terminal" +// program +// gives us on different keystrokes. Hit ctrl-C to exit this mode. +// +#define _DEBUG_LINUX_KEYBOARD +#if defined(_DEBUG_LINUX_KEYBOARD) + if (c == ctrlChar('^')) { // ctrl-^, special debug mode, prints all keys hit, + // ctrl-C to get out + printf( + "\nEntering keyboard debugging mode (on ctrl-^), press ctrl-C to exit " + "this mode\n"); + while (true) { + unsigned char keys[10]; + int ret = read(0, keys, 10); + + if (ret <= 0) { + printf("\nret: %d\n", ret); + } + for (int i = 0; i < ret; ++i) { + char32_t key = static_cast<char32_t>(keys[i]); + char* friendlyTextPtr; + char friendlyTextBuf[10]; + const char* prefixText = (key < 0x80) ? "" : "0x80+"; + char32_t keyCopy = (key < 0x80) ? key : key - 0x80; + if (keyCopy >= '!' && keyCopy <= '~') { // printable + friendlyTextBuf[0] = '\''; + friendlyTextBuf[1] = keyCopy; + friendlyTextBuf[2] = '\''; + friendlyTextBuf[3] = 0; + friendlyTextPtr = friendlyTextBuf; + } else if (keyCopy == ' ') { + friendlyTextPtr = const_cast<char*>("space"); + } else if (keyCopy == 27) { + friendlyTextPtr = const_cast<char*>("ESC"); + } else if (keyCopy == 0) { + friendlyTextPtr = const_cast<char*>("NUL"); + } else if (keyCopy == 127) { + friendlyTextPtr = const_cast<char*>("DEL"); + } else { + friendlyTextBuf[0] = '^'; + friendlyTextBuf[1] = keyCopy + 0x40; + friendlyTextBuf[2] = 0; + friendlyTextPtr = friendlyTextBuf; + } + printf("%d x%02X (%s%s) ", key, key, prefixText, friendlyTextPtr); + } + printf("\x1b[1G\n"); // go to first column of new line + + // drop out of this loop on ctrl-C + if (keys[0] == ctrlChar('C')) { + printf("Leaving keyboard debugging mode (on ctrl-C)\n"); + fflush(stdout); + return -2; + } + } + } +#endif // _DEBUG_LINUX_KEYBOARD + + EscapeSequenceProcessing::thisKeyMetaCtrl = + 0; // no modifiers yet at initialDispatch + return EscapeSequenceProcessing::doDispatch( + c, EscapeSequenceProcessing::initialDispatch); +#endif // #_WIN32 +} + +/** + * Free memory used in a recent command completion session + * + * @param lc pointer to a linenoiseCompletions struct + */ +static void freeCompletions(linenoiseCompletions* lc) { + lc->completionStrings.clear(); +} + +/** + * convert {CTRL + 'A'}, {CTRL + 'a'} and {CTRL + ctrlChar( 'A' )} into + * ctrlChar( 'A' ) + * leave META alone + * + * @param c character to clean up + * @return cleaned-up character + */ +static int cleanupCtrl(int c) { + if (c & CTRL) { + int d = c & 0x1FF; + if (d >= 'a' && d <= 'z') { + c = (c + ('a' - ctrlChar('A'))) & ~CTRL; + } + if (d >= 'A' && d <= 'Z') { + c = (c + ('A' - ctrlChar('A'))) & ~CTRL; + } + if (d >= ctrlChar('A') && d <= ctrlChar('Z')) { + c = c & ~CTRL; + } + } + return c; +} + +// break characters that may precede items to be completed +static const char breakChars[] = " =+-/\\*?\"'`&<>;|@{([])}"; + +// maximum number of completions to display without asking +static const size_t completionCountCutoff = 100; + +/** + * Handle command completion, using a completionCallback() routine to provide + * possible substitutions + * This routine 
handles the mechanics of updating the user's input buffer with + * possible replacement + * of text as the user selects a proposed completion string, or cancels the + * completion attempt. + * @param pi PromptBase struct holding information about the prompt and our + * screen position + */ +int InputBuffer::completeLine(PromptBase& pi) { + linenoiseCompletions lc; + char32_t c = 0; + + // completionCallback() expects a parsable entity, so find the previous break + // character and + // extract a copy to parse. we also handle the case where tab is hit while + // not at end-of-line. + int startIndex = pos; + while (--startIndex >= 0) { + if (strchr(breakChars, buf32[startIndex])) { + break; + } + } + ++startIndex; + int itemLength = pos - startIndex; + Utf32String unicodeCopy(&buf32[startIndex], itemLength); + Utf8String parseItem(unicodeCopy); + + // get a list of completions + completionCallback(parseItem.get(), &lc); + + // if no completions, we are done + if (lc.completionStrings.size() == 0) { + beep(); + freeCompletions(&lc); + return 0; + } + + // at least one completion + int longestCommonPrefix = 0; + int displayLength = 0; + if (lc.completionStrings.size() == 1) { + longestCommonPrefix = static_cast<int>(lc.completionStrings[0].length()); + } else { + bool keepGoing = true; + while (keepGoing) { + for (size_t j = 0; j < lc.completionStrings.size() - 1; ++j) { + char32_t c1 = lc.completionStrings[j][longestCommonPrefix]; + char32_t c2 = lc.completionStrings[j + 1][longestCommonPrefix]; + if ((0 == c1) || (0 == c2) || (c1 != c2)) { + keepGoing = false; + break; + } + } + if (keepGoing) { + ++longestCommonPrefix; + } + } + } + if (lc.completionStrings.size() != 1) { // beep if ambiguous + beep(); + } + + // if we can extend the item, extend it and return to main loop + if (longestCommonPrefix > itemLength) { + displayLength = len + longestCommonPrefix - itemLength; + if (displayLength > buflen) { + longestCommonPrefix -= displayLength - buflen; // don't overflow buffer + displayLength = buflen; // truncate the insertion + beep(); // and make a noise + } + Utf32String displayText(displayLength + 1); + memcpy(displayText.get(), buf32, sizeof(char32_t) * startIndex); + memcpy(&displayText[startIndex], &lc.completionStrings[0][0], + sizeof(char32_t) * longestCommonPrefix); + int tailIndex = startIndex + longestCommonPrefix; + memcpy(&displayText[tailIndex], &buf32[pos], + sizeof(char32_t) * (displayLength - tailIndex + 1)); + copyString32(buf32, displayText.get(), displayLength); + pos = startIndex + longestCommonPrefix; + len = displayLength; + refreshLine(pi); + return 0; + } + + // we can't complete any further, wait for second tab + do { + c = linenoiseReadChar(); + c = cleanupCtrl(c); + } while (c == static_cast<char32_t>(-1)); + + // if any character other than tab, pass it to the main loop + if (c != ctrlChar('I')) { + freeCompletions(&lc); + return c; + } + + // we got a second tab, maybe show list of possible completions + bool showCompletions = true; + bool onNewLine = false; + if (lc.completionStrings.size() > completionCountCutoff) { + int savePos = + pos; // move cursor to EOL to avoid overwriting the command line + pos = len; + refreshLine(pi); + pos = savePos; + printf("\nDisplay all %u possibilities? 
(y or n)", + static_cast<unsigned int>(lc.completionStrings.size())); + fflush(stdout); + onNewLine = true; + while (c != 'y' && c != 'Y' && c != 'n' && c != 'N' && c != ctrlChar('C')) { + do { + c = linenoiseReadChar(); + c = cleanupCtrl(c); + } while (c == static_cast<char32_t>(-1)); + } + switch (c) { + case 'n': + case 'N': + showCompletions = false; + freeCompletions(&lc); + break; + case ctrlChar('C'): + showCompletions = false; + freeCompletions(&lc); + if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got + c = 0; + break; + } + } + + // if showing the list, do it the way readline does it + bool stopList = false; + if (showCompletions) { + int longestCompletion = 0; + for (size_t j = 0; j < lc.completionStrings.size(); ++j) { + itemLength = static_cast<int>(lc.completionStrings[j].length()); + if (itemLength > longestCompletion) { + longestCompletion = itemLength; + } + } + longestCompletion += 2; + int columnCount = pi.promptScreenColumns / longestCompletion; + if (columnCount < 1) { + columnCount = 1; + } + if (!onNewLine) { // skip this if we showed "Display all %d possibilities?" + int savePos = + pos; // move cursor to EOL to avoid overwriting the command line + pos = len; + refreshLine(pi); + pos = savePos; + } + size_t pauseRow = getScreenRows() - 1; + size_t rowCount = + (lc.completionStrings.size() + columnCount - 1) / columnCount; + for (size_t row = 0; row < rowCount; ++row) { + if (row == pauseRow) { + printf("\n--More--"); + fflush(stdout); + c = 0; + bool doBeep = false; + while (c != ' ' && c != '\r' && c != '\n' && c != 'y' && c != 'Y' && + c != 'n' && c != 'N' && c != 'q' && c != 'Q' && + c != ctrlChar('C')) { + if (doBeep) { + beep(); + } + doBeep = true; + do { + c = linenoiseReadChar(); + c = cleanupCtrl(c); + } while (c == static_cast<char32_t>(-1)); + } + switch (c) { + case ' ': + case 'y': + case 'Y': + printf("\r \r"); + pauseRow += getScreenRows() - 1; + break; + case '\r': + case '\n': + printf("\r \r"); + ++pauseRow; + break; + case 'n': + case 'N': + case 'q': + case 'Q': + printf("\r \r"); + stopList = true; + break; + case ctrlChar('C'): + if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got + stopList = true; + break; + } + } else { + printf("\n"); + } + if (stopList) { + break; + } + for (int column = 0; column < columnCount; ++column) { + size_t index = (column * rowCount) + row; + if (index < lc.completionStrings.size()) { + itemLength = static_cast<int>(lc.completionStrings[index].length()); + fflush(stdout); + if (write32(1, lc.completionStrings[index].get(), itemLength) == -1) + return -1; + if (((column + 1) * rowCount) + row < lc.completionStrings.size()) { + for (int k = itemLength; k < longestCompletion; ++k) { + printf(" "); + } + } + } + } + } + fflush(stdout); + freeCompletions(&lc); + } + + // display the prompt on a new line, then redisplay the input buffer + if (!stopList || c == ctrlChar('C')) { + if (write(1, "\n", 1) == -1) return 0; + } + if (!pi.write()) return 0; +#ifndef _WIN32 + // we have to generate our own newline on line wrap on Linux + if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) + if (write(1, "\n", 1) == -1) return 0; +#endif + pi.promptCursorRowOffset = pi.promptExtraLines; + refreshLine(pi); + return 0; +} + +/** + * Clear the screen ONLY (no redisplay of anything) + */ +void linenoiseClearScreen(void) { +#ifdef _WIN32 + COORD coord = {0, 0}; + CONSOLE_SCREEN_BUFFER_INFO inf; + HANDLE screenHandle = GetStdHandle(STD_OUTPUT_HANDLE); + GetConsoleScreenBufferInfo(screenHandle, &inf); + 
SetConsoleCursorPosition(screenHandle, coord); + DWORD count; + FillConsoleOutputCharacterA(screenHandle, ' ', inf.dwSize.X * inf.dwSize.Y, + coord, &count); +#else + if (write(1, "\x1b[H\x1b[2J", 7) <= 0) return; +#endif +} + +void InputBuffer::clearScreen(PromptBase& pi) { + linenoiseClearScreen(); + if (!pi.write()) return; +#ifndef _WIN32 + // we have to generate our own newline on line wrap on Linux + if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) + if (write(1, "\n", 1) == -1) return; +#endif + pi.promptCursorRowOffset = pi.promptExtraLines; + refreshLine(pi); +} + +/** + * Incremental history search -- take over the prompt and keyboard as the user + * types a search + * string, deletes characters from it, changes direction, and either accepts the + * found line (for + * execution orediting) or cancels. + * @param pi PromptBase struct holding information about the (old, + * static) prompt and our + * screen position + * @param startChar the character that began the search, used to set the initial + * direction + */ +int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) { + size_t bufferSize; + size_t ucharCount = 0; + + // if not already recalling, add the current line to the history list so we + // don't have to + // special case it + if (historyIndex == historyLen - 1) { + free(history[historyLen - 1]); + bufferSize = sizeof(char32_t) * len + 1; + unique_ptr<char[]> tempBuffer(new char[bufferSize]); + copyString32to8(tempBuffer.get(), bufferSize, buf32); + history[historyLen - 1] = strdup8(tempBuffer.get()); + } + int historyLineLength = len; + int historyLinePosition = pos; + char32_t emptyBuffer[1]; + char emptyWidths[1]; + InputBuffer empty(emptyBuffer, emptyWidths, 1); + empty.refreshLine(pi); // erase the old input first + DynamicPrompt dp(pi, (startChar == ctrlChar('R')) ? 
-1 : 1); + + dp.promptPreviousLen = pi.promptPreviousLen; + dp.promptPreviousInputLen = pi.promptPreviousInputLen; + dynamicRefresh(dp, buf32, historyLineLength, + historyLinePosition); // draw user's text with our prompt + + // loop until we get an exit character + int c = 0; + bool keepLooping = true; + bool useSearchedLine = true; + bool searchAgain = false; + char32_t* activeHistoryLine = 0; + while (keepLooping) { + c = linenoiseReadChar(); + c = cleanupCtrl(c); // convert CTRL + <char> into normal ctrl + + switch (c) { + // these characters keep the selected text but do not execute it + case ctrlChar('A'): // ctrl-A, move cursor to start of line + case HOME_KEY: + case ctrlChar('B'): // ctrl-B, move cursor left by one character + case LEFT_ARROW_KEY: + case META + 'b': // meta-B, move cursor left by one word + case META + 'B': + case CTRL + LEFT_ARROW_KEY: + case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't + case ctrlChar('D'): + case META + 'd': // meta-D, kill word to right of cursor + case META + 'D': + case ctrlChar('E'): // ctrl-E, move cursor to end of line + case END_KEY: + case ctrlChar('F'): // ctrl-F, move cursor right by one character + case RIGHT_ARROW_KEY: + case META + 'f': // meta-F, move cursor right by one word + case META + 'F': + case CTRL + RIGHT_ARROW_KEY: + case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't + case META + ctrlChar('H'): + case ctrlChar('J'): + case ctrlChar('K'): // ctrl-K, kill from cursor to end of line + case ctrlChar('M'): + case ctrlChar('N'): // ctrl-N, recall next line in history + case ctrlChar('P'): // ctrl-P, recall previous line in history + case DOWN_ARROW_KEY: + case UP_ARROW_KEY: + case ctrlChar('T'): // ctrl-T, transpose characters + case ctrlChar( + 'U'): // ctrl-U, kill all characters to the left of the cursor + case ctrlChar('W'): + case META + 'y': // meta-Y, "yank-pop", rotate popped text + case META + 'Y': + case 127: + case DELETE_KEY: + case META + '<': // start of history + case PAGE_UP_KEY: + case META + '>': // end of history + case PAGE_DOWN_KEY: + keepLooping = false; + break; + + // these characters revert the input line to its previous state + case ctrlChar('C'): // ctrl-C, abort this line + case ctrlChar('G'): + case ctrlChar('L'): // ctrl-L, clear screen and redisplay line + keepLooping = false; + useSearchedLine = false; + if (c != ctrlChar('L')) { + c = -1; // ctrl-C and ctrl-G just abort the search and do nothing + // else + } + break; + + // these characters stay in search mode and update the display + case ctrlChar('S'): + case ctrlChar('R'): + if (dp.searchTextLen == + 0) { // if no current search text, recall previous text + if (previousSearchText.length()) { + dp.updateSearchText(previousSearchText.get()); + } + } + if ((dp.direction == 1 && c == ctrlChar('R')) || + (dp.direction == -1 && c == ctrlChar('S'))) { + dp.direction = 0 - dp.direction; // reverse direction + dp.updateSearchPrompt(); // change the prompt + } else { + searchAgain = true; // same direction, search again + } + break; + +// job control is its own thing +#ifndef _WIN32 + case ctrlChar('Z'): // ctrl-Z, job control + disableRawMode(); // Returning to Linux (whatever) shell, leave raw + // mode + raise(SIGSTOP); // Break out in mid-line + enableRawMode(); // Back from Linux shell, re-enter raw mode + { + bufferSize = historyLineLength + 1; + unique_ptr<char32_t[]> tempUnicode(new char32_t[bufferSize]); + copyString8to32(tempUnicode.get(), bufferSize, ucharCount, + history[historyIndex]); + 
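+        // back in raw mode: redraw the search prompt and the recalled history
+        // line exactly as they looked before the suspend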
dynamicRefresh(dp, tempUnicode.get(), historyLineLength, + historyLinePosition); + } + continue; + break; +#endif + + // these characters update the search string, and hence the selected input + // line + case ctrlChar('H'): // backspace/ctrl-H, delete char to left of cursor + if (dp.searchTextLen > 0) { + unique_ptr<char32_t[]> tempUnicode(new char32_t[dp.searchTextLen]); + --dp.searchTextLen; + dp.searchText[dp.searchTextLen] = 0; + copyString32(tempUnicode.get(), dp.searchText.get(), + dp.searchTextLen); + dp.updateSearchText(tempUnicode.get()); + } else { + beep(); + } + break; + + case ctrlChar('Y'): // ctrl-Y, yank killed text + break; + + default: + if (!isControlChar(c) && c <= 0x0010FFFF) { // not an action character + unique_ptr<char32_t[]> tempUnicode( + new char32_t[dp.searchTextLen + 2]); + copyString32(tempUnicode.get(), dp.searchText.get(), + dp.searchTextLen); + tempUnicode[dp.searchTextLen] = c; + tempUnicode[dp.searchTextLen + 1] = 0; + dp.updateSearchText(tempUnicode.get()); + } else { + beep(); + } + } // switch + + // if we are staying in search mode, search now + if (keepLooping) { + bufferSize = historyLineLength + 1; + if (activeHistoryLine) { + delete[] activeHistoryLine; + activeHistoryLine = nullptr; + } + activeHistoryLine = new char32_t[bufferSize]; + copyString8to32(activeHistoryLine, bufferSize, ucharCount, + history[historyIndex]); + if (dp.searchTextLen > 0) { + bool found = false; + int historySearchIndex = historyIndex; + int lineLength = static_cast<int>(ucharCount); + int lineSearchPos = historyLinePosition; + if (searchAgain) { + lineSearchPos += dp.direction; + } + searchAgain = false; + while (true) { + while ((dp.direction > 0) ? (lineSearchPos < lineLength) + : (lineSearchPos >= 0)) { + if (strncmp32(dp.searchText.get(), + &activeHistoryLine[lineSearchPos], + dp.searchTextLen) == 0) { + found = true; + break; + } + lineSearchPos += dp.direction; + } + if (found) { + historyIndex = historySearchIndex; + historyLineLength = lineLength; + historyLinePosition = lineSearchPos; + break; + } else if ((dp.direction > 0) ? (historySearchIndex < historyLen - 1) + : (historySearchIndex > 0)) { + historySearchIndex += dp.direction; + bufferSize = strlen8(history[historySearchIndex]) + 1; + delete[] activeHistoryLine; + activeHistoryLine = nullptr; + activeHistoryLine = new char32_t[bufferSize]; + copyString8to32(activeHistoryLine, bufferSize, ucharCount, + history[historySearchIndex]); + lineLength = static_cast<int>(ucharCount); + lineSearchPos = + (dp.direction > 0) ? 
0 : (lineLength - dp.searchTextLen); + } else { + beep(); + break; + } + }; // while + } + if (activeHistoryLine) { + delete[] activeHistoryLine; + activeHistoryLine = nullptr; + } + bufferSize = historyLineLength + 1; + activeHistoryLine = new char32_t[bufferSize]; + copyString8to32(activeHistoryLine, bufferSize, ucharCount, + history[historyIndex]); + dynamicRefresh(dp, activeHistoryLine, historyLineLength, + historyLinePosition); // draw user's text with our prompt + } + } // while + + // leaving history search, restore previous prompt, maybe make searched line + // current + PromptBase pb; + pb.promptChars = pi.promptIndentation; + pb.promptBytes = pi.promptBytes; + Utf32String tempUnicode(pb.promptBytes + 1); + + copyString32(tempUnicode.get(), &pi.promptText[pi.promptLastLinePosition], + pb.promptBytes - pi.promptLastLinePosition); + tempUnicode.initFromBuffer(); + pb.promptText = tempUnicode; + pb.promptExtraLines = 0; + pb.promptIndentation = pi.promptIndentation; + pb.promptLastLinePosition = 0; + pb.promptPreviousInputLen = historyLineLength; + pb.promptCursorRowOffset = dp.promptCursorRowOffset; + pb.promptScreenColumns = pi.promptScreenColumns; + pb.promptPreviousLen = dp.promptChars; + if (useSearchedLine && activeHistoryLine) { + historyRecallMostRecent = true; + copyString32(buf32, activeHistoryLine, buflen + 1); + len = historyLineLength; + pos = historyLinePosition; + } + if (activeHistoryLine) { + delete[] activeHistoryLine; + activeHistoryLine = nullptr; + } + dynamicRefresh(pb, buf32, len, + pos); // redraw the original prompt with current input + pi.promptPreviousInputLen = len; + pi.promptCursorRowOffset = pi.promptExtraLines + pb.promptCursorRowOffset; + previousSearchText = + dp.searchText; // save search text for possible reuse on ctrl-R ctrl-R + return c; // pass a character or -1 back to main loop +} + +static bool isCharacterAlphanumeric(char32_t testChar) { +#ifdef _WIN32 + return (iswalnum((wint_t)testChar) != 0 ? true : false); +#else + return (iswalnum(testChar) != 0 ? 
true : false); +#endif +} + +#ifndef _WIN32 +static bool gotResize = false; +#endif +static int keyType = 0; + +int InputBuffer::getInputLine(PromptBase& pi) { + keyType = 0; + + // The latest history entry is always our current buffer + if (len > 0) { + size_t bufferSize = sizeof(char32_t) * len + 1; + unique_ptr<char[]> tempBuffer(new char[bufferSize]); + copyString32to8(tempBuffer.get(), bufferSize, buf32); + linenoiseHistoryAdd(tempBuffer.get()); + } else { + linenoiseHistoryAdd(""); + } + historyIndex = historyLen - 1; + historyRecallMostRecent = false; + + // display the prompt + if (!pi.write()) return -1; + +#ifndef _WIN32 + // we have to generate our own newline on line wrap on Linux + if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) + if (write(1, "\n", 1) == -1) return -1; +#endif + + // the cursor starts out at the end of the prompt + pi.promptCursorRowOffset = pi.promptExtraLines; + + // kill and yank start in "other" mode + killRing.lastAction = KillRing::actionOther; + + // when history search returns control to us, we execute its terminating + // keystroke + int terminatingKeystroke = -1; + + // if there is already text in the buffer, display it first + if (len > 0) { + refreshLine(pi); + } + + // loop collecting characters, respond to line editing characters + while (true) { + int c; + if (terminatingKeystroke == -1) { + c = linenoiseReadChar(); // get a new keystroke + + keyType = 0; + if (c != 0) { + // set flag that we got some input + if (c == ctrlChar('C')) { + keyType = 1; + } else if (c == ctrlChar('D')) { + keyType = 2; + } + } + +#ifndef _WIN32 + if (c == 0 && gotResize) { + // caught a window resize event + // now redraw the prompt and line + gotResize = false; + pi.promptScreenColumns = getScreenColumns(); + dynamicRefresh(pi, buf32, len, + pos); // redraw the original prompt with current input + continue; + } +#endif + } else { + c = terminatingKeystroke; // use the terminating keystroke from search + terminatingKeystroke = -1; // clear it once we've used it + } + + c = cleanupCtrl(c); // convert CTRL + <char> into normal ctrl + + if (c == 0) { + return len; + } + + if (c == -1) { + refreshLine(pi); + continue; + } + + if (c == -2) { + if (!pi.write()) return -1; + refreshLine(pi); + continue; + } + + // ctrl-I/tab, command completion, needs to be before switch statement + if (c == ctrlChar('I') && completionCallback) { + killRing.lastAction = KillRing::actionOther; + historyRecallMostRecent = false; + + // completeLine does the actual completion and replacement + c = completeLine(pi); + + if (c < 0) // return on error + return len; + + if (c == 0) // read next character when 0 + continue; + + // deliberate fall-through here, so we use the terminating character + } + + switch (c) { + case ctrlChar('A'): // ctrl-A, move cursor to start of line + case HOME_KEY: + killRing.lastAction = KillRing::actionOther; + pos = 0; + refreshLine(pi); + break; + + case ctrlChar('B'): // ctrl-B, move cursor left by one character + case LEFT_ARROW_KEY: + killRing.lastAction = KillRing::actionOther; + if (pos > 0) { + --pos; + refreshLine(pi); + } + break; + + case META + 'b': // meta-B, move cursor left by one word + case META + 'B': + case CTRL + LEFT_ARROW_KEY: + case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't + killRing.lastAction = KillRing::actionOther; + if (pos > 0) { + while (pos > 0 && !isCharacterAlphanumeric(buf32[pos - 1])) { + --pos; + } + while (pos > 0 && isCharacterAlphanumeric(buf32[pos - 1])) { + --pos; + } + refreshLine(pi); + } 
+ break; + + case ctrlChar('C'): // ctrl-C, abort this line + killRing.lastAction = KillRing::actionOther; + historyRecallMostRecent = false; + errno = EAGAIN; + --historyLen; + free(history[historyLen]); + // we need one last refresh with the cursor at the end of the line + // so we don't display the next prompt over the previous input line + pos = len; // pass len as pos for EOL + refreshLine(pi); + if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got + return -1; + + case META + 'c': // meta-C, give word initial Cap + case META + 'C': + killRing.lastAction = KillRing::actionOther; + historyRecallMostRecent = false; + if (pos < len) { + while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { + ++pos; + } + if (pos < len && isCharacterAlphanumeric(buf32[pos])) { + if (buf32[pos] >= 'a' && buf32[pos] <= 'z') { + buf32[pos] += 'A' - 'a'; + } + ++pos; + } + while (pos < len && isCharacterAlphanumeric(buf32[pos])) { + if (buf32[pos] >= 'A' && buf32[pos] <= 'Z') { + buf32[pos] += 'a' - 'A'; + } + ++pos; + } + refreshLine(pi); + } + break; + + // ctrl-D, delete the character under the cursor + // on an empty line, exit the shell + case ctrlChar('D'): + killRing.lastAction = KillRing::actionOther; + if (len > 0 && pos < len) { + historyRecallMostRecent = false; + memmove(buf32 + pos, buf32 + pos + 1, sizeof(char32_t) * (len - pos)); + --len; + refreshLine(pi); + } else if (len == 0) { + --historyLen; + free(history[historyLen]); + return -1; + } + break; + + case META + 'd': // meta-D, kill word to right of cursor + case META + 'D': + if (pos < len) { + historyRecallMostRecent = false; + int endingPos = pos; + while (endingPos < len && + !isCharacterAlphanumeric(buf32[endingPos])) { + ++endingPos; + } + while (endingPos < len && isCharacterAlphanumeric(buf32[endingPos])) { + ++endingPos; + } + killRing.kill(&buf32[pos], endingPos - pos, true); + memmove(buf32 + pos, buf32 + endingPos, + sizeof(char32_t) * (len - endingPos + 1)); + len -= endingPos - pos; + refreshLine(pi); + } + killRing.lastAction = KillRing::actionKill; + break; + + case ctrlChar('E'): // ctrl-E, move cursor to end of line + case END_KEY: + killRing.lastAction = KillRing::actionOther; + pos = len; + refreshLine(pi); + break; + + case ctrlChar('F'): // ctrl-F, move cursor right by one character + case RIGHT_ARROW_KEY: + killRing.lastAction = KillRing::actionOther; + if (pos < len) { + ++pos; + refreshLine(pi); + } + break; + + case META + 'f': // meta-F, move cursor right by one word + case META + 'F': + case CTRL + RIGHT_ARROW_KEY: + case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't + killRing.lastAction = KillRing::actionOther; + if (pos < len) { + while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { + ++pos; + } + while (pos < len && isCharacterAlphanumeric(buf32[pos])) { + ++pos; + } + refreshLine(pi); + } + break; + + case ctrlChar('H'): // backspace/ctrl-H, delete char to left of cursor + killRing.lastAction = KillRing::actionOther; + if (pos > 0) { + historyRecallMostRecent = false; + memmove(buf32 + pos - 1, buf32 + pos, + sizeof(char32_t) * (1 + len - pos)); + --pos; + --len; + refreshLine(pi); + } + break; + + // meta-Backspace, kill word to left of cursor + case META + ctrlChar('H'): + if (pos > 0) { + historyRecallMostRecent = false; + int startingPos = pos; + while (pos > 0 && !isCharacterAlphanumeric(buf32[pos - 1])) { + --pos; + } + while (pos > 0 && isCharacterAlphanumeric(buf32[pos - 1])) { + --pos; + } + killRing.kill(&buf32[pos], startingPos - pos, false); + 
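+        // slide the tail of the buffer left over the killed word; the count
+        // (len - startingPos + 1) includes the terminating null character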
memmove(buf32 + pos, buf32 + startingPos, + sizeof(char32_t) * (len - startingPos + 1)); + len -= startingPos - pos; + refreshLine(pi); + } + killRing.lastAction = KillRing::actionKill; + break; + + case ctrlChar('J'): // ctrl-J/linefeed/newline, accept line + case ctrlChar('M'): // ctrl-M/return/enter + killRing.lastAction = KillRing::actionOther; + // we need one last refresh with the cursor at the end of the line + // so we don't display the next prompt over the previous input line + pos = len; // pass len as pos for EOL + refreshLine(pi); + historyPreviousIndex = historyRecallMostRecent ? historyIndex : -2; + --historyLen; + free(history[historyLen]); + return len; + + case ctrlChar('K'): // ctrl-K, kill from cursor to end of line + killRing.kill(&buf32[pos], len - pos, true); + buf32[pos] = '\0'; + len = pos; + refreshLine(pi); + killRing.lastAction = KillRing::actionKill; + historyRecallMostRecent = false; + break; + + case ctrlChar('L'): // ctrl-L, clear screen and redisplay line + clearScreen(pi); + break; + + case META + 'l': // meta-L, lowercase word + case META + 'L': + killRing.lastAction = KillRing::actionOther; + if (pos < len) { + historyRecallMostRecent = false; + while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { + ++pos; + } + while (pos < len && isCharacterAlphanumeric(buf32[pos])) { + if (buf32[pos] >= 'A' && buf32[pos] <= 'Z') { + buf32[pos] += 'a' - 'A'; + } + ++pos; + } + refreshLine(pi); + } + break; + + case ctrlChar('N'): // ctrl-N, recall next line in history + case ctrlChar('P'): // ctrl-P, recall previous line in history + case DOWN_ARROW_KEY: + case UP_ARROW_KEY: + killRing.lastAction = KillRing::actionOther; + // if not already recalling, add the current line to the history list so + // we don't + // have to special case it + if (historyIndex == historyLen - 1) { + free(history[historyLen - 1]); + size_t tempBufferSize = sizeof(char32_t) * len + 1; + unique_ptr<char[]> tempBuffer(new char[tempBufferSize]); + copyString32to8(tempBuffer.get(), tempBufferSize, buf32); + history[historyLen - 1] = strdup8(tempBuffer.get()); + } + if (historyLen > 1) { + if (c == UP_ARROW_KEY) { + c = ctrlChar('P'); + } + if (historyPreviousIndex != -2 && c != ctrlChar('P')) { + historyIndex = + 1 + historyPreviousIndex; // emulate Windows down-arrow + } else { + historyIndex += (c == ctrlChar('P')) ? -1 : 1; + } + historyPreviousIndex = -2; + if (historyIndex < 0) { + historyIndex = 0; + break; + } else if (historyIndex >= historyLen) { + historyIndex = historyLen - 1; + break; + } + historyRecallMostRecent = true; + size_t ucharCount = 0; + copyString8to32(buf32, buflen, ucharCount, history[historyIndex]); + len = pos = static_cast<int>(ucharCount); + refreshLine(pi); + } + break; + + case ctrlChar('R'): // ctrl-R, reverse history search + case ctrlChar('S'): // ctrl-S, forward history search + terminatingKeystroke = incrementalHistorySearch(pi, c); + break; + + case ctrlChar('T'): // ctrl-T, transpose characters + killRing.lastAction = KillRing::actionOther; + if (pos > 0 && len > 1) { + historyRecallMostRecent = false; + size_t leftCharPos = (pos == len) ? 
pos - 2 : pos - 1; + char32_t aux = buf32[leftCharPos]; + buf32[leftCharPos] = buf32[leftCharPos + 1]; + buf32[leftCharPos + 1] = aux; + if (pos != len) ++pos; + refreshLine(pi); + } + break; + + case ctrlChar( + 'U'): // ctrl-U, kill all characters to the left of the cursor + if (pos > 0) { + historyRecallMostRecent = false; + killRing.kill(&buf32[0], pos, false); + len -= pos; + memmove(buf32, buf32 + pos, sizeof(char32_t) * (len + 1)); + pos = 0; + refreshLine(pi); + } + killRing.lastAction = KillRing::actionKill; + break; + + case META + 'u': // meta-U, uppercase word + case META + 'U': + killRing.lastAction = KillRing::actionOther; + if (pos < len) { + historyRecallMostRecent = false; + while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { + ++pos; + } + while (pos < len && isCharacterAlphanumeric(buf32[pos])) { + if (buf32[pos] >= 'a' && buf32[pos] <= 'z') { + buf32[pos] += 'A' - 'a'; + } + ++pos; + } + refreshLine(pi); + } + break; + + // ctrl-W, kill to whitespace (not word) to left of cursor + case ctrlChar('W'): + if (pos > 0) { + historyRecallMostRecent = false; + int startingPos = pos; + while (pos > 0 && buf32[pos - 1] == ' ') { + --pos; + } + while (pos > 0 && buf32[pos - 1] != ' ') { + --pos; + } + killRing.kill(&buf32[pos], startingPos - pos, false); + memmove(buf32 + pos, buf32 + startingPos, + sizeof(char32_t) * (len - startingPos + 1)); + len -= startingPos - pos; + refreshLine(pi); + } + killRing.lastAction = KillRing::actionKill; + break; + + case ctrlChar('Y'): // ctrl-Y, yank killed text + historyRecallMostRecent = false; + { + Utf32String* restoredText = killRing.yank(); + if (restoredText) { + bool truncated = false; + size_t ucharCount = restoredText->length(); + if (ucharCount > static_cast<size_t>(buflen - len)) { + ucharCount = buflen - len; + truncated = true; + } + memmove(buf32 + pos + ucharCount, buf32 + pos, + sizeof(char32_t) * (len - pos + 1)); + memmove(buf32 + pos, restoredText->get(), + sizeof(char32_t) * ucharCount); + pos += static_cast<int>(ucharCount); + len += static_cast<int>(ucharCount); + refreshLine(pi); + killRing.lastAction = KillRing::actionYank; + killRing.lastYankSize = ucharCount; + if (truncated) { + beep(); + } + } else { + beep(); + } + } + break; + + case META + 'y': // meta-Y, "yank-pop", rotate popped text + case META + 'Y': + if (killRing.lastAction == KillRing::actionYank) { + historyRecallMostRecent = false; + Utf32String* restoredText = killRing.yankPop(); + if (restoredText) { + bool truncated = false; + size_t ucharCount = restoredText->length(); + if (ucharCount > + static_cast<size_t>(killRing.lastYankSize + buflen - len)) { + ucharCount = killRing.lastYankSize + buflen - len; + truncated = true; + } + if (ucharCount > killRing.lastYankSize) { + memmove(buf32 + pos + ucharCount - killRing.lastYankSize, + buf32 + pos, sizeof(char32_t) * (len - pos + 1)); + memmove(buf32 + pos - killRing.lastYankSize, restoredText->get(), + sizeof(char32_t) * ucharCount); + } else { + memmove(buf32 + pos - killRing.lastYankSize, restoredText->get(), + sizeof(char32_t) * ucharCount); + memmove(buf32 + pos + ucharCount - killRing.lastYankSize, + buf32 + pos, sizeof(char32_t) * (len - pos + 1)); + } + pos += static_cast<int>(ucharCount - killRing.lastYankSize); + len += static_cast<int>(ucharCount - killRing.lastYankSize); + killRing.lastYankSize = ucharCount; + refreshLine(pi); + if (truncated) { + beep(); + } + break; + } + } + beep(); + break; + +#ifndef _WIN32 + case ctrlChar('Z'): // ctrl-Z, job control + disableRawMode(); // 
Returning to Linux (whatever) shell, leave raw + // mode + raise(SIGSTOP); // Break out in mid-line + enableRawMode(); // Back from Linux shell, re-enter raw mode + if (!pi.write()) break; // Redraw prompt + refreshLine(pi); // Refresh the line + break; +#endif + + // DEL, delete the character under the cursor + case 127: + case DELETE_KEY: + killRing.lastAction = KillRing::actionOther; + if (len > 0 && pos < len) { + historyRecallMostRecent = false; + memmove(buf32 + pos, buf32 + pos + 1, sizeof(char32_t) * (len - pos)); + --len; + refreshLine(pi); + } + break; + + case META + '<': // meta-<, beginning of history + case PAGE_UP_KEY: // Page Up, beginning of history + case META + '>': // meta->, end of history + case PAGE_DOWN_KEY: // Page Down, end of history + killRing.lastAction = KillRing::actionOther; + // if not already recalling, add the current line to the history list so + // we don't + // have to special case it + if (historyIndex == historyLen - 1) { + free(history[historyLen - 1]); + size_t tempBufferSize = sizeof(char32_t) * len + 1; + unique_ptr<char[]> tempBuffer(new char[tempBufferSize]); + copyString32to8(tempBuffer.get(), tempBufferSize, buf32); + history[historyLen - 1] = strdup8(tempBuffer.get()); + } + if (historyLen > 1) { + historyIndex = + (c == META + '<' || c == PAGE_UP_KEY) ? 0 : historyLen - 1; + historyPreviousIndex = -2; + historyRecallMostRecent = true; + size_t ucharCount = 0; + copyString8to32(buf32, buflen, ucharCount, history[historyIndex]); + len = pos = static_cast<int>(ucharCount); + refreshLine(pi); + } + break; + + // not one of our special characters, maybe insert it in the buffer + default: + killRing.lastAction = KillRing::actionOther; + historyRecallMostRecent = false; + if (c & (META | CTRL)) { // beep on unknown Ctrl and/or Meta keys + beep(); + break; + } + if (len < buflen) { + if (isControlChar(c)) { // don't insert control characters + beep(); + break; + } + if (len == pos) { // at end of buffer + buf32[pos] = c; + ++pos; + ++len; + buf32[len] = '\0'; + int inputLen = calculateColumnPosition(buf32, len); + if (pi.promptIndentation + inputLen < pi.promptScreenColumns) { + if (inputLen > pi.promptPreviousInputLen) + pi.promptPreviousInputLen = inputLen; + /* Avoid a full update of the line in the + * trivial case. 
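+             * Here "trivial" means a printable character was appended at the
+             * very end of the buffer and the line still fits on the current
+             * screen row, so emitting just that one character is enough and
+             * no full redraw is needed.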
*/ + if (write32(1, reinterpret_cast<char32_t*>(&c), 1) == -1) + return -1; + } else { + refreshLine(pi); + } + } else { // not at end of buffer, have to move characters to our + // right + memmove(buf32 + pos + 1, buf32 + pos, + sizeof(char32_t) * (len - pos)); + buf32[pos] = c; + ++len; + ++pos; + buf32[len] = '\0'; + refreshLine(pi); + } + } else { + beep(); // buffer is full, beep on new characters + } + break; + } + } + return len; +} + +static string preloadedBufferContents; // used with linenoisePreloadBuffer +static string preloadErrorMessage; + +/** + * linenoisePreloadBuffer provides text to be inserted into the command buffer + * + * the provided text will be processed to be usable and will be used to preload + * the input buffer on the next call to linenoise() + * + * @param preloadText text to begin with on the next call to linenoise() + */ +void linenoisePreloadBuffer(const char* preloadText) { + if (!preloadText) { + return; + } + int bufferSize = static_cast<int>(strlen(preloadText) + 1); + unique_ptr<char[]> tempBuffer(new char[bufferSize]); + strncpy(&tempBuffer[0], preloadText, bufferSize); + + // remove characters that won't display correctly + char* pIn = &tempBuffer[0]; + char* pOut = pIn; + bool controlsStripped = false; + bool whitespaceSeen = false; + while (*pIn) { + unsigned char c = + *pIn++; // we need unsigned so chars 0x80 and above are allowed + if ('\r' == c) { // silently skip CR + continue; + } + if ('\n' == c || '\t' == c) { // note newline or tab + whitespaceSeen = true; + continue; + } + if (isControlChar( + c)) { // remove other control characters, flag for message + controlsStripped = true; + *pOut++ = ' '; + continue; + } + if (whitespaceSeen) { // convert whitespace to a single space + *pOut++ = ' '; + whitespaceSeen = false; + } + *pOut++ = c; + } + *pOut = 0; + int processedLength = static_cast<int>(pOut - tempBuffer.get()); + bool lineTruncated = false; + if (processedLength > (LINENOISE_MAX_LINE - 1)) { + lineTruncated = true; + tempBuffer[LINENOISE_MAX_LINE - 1] = 0; + } + preloadedBufferContents = tempBuffer.get(); + if (controlsStripped) { + preloadErrorMessage += + " [Edited line: control characters were converted to spaces]\n"; + } + if (lineTruncated) { + preloadErrorMessage += " [Edited line: the line length was reduced from "; + char buf[128]; + snprintf(buf, sizeof(buf), "%d to %d]\n", processedLength, + (LINENOISE_MAX_LINE - 1)); + preloadErrorMessage += buf; + } +} + +/** + * linenoise is a readline replacement. 
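+ *
+ * A typical call pattern (illustrative only, with "nix> " standing in for the
+ * real prompt) is a read loop that frees every returned buffer:
+ *
+ *   char* line;
+ *   while ((line = linenoise("nix> ")) != NULL) {
+ *     if (*line != '\0') linenoiseHistoryAdd(line);
+ *     free(line);
+ *   }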
+ * + * call it with a prompt to display and it will return a line of input from the + * user + * + * @param prompt text of prompt to display to the user + * @return the returned string belongs to the caller on return and must be + * freed to prevent + * memory leaks + */ +char* linenoise(const char* prompt) { +#ifndef _WIN32 + gotResize = false; +#endif + if (isatty(STDIN_FILENO)) { // input is from a terminal + char32_t buf32[LINENOISE_MAX_LINE]; + char charWidths[LINENOISE_MAX_LINE]; + if (!preloadErrorMessage.empty()) { + printf("%s", preloadErrorMessage.c_str()); + fflush(stdout); + preloadErrorMessage.clear(); + } + PromptInfo pi(prompt, getScreenColumns()); + if (isUnsupportedTerm()) { + if (!pi.write()) return 0; + fflush(stdout); + if (preloadedBufferContents.empty()) { + unique_ptr<char[]> buf8(new char[LINENOISE_MAX_LINE]); + if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { + return NULL; + } + size_t len = strlen(buf8.get()); + while (len && (buf8[len - 1] == '\n' || buf8[len - 1] == '\r')) { + --len; + buf8[len] = '\0'; + } + return strdup(buf8.get()); // caller must free buffer + } else { + char* buf8 = strdup(preloadedBufferContents.c_str()); + preloadedBufferContents.clear(); + return buf8; // caller must free buffer + } + } else { + if (enableRawMode() == -1) { + return NULL; + } + InputBuffer ib(buf32, charWidths, LINENOISE_MAX_LINE); + if (!preloadedBufferContents.empty()) { + ib.preloadBuffer(preloadedBufferContents.c_str()); + preloadedBufferContents.clear(); + } + int count = ib.getInputLine(pi); + disableRawMode(); + printf("\n"); + if (count == -1) { + return NULL; + } + size_t bufferSize = sizeof(char32_t) * ib.length() + 1; + unique_ptr<char[]> buf8(new char[bufferSize]); + copyString32to8(buf8.get(), bufferSize, buf32); + return strdup(buf8.get()); // caller must free buffer + } + } else { // input not from a terminal, we should work with piped input, i.e. + // redirected stdin + unique_ptr<char[]> buf8(new char[LINENOISE_MAX_LINE]); + if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { + return NULL; + } + + // if fgets() gave us the newline, remove it + int count = static_cast<int>(strlen(buf8.get())); + if (count > 0 && buf8[count - 1] == '\n') { + --count; + buf8[count] = '\0'; + } + return strdup(buf8.get()); // caller must free buffer + } +} + +/* Register a callback function to be called for tab-completion. 
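+ *
+ * A minimal sketch of such a callback (the name myCompletion and the strings
+ * are illustrative, not part of this library):
+ *
+ *   static void myCompletion(const char* prefix, linenoiseCompletions* lc) {
+ *     if (prefix[0] == 'h') {
+ *       linenoiseAddCompletion(lc, "hello");
+ *       linenoiseAddCompletion(lc, "hello there");
+ *     }
+ *   }
+ *
+ *   linenoiseSetCompletionCallback(myCompletion);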
*/ +void linenoiseSetCompletionCallback(linenoiseCompletionCallback* fn) { + completionCallback = fn; +} + +void linenoiseAddCompletion(linenoiseCompletions* lc, const char* str) { + lc->completionStrings.push_back(Utf32String(str)); +} + +int linenoiseHistoryAdd(const char* line) { + if (historyMaxLen == 0) { + return 0; + } + if (history == NULL) { + history = + reinterpret_cast<char8_t**>(malloc(sizeof(char8_t*) * historyMaxLen)); + if (history == NULL) { + return 0; + } + memset(history, 0, (sizeof(char*) * historyMaxLen)); + } + char8_t* linecopy = strdup8(line); + if (!linecopy) { + return 0; + } + + // convert newlines in multi-line code to spaces before storing + char8_t* p = linecopy; + while (*p) { + if (*p == '\n') { + *p = ' '; + } + ++p; + } + + // prevent duplicate history entries + if (historyLen > 0 && history[historyLen - 1] != nullptr && + strcmp(reinterpret_cast<char const*>(history[historyLen - 1]), + reinterpret_cast<char const*>(linecopy)) == 0) { + free(linecopy); + return 0; + } + + if (historyLen == historyMaxLen) { + free(history[0]); + memmove(history, history + 1, sizeof(char*) * (historyMaxLen - 1)); + --historyLen; + if (--historyPreviousIndex < -1) { + historyPreviousIndex = -2; + } + } + + history[historyLen] = linecopy; + ++historyLen; + return 1; +} + +int linenoiseHistorySetMaxLen(int len) { + if (len < 1) { + return 0; + } + if (history) { + int tocopy = historyLen; + char8_t** newHistory = + reinterpret_cast<char8_t**>(malloc(sizeof(char8_t*) * len)); + if (newHistory == NULL) { + return 0; + } + if (len < tocopy) { + tocopy = len; + } + memcpy(newHistory, history + historyMaxLen - tocopy, + sizeof(char8_t*) * tocopy); + free(history); + history = newHistory; + } + historyMaxLen = len; + if (historyLen > historyMaxLen) { + historyLen = historyMaxLen; + } + return 1; +} + +/* Fetch a line of the history by (zero-based) index. If the requested + * line does not exist, NULL is returned. The return value is a heap-allocated + * copy of the line, and the caller is responsible for de-allocating it. */ +char* linenoiseHistoryLine(int index) { + if (index < 0 || index >= historyLen) return NULL; + + return strdup(reinterpret_cast<char const*>(history[index])); +} + +/* Save the history in the specified file. On success 0 is returned + * otherwise -1 is returned. */ +int linenoiseHistorySave(const char* filename) { +#if _WIN32 + FILE* fp = fopen(filename, "wt"); +#else + int fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR); + + if (fd < 0) { + return -1; + } + + FILE* fp = fdopen(fd, "wt"); +#endif + + if (fp == NULL) { + return -1; + } + + for (int j = 0; j < historyLen; ++j) { + if (history[j][0] != '\0') { + fprintf(fp, "%s\n", history[j]); + } + } + + fclose(fp); + + return 0; +} + +/* Load the history from the specified file. If the file does not exist + * zero is returned and no operation is performed. + * + * If the file exists and the operation succeeded 0 is returned, otherwise + * on error -1 is returned. */ +int linenoiseHistoryLoad(const char* filename) { + FILE* fp = fopen(filename, "rt"); + if (fp == NULL) { + return -1; + } + + char buf[LINENOISE_MAX_LINE]; + while (fgets(buf, LINENOISE_MAX_LINE, fp) != NULL) { + char* p = strchr(buf, '\r'); + if (!p) { + p = strchr(buf, '\n'); + } + if (p) { + *p = '\0'; + } + if (p != buf) { + linenoiseHistoryAdd(buf); + } + } + fclose(fp); + return 0; +} + +/* Set if to use or not the multi line mode. 
*/ +/* note that this is a stub only, as linenoise-ng always multi-line */ +void linenoiseSetMultiLine(int) {} + +/* This special mode is used by linenoise in order to print scan codes + * on screen for debugging / development purposes. It is implemented + * by the linenoise_example program using the --keycodes option. */ +void linenoisePrintKeyCodes(void) { + char quit[4]; + + printf( + "Linenoise key codes debugging mode.\n" + "Press keys to see scan codes. Type 'quit' at any time to exit.\n"); + if (enableRawMode() == -1) return; + memset(quit, ' ', 4); + while (1) { + char c; + int nread; + +#if _WIN32 + nread = _read(STDIN_FILENO, &c, 1); +#else + nread = read(STDIN_FILENO, &c, 1); +#endif + if (nread <= 0) continue; + memmove(quit, quit + 1, sizeof(quit) - 1); /* shift string to left. */ + quit[sizeof(quit) - 1] = c; /* Insert current char on the right. */ + if (memcmp(quit, "quit", sizeof(quit)) == 0) break; + + printf("'%c' %02x (%d) (type quit to exit)\n", isprint(c) ? c : '?', (int)c, + (int)c); + printf("\r"); /* Go left edge manually, we are in raw mode. */ + fflush(stdout); + } + disableRawMode(); +} + +#ifndef _WIN32 +static void WindowSizeChanged(int) { + // do nothing here but setting this flag + gotResize = true; +} +#endif + +int linenoiseInstallWindowChangeHandler(void) { +#ifndef _WIN32 + struct sigaction sa; + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; + sa.sa_handler = &WindowSizeChanged; + + if (sigaction(SIGWINCH, &sa, nullptr) == -1) { + return errno; + } +#endif + return 0; +} + +int linenoiseKeyType(void) { + return keyType; +} diff --git a/src/linenoise/linenoise.h b/src/linenoise/linenoise.h index ed20232c576e..3a8eb9f7ee63 100644 --- a/src/linenoise/linenoise.h +++ b/src/linenoise/linenoise.h @@ -1,70 +1,70 @@ -/* linenoise.h -- VERSION 1.0 - * - * Guerrilla line editing library against the idea that a line editing lib - * needs to be 20,000 lines of C code. +/* linenoise.h -- guerrilla line editing library against the idea that a + * line editing lib needs to be 20,000 lines of C code. * * See linenoise.c for more information. * - * ------------------------------------------------------------------------ - * - * Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com> - * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com> + * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com> + * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * modification, are permitted provided that the following conditions are met: * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
* - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef __LINENOISE_H #define __LINENOISE_H +#define LINENOISE_VERSION "1.0.0" +#define LINENOISE_VERSION_MAJOR 1 +#define LINENOISE_VERSION_MINOR 1 + #ifdef __cplusplus extern "C" { #endif -typedef struct linenoiseCompletions { - size_t len; - char **cvec; -} linenoiseCompletions; +typedef struct linenoiseCompletions linenoiseCompletions; -typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *); -typedef char*(linenoiseHintsCallback)(const char *, int *color, int *bold); -typedef void(linenoiseFreeHintsCallback)(void *); -void linenoiseSetCompletionCallback(linenoiseCompletionCallback *); -void linenoiseSetHintsCallback(linenoiseHintsCallback *); -void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *); -void linenoiseAddCompletion(linenoiseCompletions *, const char *); +typedef void(linenoiseCompletionCallback)(const char*, linenoiseCompletions*); +void linenoiseSetCompletionCallback(linenoiseCompletionCallback* fn); +void linenoiseAddCompletion(linenoiseCompletions* lc, const char* str); -char *linenoise(const char *prompt); -void linenoiseFree(void *ptr); -int linenoiseHistoryAdd(const char *line); +char* linenoise(const char* prompt); +void linenoisePreloadBuffer(const char* preloadText); +int linenoiseHistoryAdd(const char* line); int linenoiseHistorySetMaxLen(int len); -int linenoiseHistorySave(const char *filename); -int linenoiseHistoryLoad(const char *filename); +char* linenoiseHistoryLine(int index); +int linenoiseHistorySave(const char* filename); +int linenoiseHistoryLoad(const char* filename); +void linenoiseHistoryFree(void); void linenoiseClearScreen(void); void linenoiseSetMultiLine(int ml); void linenoisePrintKeyCodes(void); +/* the following are extensions to the original linenoise API */ +int linenoiseInstallWindowChangeHandler(void); +/* returns type of key pressed: 1 = CTRL-C, 2 = CTRL-D, 0 = other */ +int linenoiseKeyType(void); #ifdef __cplusplus } diff --git 
a/src/linenoise/wcwidth.cpp b/src/linenoise/wcwidth.cpp new file mode 100644 index 000000000000..deec0ba6b57f --- /dev/null +++ b/src/linenoise/wcwidth.cpp @@ -0,0 +1,315 @@ +/* + * This is an implementation of wcwidth() and wcswidth() (defined in + * IEEE Std 1002.1-2001) for Unicode. + * + * http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html + * http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html + * + * In fixed-width output devices, Latin characters all occupy a single + * "cell" position of equal width, whereas ideographic CJK characters + * occupy two such cells. Interoperability between terminal-line + * applications and (teletype-style) character terminals using the + * UTF-8 encoding requires agreement on which character should advance + * the cursor by how many cell positions. No established formal + * standards exist at present on which Unicode character shall occupy + * how many cell positions on character terminals. These routines are + * a first attempt of defining such behavior based on simple rules + * applied to data provided by the Unicode Consortium. + * + * For some graphical characters, the Unicode standard explicitly + * defines a character-cell width via the definition of the East Asian + * FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes. + * In all these cases, there is no ambiguity about which width a + * terminal shall use. For characters in the East Asian Ambiguous (A) + * class, the width choice depends purely on a preference of backward + * compatibility with either historic CJK or Western practice. + * Choosing single-width for these characters is easy to justify as + * the appropriate long-term solution, as the CJK practice of + * displaying these characters as double-width comes from historic + * implementation simplicity (8-bit encoded characters were displayed + * single-width and 16-bit ones double-width, even for Greek, + * Cyrillic, etc.) and not any typographic considerations. + * + * Much less clear is the choice of width for the Not East Asian + * (Neutral) class. Existing practice does not dictate a width for any + * of these characters. It would nevertheless make sense + * typographically to allocate two character cells to characters such + * as for instance EM SPACE or VOLUME INTEGRAL, which cannot be + * represented adequately with a single-width glyph. The following + * routines at present merely assign a single-cell width to all + * neutral characters, in the interest of simplicity. This is not + * entirely satisfactory and should be reconsidered before + * establishing a formal standard in this area. At the moment, the + * decision which Not East Asian (Neutral) characters should be + * represented by double-width glyphs cannot yet be answered by + * applying a simple rule from the Unicode database content. Setting + * up a proper standard for the behavior of UTF-8 character terminals + * will require a careful analysis not only of each Unicode character, + * but also of each presentation form, something the author of these + * routines has avoided to do so far. + * + * http://www.unicode.org/unicode/reports/tr11/ + * + * Markus Kuhn -- 2007-05-26 (Unicode 5.0) + * + * Permission to use, copy, modify, and distribute this software + * for any purpose and without fee is hereby granted. The author + * disclaims all warranties with regard to this software. 
+ * + * Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c + */ + +#include <wchar.h> +#include <string> +#include <memory> + +namespace linenoise_ng { + +struct interval { + char32_t first; + char32_t last; +}; + +/* auxiliary function for binary search in interval table */ +static int bisearch(char32_t ucs, const struct interval *table, int max) { + int min = 0; + int mid; + + if (ucs < table[0].first || ucs > table[max].last) + return 0; + while (max >= min) { + mid = (min + max) / 2; + if (ucs > table[mid].last) + min = mid + 1; + else if (ucs < table[mid].first) + max = mid - 1; + else + return 1; + } + + return 0; +} + + +/* The following two functions define the column width of an ISO 10646 + * character as follows: + * + * - The null character (U+0000) has a column width of 0. + * + * - Other C0/C1 control characters and DEL will lead to a return + * value of -1. + * + * - Non-spacing and enclosing combining characters (general + * category code Mn or Me in the Unicode database) have a + * column width of 0. + * + * - SOFT HYPHEN (U+00AD) has a column width of 1. + * + * - Other format characters (general category code Cf in the Unicode + * database) and ZERO WIDTH SPACE (U+200B) have a column width of 0. + * + * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) + * have a column width of 0. + * + * - Spacing characters in the East Asian Wide (W) or East Asian + * Full-width (F) category as defined in Unicode Technical + * Report #11 have a column width of 2. + * + * - All remaining characters (including all printable + * ISO 8859-1 and WGL4 characters, Unicode control characters, + * etc.) have a column width of 1. + * + * This implementation assumes that wchar_t characters are encoded + * in ISO 10646. + */ + +int mk_wcwidth(char32_t ucs) +{ + /* sorted list of non-overlapping intervals of non-spacing characters */ + /* generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c" */ + static const struct interval combining[] = { + { 0x0300, 0x036F }, { 0x0483, 0x0486 }, { 0x0488, 0x0489 }, + { 0x0591, 0x05BD }, { 0x05BF, 0x05BF }, { 0x05C1, 0x05C2 }, + { 0x05C4, 0x05C5 }, { 0x05C7, 0x05C7 }, { 0x0600, 0x0603 }, + { 0x0610, 0x0615 }, { 0x064B, 0x065E }, { 0x0670, 0x0670 }, + { 0x06D6, 0x06E4 }, { 0x06E7, 0x06E8 }, { 0x06EA, 0x06ED }, + { 0x070F, 0x070F }, { 0x0711, 0x0711 }, { 0x0730, 0x074A }, + { 0x07A6, 0x07B0 }, { 0x07EB, 0x07F3 }, { 0x0901, 0x0902 }, + { 0x093C, 0x093C }, { 0x0941, 0x0948 }, { 0x094D, 0x094D }, + { 0x0951, 0x0954 }, { 0x0962, 0x0963 }, { 0x0981, 0x0981 }, + { 0x09BC, 0x09BC }, { 0x09C1, 0x09C4 }, { 0x09CD, 0x09CD }, + { 0x09E2, 0x09E3 }, { 0x0A01, 0x0A02 }, { 0x0A3C, 0x0A3C }, + { 0x0A41, 0x0A42 }, { 0x0A47, 0x0A48 }, { 0x0A4B, 0x0A4D }, + { 0x0A70, 0x0A71 }, { 0x0A81, 0x0A82 }, { 0x0ABC, 0x0ABC }, + { 0x0AC1, 0x0AC5 }, { 0x0AC7, 0x0AC8 }, { 0x0ACD, 0x0ACD }, + { 0x0AE2, 0x0AE3 }, { 0x0B01, 0x0B01 }, { 0x0B3C, 0x0B3C }, + { 0x0B3F, 0x0B3F }, { 0x0B41, 0x0B43 }, { 0x0B4D, 0x0B4D }, + { 0x0B56, 0x0B56 }, { 0x0B82, 0x0B82 }, { 0x0BC0, 0x0BC0 }, + { 0x0BCD, 0x0BCD }, { 0x0C3E, 0x0C40 }, { 0x0C46, 0x0C48 }, + { 0x0C4A, 0x0C4D }, { 0x0C55, 0x0C56 }, { 0x0CBC, 0x0CBC }, + { 0x0CBF, 0x0CBF }, { 0x0CC6, 0x0CC6 }, { 0x0CCC, 0x0CCD }, + { 0x0CE2, 0x0CE3 }, { 0x0D41, 0x0D43 }, { 0x0D4D, 0x0D4D }, + { 0x0DCA, 0x0DCA }, { 0x0DD2, 0x0DD4 }, { 0x0DD6, 0x0DD6 }, + { 0x0E31, 0x0E31 }, { 0x0E34, 0x0E3A }, { 0x0E47, 0x0E4E }, + { 0x0EB1, 0x0EB1 }, { 0x0EB4, 0x0EB9 }, { 0x0EBB, 0x0EBC }, + { 0x0EC8, 0x0ECD }, { 0x0F18, 0x0F19 }, { 0x0F35, 0x0F35 
}, + { 0x0F37, 0x0F37 }, { 0x0F39, 0x0F39 }, { 0x0F71, 0x0F7E }, + { 0x0F80, 0x0F84 }, { 0x0F86, 0x0F87 }, { 0x0F90, 0x0F97 }, + { 0x0F99, 0x0FBC }, { 0x0FC6, 0x0FC6 }, { 0x102D, 0x1030 }, + { 0x1032, 0x1032 }, { 0x1036, 0x1037 }, { 0x1039, 0x1039 }, + { 0x1058, 0x1059 }, { 0x1160, 0x11FF }, { 0x135F, 0x135F }, + { 0x1712, 0x1714 }, { 0x1732, 0x1734 }, { 0x1752, 0x1753 }, + { 0x1772, 0x1773 }, { 0x17B4, 0x17B5 }, { 0x17B7, 0x17BD }, + { 0x17C6, 0x17C6 }, { 0x17C9, 0x17D3 }, { 0x17DD, 0x17DD }, + { 0x180B, 0x180D }, { 0x18A9, 0x18A9 }, { 0x1920, 0x1922 }, + { 0x1927, 0x1928 }, { 0x1932, 0x1932 }, { 0x1939, 0x193B }, + { 0x1A17, 0x1A18 }, { 0x1B00, 0x1B03 }, { 0x1B34, 0x1B34 }, + { 0x1B36, 0x1B3A }, { 0x1B3C, 0x1B3C }, { 0x1B42, 0x1B42 }, + { 0x1B6B, 0x1B73 }, { 0x1DC0, 0x1DCA }, { 0x1DFE, 0x1DFF }, + { 0x200B, 0x200F }, { 0x202A, 0x202E }, { 0x2060, 0x2063 }, + { 0x206A, 0x206F }, { 0x20D0, 0x20EF }, { 0x302A, 0x302F }, + { 0x3099, 0x309A }, { 0xA806, 0xA806 }, { 0xA80B, 0xA80B }, + { 0xA825, 0xA826 }, { 0xFB1E, 0xFB1E }, { 0xFE00, 0xFE0F }, + { 0xFE20, 0xFE23 }, { 0xFEFF, 0xFEFF }, { 0xFFF9, 0xFFFB }, + { 0x10A01, 0x10A03 }, { 0x10A05, 0x10A06 }, { 0x10A0C, 0x10A0F }, + { 0x10A38, 0x10A3A }, { 0x10A3F, 0x10A3F }, { 0x1D167, 0x1D169 }, + { 0x1D173, 0x1D182 }, { 0x1D185, 0x1D18B }, { 0x1D1AA, 0x1D1AD }, + { 0x1D242, 0x1D244 }, { 0xE0001, 0xE0001 }, { 0xE0020, 0xE007F }, + { 0xE0100, 0xE01EF } + }; + + /* test for 8-bit control characters */ + if (ucs == 0) + return 0; + if (ucs < 32 || (ucs >= 0x7f && ucs < 0xa0)) + return -1; + + /* binary search in table of non-spacing characters */ + if (bisearch(ucs, combining, + sizeof(combining) / sizeof(struct interval) - 1)) + return 0; + + /* if we arrive here, ucs is not a combining or C0/C1 control character */ + + return 1 + + (ucs >= 0x1100 && + (ucs <= 0x115f || /* Hangul Jamo init. consonants */ + ucs == 0x2329 || ucs == 0x232a || + (ucs >= 0x2e80 && ucs <= 0xa4cf && + ucs != 0x303f) || /* CJK ... Yi */ + (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */ + (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */ + (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */ + (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */ + (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */ + (ucs >= 0xffe0 && ucs <= 0xffe6) || + (ucs >= 0x20000 && ucs <= 0x2fffd) || + (ucs >= 0x30000 && ucs <= 0x3fffd))); +} + + +int mk_wcswidth(const char32_t* pwcs, size_t n) +{ + int w, width = 0; + + for (;*pwcs && n-- > 0; pwcs++) + if ((w = mk_wcwidth(*pwcs)) < 0) + return -1; + else + width += w; + + return width; +} + + +/* + * The following functions are the same as mk_wcwidth() and + * mk_wcswidth(), except that spacing characters in the East Asian + * Ambiguous (A) category as defined in Unicode Technical Report #11 + * have a column width of 2. This variant might be useful for users of + * CJK legacy encodings who want to migrate to UCS without changing + * the traditional terminal character-width behaviour. It is not + * otherwise recommended for general use. 
+ */ +int mk_wcwidth_cjk(wchar_t ucs) +{ + /* sorted list of non-overlapping intervals of East Asian Ambiguous + * characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c" */ + static const struct interval ambiguous[] = { + { 0x00A1, 0x00A1 }, { 0x00A4, 0x00A4 }, { 0x00A7, 0x00A8 }, + { 0x00AA, 0x00AA }, { 0x00AE, 0x00AE }, { 0x00B0, 0x00B4 }, + { 0x00B6, 0x00BA }, { 0x00BC, 0x00BF }, { 0x00C6, 0x00C6 }, + { 0x00D0, 0x00D0 }, { 0x00D7, 0x00D8 }, { 0x00DE, 0x00E1 }, + { 0x00E6, 0x00E6 }, { 0x00E8, 0x00EA }, { 0x00EC, 0x00ED }, + { 0x00F0, 0x00F0 }, { 0x00F2, 0x00F3 }, { 0x00F7, 0x00FA }, + { 0x00FC, 0x00FC }, { 0x00FE, 0x00FE }, { 0x0101, 0x0101 }, + { 0x0111, 0x0111 }, { 0x0113, 0x0113 }, { 0x011B, 0x011B }, + { 0x0126, 0x0127 }, { 0x012B, 0x012B }, { 0x0131, 0x0133 }, + { 0x0138, 0x0138 }, { 0x013F, 0x0142 }, { 0x0144, 0x0144 }, + { 0x0148, 0x014B }, { 0x014D, 0x014D }, { 0x0152, 0x0153 }, + { 0x0166, 0x0167 }, { 0x016B, 0x016B }, { 0x01CE, 0x01CE }, + { 0x01D0, 0x01D0 }, { 0x01D2, 0x01D2 }, { 0x01D4, 0x01D4 }, + { 0x01D6, 0x01D6 }, { 0x01D8, 0x01D8 }, { 0x01DA, 0x01DA }, + { 0x01DC, 0x01DC }, { 0x0251, 0x0251 }, { 0x0261, 0x0261 }, + { 0x02C4, 0x02C4 }, { 0x02C7, 0x02C7 }, { 0x02C9, 0x02CB }, + { 0x02CD, 0x02CD }, { 0x02D0, 0x02D0 }, { 0x02D8, 0x02DB }, + { 0x02DD, 0x02DD }, { 0x02DF, 0x02DF }, { 0x0391, 0x03A1 }, + { 0x03A3, 0x03A9 }, { 0x03B1, 0x03C1 }, { 0x03C3, 0x03C9 }, + { 0x0401, 0x0401 }, { 0x0410, 0x044F }, { 0x0451, 0x0451 }, + { 0x2010, 0x2010 }, { 0x2013, 0x2016 }, { 0x2018, 0x2019 }, + { 0x201C, 0x201D }, { 0x2020, 0x2022 }, { 0x2024, 0x2027 }, + { 0x2030, 0x2030 }, { 0x2032, 0x2033 }, { 0x2035, 0x2035 }, + { 0x203B, 0x203B }, { 0x203E, 0x203E }, { 0x2074, 0x2074 }, + { 0x207F, 0x207F }, { 0x2081, 0x2084 }, { 0x20AC, 0x20AC }, + { 0x2103, 0x2103 }, { 0x2105, 0x2105 }, { 0x2109, 0x2109 }, + { 0x2113, 0x2113 }, { 0x2116, 0x2116 }, { 0x2121, 0x2122 }, + { 0x2126, 0x2126 }, { 0x212B, 0x212B }, { 0x2153, 0x2154 }, + { 0x215B, 0x215E }, { 0x2160, 0x216B }, { 0x2170, 0x2179 }, + { 0x2190, 0x2199 }, { 0x21B8, 0x21B9 }, { 0x21D2, 0x21D2 }, + { 0x21D4, 0x21D4 }, { 0x21E7, 0x21E7 }, { 0x2200, 0x2200 }, + { 0x2202, 0x2203 }, { 0x2207, 0x2208 }, { 0x220B, 0x220B }, + { 0x220F, 0x220F }, { 0x2211, 0x2211 }, { 0x2215, 0x2215 }, + { 0x221A, 0x221A }, { 0x221D, 0x2220 }, { 0x2223, 0x2223 }, + { 0x2225, 0x2225 }, { 0x2227, 0x222C }, { 0x222E, 0x222E }, + { 0x2234, 0x2237 }, { 0x223C, 0x223D }, { 0x2248, 0x2248 }, + { 0x224C, 0x224C }, { 0x2252, 0x2252 }, { 0x2260, 0x2261 }, + { 0x2264, 0x2267 }, { 0x226A, 0x226B }, { 0x226E, 0x226F }, + { 0x2282, 0x2283 }, { 0x2286, 0x2287 }, { 0x2295, 0x2295 }, + { 0x2299, 0x2299 }, { 0x22A5, 0x22A5 }, { 0x22BF, 0x22BF }, + { 0x2312, 0x2312 }, { 0x2460, 0x24E9 }, { 0x24EB, 0x254B }, + { 0x2550, 0x2573 }, { 0x2580, 0x258F }, { 0x2592, 0x2595 }, + { 0x25A0, 0x25A1 }, { 0x25A3, 0x25A9 }, { 0x25B2, 0x25B3 }, + { 0x25B6, 0x25B7 }, { 0x25BC, 0x25BD }, { 0x25C0, 0x25C1 }, + { 0x25C6, 0x25C8 }, { 0x25CB, 0x25CB }, { 0x25CE, 0x25D1 }, + { 0x25E2, 0x25E5 }, { 0x25EF, 0x25EF }, { 0x2605, 0x2606 }, + { 0x2609, 0x2609 }, { 0x260E, 0x260F }, { 0x2614, 0x2615 }, + { 0x261C, 0x261C }, { 0x261E, 0x261E }, { 0x2640, 0x2640 }, + { 0x2642, 0x2642 }, { 0x2660, 0x2661 }, { 0x2663, 0x2665 }, + { 0x2667, 0x266A }, { 0x266C, 0x266D }, { 0x266F, 0x266F }, + { 0x273D, 0x273D }, { 0x2776, 0x277F }, { 0xE000, 0xF8FF }, + { 0xFFFD, 0xFFFD }, { 0xF0000, 0xFFFFD }, { 0x100000, 0x10FFFD } + }; + + /* binary search in table of non-spacing characters */ + if (bisearch(ucs, 
ambiguous, + sizeof(ambiguous) / sizeof(struct interval) - 1)) + return 2; + + return mk_wcwidth(ucs); +} + + +int mk_wcswidth_cjk(const wchar_t *pwcs, size_t n) +{ + int w, width = 0; + + for (;*pwcs && n-- > 0; pwcs++) + if ((w = mk_wcwidth_cjk(*pwcs)) < 0) + return -1; + else + width += w; + + return width; +} + +} diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 21b0a18dd887..21d99878a518 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -141,7 +141,7 @@ void mainWrapped(int argc, char * * argv) else if (*arg == "--version") printVersion(myName); - else if (*arg == "--add-drv-link") + else if (*arg == "--add-drv-link" || *arg == "--indirect") ; // obsolete else if (*arg == "--no-out-link" || *arg == "--no-link") @@ -167,9 +167,6 @@ void mainWrapped(int argc, char * * argv) buildMode = bmRepair; } - else if (*arg == "--hash") - buildMode = bmHash; - else if (*arg == "--run-env") // obsolete runEnv = true; @@ -215,7 +212,7 @@ void mainWrapped(int argc, char * * argv) // read the shebang to understand which packages to read from. Since // this is handled via nix-shell -p, we wrap our ruby script execution // in ruby -e 'load' which ignores the shebangs. - envCommand = (format("exec %1% %2% -e 'load(\"%3%\") -- %4%") % execArgs % interpreter % script % joined.str()).str(); + envCommand = (format("exec %1% %2% -e 'load(\"%3%\")' -- %4%") % execArgs % interpreter % script % joined.str()).str(); } else { envCommand = (format("exec %1% %2% %3% %4%") % execArgs % interpreter % script % joined.str()).str(); } @@ -235,6 +232,8 @@ void mainWrapped(int argc, char * * argv) myArgs.parseCmdline(args); + initPlugins(); + if (packages && fromArgs) throw UsageError("'-p' and '-E' are mutually exclusive"); @@ -263,6 +262,8 @@ void mainWrapped(int argc, char * * argv) if (runEnv) setenv("IN_NIX_SHELL", pure ? "pure" : "impure", 1); + DrvInfos drvs; + /* Parse the expressions. */ std::vector<Expr *> exprs; @@ -270,18 +271,22 @@ void mainWrapped(int argc, char * * argv) exprs = {state.parseStdin()}; else for (auto i : left) { + auto absolute = i; + try { + absolute = canonPath(absPath(i), true); + } catch (Error e) {}; if (fromArgs) exprs.push_back(state.parseExprFromString(i, absPath("."))); + else if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?"))) + drvs.push_back(DrvInfo(state, store, absolute)); else /* If we're in a #! script, interpret filenames relative to the script. */ - exprs.push_back(state.parseExprFromFile(resolveExprPath(lookupFileArg(state, - inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i)))); + exprs.push_back(state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, + inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i))))); } /* Evaluate them into derivations. */ - DrvInfos drvs; - if (attrPaths.empty()) attrPaths = {""}; for (auto e : exprs) { @@ -349,7 +354,7 @@ void mainWrapped(int argc, char * * argv) // Build or fetch all dependencies of the derivation. 
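
For the mk_wcwidth/mk_wcswidth helpers imported above from linenoise-ng, the following is a minimal sketch of how they behave when compiled and linked together with that translation unit. It is a hypothetical test driver, not part of the patch; the forward declaration simply mirrors the definition above.

#include <cstddef>
#include <cstdio>

namespace linenoise_ng { int mk_wcswidth(const char32_t * pwcs, std::size_t n); }

int main()
{
    // 'A' occupies one column, U+3042 (HIRAGANA LETTER A) is East Asian Wide (2 columns),
    // and U+0301 (COMBINING ACUTE ACCENT) is a non-spacing mark (0 columns).
    const char32_t s[] = { 0x41, 0x3042, 0x0301, 0 };
    std::printf("%d\n", linenoise_ng::mk_wcswidth(s, 3));   // prints 3 (1 + 2 + 0)
    return 0;
}
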
for (const auto & input : drv.inputDrvs) if (std::all_of(envExclude.cbegin(), envExclude.cend(), [&](const string & exclude) { return !std::regex_search(input.first, std::regex(exclude)); })) - pathsToBuild.insert(input.first); + pathsToBuild.insert(makeDrvPathWithOutputs(input.first, input.second)); for (const auto & src : drv.inputSrcs) pathsToBuild.insert(src); diff --git a/src/nix-channel/local.mk b/src/nix-channel/local.mk index 49fc105c6f79..c14e8c359ca0 100644 --- a/src/nix-channel/local.mk +++ b/src/nix-channel/local.mk @@ -2,6 +2,6 @@ programs += nix-channel nix-channel_DIR := $(d) -nix-channel_LIBS = libmain libutil libformat libstore +nix-channel_LIBS = libmain libformat libstore libutil nix-channel_SOURCES := $(d)/nix-channel.cc diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 370f216abccd..55ebda438965 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -10,8 +10,8 @@ using namespace nix; typedef std::map<string,string> Channels; -static auto channels = Channels{}; -static auto channelsList = Path{}; +static Channels channels; +static Path channelsList; // Reads the list of channels. static void readChannels() @@ -52,7 +52,7 @@ static void addChannel(const string & url, const string & name) writeChannels(); } -static auto profile = Path{}; +static Path profile; // Remove a channel. static void removeChannel(const string & name) @@ -64,7 +64,7 @@ static void removeChannel(const string & name) runProgram(settings.nixBinDir + "/nix-env", true, { "--profile", profile, "--uninstall", name }); } -static auto nixDefExpr = Path{}; +static Path nixDefExpr; // Fetch Nix expressions and binary cache URLs from the subscribed channels. static void update(const StringSet & channelNames) @@ -74,7 +74,7 @@ static void update(const StringSet & channelNames) auto store = openStore(); // Download each channel. - auto exprs = Strings{}; + Strings exprs; for (const auto & channel : channels) { auto name = channel.first; auto url = channel.second; @@ -84,9 +84,9 @@ static void update(const StringSet & channelNames) // We want to download the url to a file to see if it's a tarball while also checking if we // got redirected in the process, so that we can grab the various parts of a nix channel // definition from a consistent location if the redirect changes mid-download. - auto effectiveUrl = string{}; + std::string effectiveUrl; auto dl = getDownloader(); - auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl); + auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0); url = chomp(std::move(effectiveUrl)); // If the URL contains a version number, append it to the name @@ -99,9 +99,9 @@ static void update(const StringSet & channelNames) cname = cname + (string) match[1]; } - auto extraAttrs = string{}; + std::string extraAttrs; - auto unpacked = false; + bool unpacked = false; if (std::regex_search(filename, std::regex("\\.tar\\.(gz|bz2|xz)$"))) { runProgram(settings.nixBinDir + "/nix-build", false, { "--no-out-link", "--expr", "import <nix/unpack-channel.nix> " "{ name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; }" }); @@ -136,7 +136,7 @@ static void update(const StringSet & channelNames) // Unpack the channel tarballs into the Nix store and install them // into the channels profile. 
std::cerr << "unpacking channels...\n"; - auto envArgs = Strings{ "--profile", profile, "--file", "<nix/unpack-channel.nix>", "--install", "--from-expression" }; + Strings envArgs{ "--profile", profile, "--file", "<nix/unpack-channel.nix>", "--install", "--from-expression" }; for (auto & expr : exprs) envArgs.push_back(std::move(expr)); envArgs.push_back("--quiet"); @@ -162,23 +162,15 @@ int main(int argc, char ** argv) return handleExceptions(argv[0], [&]() { initNix(); - // Turn on caching in nix-prefetch-url. - auto channelCache = settings.nixStateDir + "/channel-cache"; - createDirs(channelCache); - setenv("NIX_DOWNLOAD_CACHE", channelCache.c_str(), 1); - // Figure out the name of the `.nix-channels' file to use auto home = getHome(); channelsList = home + "/.nix-channels"; nixDefExpr = home + "/.nix-defexpr"; // Figure out the name of the channels profile. - auto name = string{}; + ; auto pw = getpwuid(getuid()); - if (!pw) - name = getEnv("USER", ""); - else - name = pw->pw_name; + std::string name = pw ? pw->pw_name : getEnv("USER", ""); if (name.empty()) throw Error("cannot figure out user name"); profile = settings.nixStateDir + "/profiles/per-user/" + name + "/channels"; @@ -192,7 +184,7 @@ int main(int argc, char ** argv) cUpdate, cRollback } cmd = cNone; - auto args = std::vector<string>{}; + std::vector<string> args; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--help") { showManPage("nix-channel"); @@ -213,6 +205,9 @@ int main(int argc, char ** argv) } return true; }); + + initPlugins(); + switch (cmd) { case cNone: throw UsageError("no command specified"); @@ -221,7 +216,7 @@ int main(int argc, char ** argv) throw UsageError("'--add' requires one or two arguments"); { auto url = args[0]; - auto name = string{}; + std::string name; if (args.size() == 2) { name = args[1]; } else { @@ -250,7 +245,7 @@ int main(int argc, char ** argv) case cRollback: if (args.size() > 1) throw UsageError("'--rollback' has at most one argument"); - auto envArgs = Strings{"--profile", profile}; + Strings envArgs{"--profile", profile}; if (args.size() == 1) { envArgs.push_back("--switch-generation"); envArgs.push_back(args[0]); diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index cc663a96924d..37fe22f48134 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -77,6 +77,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + auto profilesDir = settings.nixStateDir + "/profiles"; if (removeOld) removeOldGenerations(profilesDir); diff --git a/src/nix-copy-closure/local.mk b/src/nix-copy-closure/local.mk index 42bb34dd8201..5018ab975b44 100644 --- a/src/nix-copy-closure/local.mk +++ b/src/nix-copy-closure/local.mk @@ -2,6 +2,6 @@ programs += nix-copy-closure nix-copy-closure_DIR := $(d) -nix-copy-closure_LIBS = libmain libutil libformat libstore +nix-copy-closure_LIBS = libmain libformat libstore libutil nix-copy-closure_SOURCES := $(d)/nix-copy-closure.cc diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index 861fc2e5cd64..dfb1b8fc5dc4 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -44,6 +44,8 @@ int main(int argc, char ** argv) return true; }); + initPlugins(); + if (sshHost.empty()) throw UsageError("no host name specified"); diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc 
index 5629cc64b96e..3dd8cec290cb 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -37,13 +37,13 @@ using namespace nix; static ssize_t splice(int fd_in, void *off_in, int fd_out, void *off_out, size_t len, unsigned int flags) { /* We ignore most parameters, we just have them for conformance with the linux syscall */ - char buf[8192]; - auto read_count = read(fd_in, buf, sizeof(buf)); + std::vector<char> buf(8192); + auto read_count = read(fd_in, buf.data(), buf.size()); if (read_count == -1) return read_count; auto write_count = decltype(read_count)(0); while (write_count < read_count) { - auto res = write(fd_out, buf + write_count, read_count - write_count); + auto res = write(fd_out, buf.data() + write_count, read_count - write_count); if (res == -1) return res; write_count += res; @@ -197,7 +197,8 @@ struct TunnelSource : BufferedSource { Source & from; TunnelSource(Source & from) : from(from) { } - size_t readUnbuffered(unsigned char * data, size_t len) +protected: + size_t readUnbuffered(unsigned char * data, size_t len) override { to << STDERR_READ << len; to.flush(); @@ -411,7 +412,7 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store, /* Repairing is not atomic, so disallowed for "untrusted" clients. */ if (mode == bmRepair && !trusted) - throw Error("repairing is not supported when building through the Nix daemon"); + throw Error("repairing is not allowed because you are not in 'trusted-users'"); } logger->startWork(); store->buildPaths(drvs, mode); @@ -695,7 +696,7 @@ static void performOp(TunnelLogger * logger, ref<LocalStore> store, parseDump(tee, tee.source); logger->startWork(); - store->addToStore(info, tee.source.data, (RepairFlag) repair, + store.cast<Store>()->addToStore(info, tee.source.data, (RepairFlag) repair, dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr); logger->stopWork(); break; @@ -816,8 +817,11 @@ static void processConnection(bool trusted) static void sigChldHandler(int sigNo) { + // Ensure we don't modify errno of whatever we've interrupted + auto saved_errno = errno; /* Reap all dead children. */ while (waitpid(-1, 0, WNOHANG) > 0) ; + errno = saved_errno; } @@ -994,7 +998,7 @@ static void daemonLoop(char * * argv) if (matchUser(user, group, trustedUsers)) trusted = true; - if (!trusted && !matchUser(user, group, allowedUsers)) + if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup) throw Error(format("user '%1%' is not allowed to connect to the Nix daemon") % user); printInfo(format((string) "accepted connection from pid %1%, user %2%" + (trusted ? 
" (trusted)" : "")) @@ -1032,7 +1036,7 @@ static void daemonLoop(char * * argv) }, options); } catch (Interrupted & e) { - throw; + return; } catch (Error & e) { printError(format("error processing connection: %1%") % e.msg()); } @@ -1060,6 +1064,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + if (stdio) { if (getStoreType() == tDaemon) { /* Forward on this connection to the real daemon */ diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 016caf6d2346..f60ff9e07182 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -198,13 +198,13 @@ static Path getDefNixExprPath() } -static int getPriority(EvalState & state, DrvInfo & drv) +static long getPriority(EvalState & state, DrvInfo & drv) { return drv.queryMetaInt("priority", 0); } -static int comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2) +static long comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2) { return getPriority(state, drv2) - getPriority(state, drv1); } @@ -270,7 +270,7 @@ static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems, for (auto & j : matches) { DrvName drvName(j.first.queryName()); - int d = 1; + long d = 1; Newest::iterator k = newest.find(drvName.name); @@ -578,7 +578,7 @@ static void upgradeDerivations(Globals & globals, (upgradeType == utEq && d == 0) || upgradeType == utAlways) { - int d2 = -1; + long d2 = -1; if (bestElem != availElems.end()) { d2 = comparePriorities(*globals.state, *bestElem, *j); if (d2 == 0) d2 = compareVersions(bestVersion, newName.version); @@ -784,22 +784,22 @@ typedef list<Strings> Table; void printTable(Table & table) { - unsigned int nrColumns = table.size() > 0 ? table.front().size() : 0; + auto nrColumns = table.size() > 0 ? table.front().size() : 0; - vector<unsigned int> widths; + vector<size_t> widths; widths.resize(nrColumns); for (auto & i : table) { assert(i.size() == nrColumns); Strings::iterator j; - unsigned int column; + size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) if (j->size() > widths[column]) widths[column] = j->size(); } for (auto & i : table) { Strings::iterator j; - unsigned int column; + size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) { string s = *j; replace(s.begin(), s.end(), '\n', ' '); @@ -1393,6 +1393,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (!op) throw UsageError("no operation specified"); auto store = openStore(); diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 55ac007e8682..5049460c7544 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -70,7 +70,7 @@ void processExpr(EvalState & state, const Strings & attrPaths, if (gcRoot == "") printGCWarning(); else { - Path rootName = gcRoot; + Path rootName = indirectRoot ? absPath(gcRoot) : gcRoot; if (++rootNr > 1) rootName += "-" + std::to_string(rootNr); auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>(); if (store2) @@ -151,6 +151,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (evalOnly && !wantsReadWrite) settings.readOnlyMode = true; @@ -182,7 +184,7 @@ int main(int argc, char * * argv) for (auto & i : files) { Expr * e = fromArgs ? 
state.parseExprFromString(i, absPath(".")) - : state.parseExprFromFile(resolveExprPath(lookupFileArg(state, i))); + : state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, i)))); processExpr(state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index fef3eaa45538..fa7ee254500c 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -89,6 +89,8 @@ int main(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (args.size() > 2) throw UsageError("too many arguments"); diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc index 51dedcf0a092..abdfa5e58f93 100644 --- a/src/nix-store/dotgraph.cc +++ b/src/nix-store/dotgraph.cc @@ -47,8 +47,7 @@ static string makeNode(const string & id, const string & label, static string symbolicName(const string & path) { string p = baseNameOf(path); - int dash = p.find('-'); - return string(p, dash + 1); + return string(p, p.find('-') + 1); } diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index f6f276dd1798..e1e27ceef94d 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -122,7 +122,6 @@ static void opRealise(Strings opFlags, Strings opArgs) if (i == "--dry-run") dryRun = true; else if (i == "--repair") buildMode = bmRepair; else if (i == "--check") buildMode = bmCheck; - else if (i == "--hash") buildMode = bmHash; else if (i == "--ignore-unknown") ignoreUnknown = true; else throw UsageError(format("unknown flag '%1%'") % i); @@ -632,6 +631,7 @@ static void opDump(Strings opFlags, Strings opArgs) FdSink sink(STDOUT_FILENO); string path = *opArgs.begin(); dumpPath(path, sink); + sink.flush(); } @@ -657,6 +657,7 @@ static void opExport(Strings opFlags, Strings opArgs) FdSink sink(STDOUT_FILENO); store->exportPaths(opArgs, sink); + sink.flush(); } @@ -1053,6 +1054,8 @@ int main(int argc, char * * argv) return true; }); + initPlugins(); + if (!op) throw UsageError("no operation specified"); if (op != opDump && op != opRestore) /* !!! hack */ diff --git a/src/nix/build.cc b/src/nix/build.cc index f7c99f12dbbf..b329ac38ac2b 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -50,7 +50,9 @@ struct CmdBuild : MixDryRun, InstallablesCommand void run(ref<Store> store) override { - auto buildables = toBuildables(store, dryRun ? DryRun : Build, installables); + auto buildables = build(store, dryRun ? 
DryRun : Build, installables); + + if (dryRun) return; for (size_t i = 0; i < buildables.size(); ++i) { auto & b(buildables[i]); diff --git a/src/nix/command.cc b/src/nix/command.cc index 1e6f0d2bb75d..3d7d582d6f5e 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -57,8 +57,10 @@ void MultiCommand::printHelp(const string & programName, std::ostream & out) } printTable(out, table); +#if 0 out << "\n"; out << "For full documentation, run 'man " << programName << "' or 'man " << programName << "-<COMMAND>'.\n"; +#endif } bool MultiCommand::processFlag(Strings::iterator & pos, Strings::iterator end) diff --git a/src/nix/command.hh b/src/nix/command.hh index daa3b3fa7030..97a6fee7fd27 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -5,8 +5,10 @@ namespace nix { +extern std::string programPath; + struct Value; -struct Bindings; +class Bindings; class EvalState; /* A command is an argument parser that can be executed by calling its @@ -196,7 +198,7 @@ std::shared_ptr<Installable> parseInstallable( SourceExprCommand & cmd, ref<Store> store, const std::string & installable, bool useDefaultInstallables); -Buildables toBuildables(ref<Store> store, RealiseMode mode, +Buildables build(ref<Store> store, RealiseMode mode, std::vector<std::shared_ptr<Installable>> installables); PathSet toStorePaths(ref<Store> store, RealiseMode mode, diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 2ddea9e70a6a..e4e6c3e303ed 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -57,16 +57,22 @@ struct CmdCopy : StorePathsCommand return { Example{ "To copy Firefox from the local store to a binary cache in file:///tmp/cache:", - "nix copy --to file:///tmp/cache -r $(type -p firefox)" + "nix copy --to file:///tmp/cache $(type -p firefox)" }, Example{ "To copy the entire current NixOS system closure to another machine via SSH:", - "nix copy --to ssh://server -r /run/current-system" + "nix copy --to ssh://server /run/current-system" }, Example{ "To copy a closure from another machine via SSH:", - "nix copy --from ssh://server -r /nix/store/a6cnl93nk1wxnq84brbbwr6hxw9gp2w9-blender-2.79-rc2" + "nix copy --from ssh://server /nix/store/a6cnl93nk1wxnq84brbbwr6hxw9gp2w9-blender-2.79-rc2" }, +#ifdef ENABLE_S3 + Example{ + "To populate the current folder build output to a S3 binary cache:", + "nix copy --to s3://my-bucket?region=eu-west-1" + }, +#endif }; } diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index 1a1866437b07..f411c0cb7c89 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -29,6 +29,7 @@ struct CmdDumpPath : StorePathCommand { FdSink sink(STDOUT_FILENO); store->narFromPath(storePath, sink); + sink.flush(); } }; diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 127be321eee2..c9671f76d0fa 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -52,11 +52,16 @@ struct CmdEdit : InstallableCommand throw Error("cannot parse meta.position attribute '%s'", pos); std::string filename(pos, 0, colon); - int lineno = std::stoi(std::string(pos, colon + 1)); + int lineno; + try { + lineno = std::stoi(std::string(pos, colon + 1)); + } catch (std::invalid_argument e) { + throw Error("cannot parse line number '%s'", pos); + } auto editor = getEnv("EDITOR", "cat"); - Strings args{editor}; + auto args = tokenizeString<Strings>(editor); if (editor.find("emacs") != std::string::npos || editor.find("nano") != std::string::npos || @@ -67,7 +72,7 @@ struct CmdEdit : InstallableCommand stopProgressBar(); - execvp(editor.c_str(), stringsToCharPtrs(args).data()); + execvp(args.front().c_str(), 
stringsToCharPtrs(args).data()); throw SysError("cannot run editor '%s'", editor); } diff --git a/src/nix/eval.cc b/src/nix/eval.cc index e22128692630..b7058361cbec 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -5,10 +5,11 @@ #include "eval.hh" #include "json.hh" #include "value-to-json.hh" +#include "progress-bar.hh" using namespace nix; -struct CmdEval : MixJSON, InstallablesCommand +struct CmdEval : MixJSON, InstallableCommand { bool raw = false; @@ -42,6 +43,10 @@ struct CmdEval : MixJSON, InstallablesCommand "To get the current version of Nixpkgs:", "nix eval --raw nixpkgs.lib.nixpkgsVersion" }, + Example{ + "To print the store path of the Hello package:", + "nix eval --raw nixpkgs.hello" + }, }; } @@ -52,20 +57,19 @@ struct CmdEval : MixJSON, InstallablesCommand auto state = getEvalState(); - auto jsonOut = json ? std::make_unique<JSONList>(std::cout) : nullptr; + auto v = installable->toValue(*state); + PathSet context; + + stopProgressBar(); - for (auto & i : installables) { - auto v = i->toValue(*state); - if (raw) { - std::cout << state->forceString(*v); - } else if (json) { - PathSet context; - auto jsonElem = jsonOut->placeholder(); - printValueAsJSON(*state, true, *v, jsonElem, context); - } else { - state->forceValueDeep(*v); - std::cout << *v << "\n"; - } + if (raw) { + std::cout << state->coerceToString(noPos, *v, context); + } else if (json) { + JSONPlaceholder jsonOut(std::cout); + printValueAsJSON(*state, true, *v, jsonOut, context); + } else { + state->forceValueDeep(*v); + std::cout << *v << "\n"; } } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index ae93c4ef649e..a3fdd8a2808d 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -30,10 +30,8 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) vSourceExpr = state.allocValue(); - if (file != "") { - Expr * e = state.parseExprFromFile(resolveExprPath(lookupFileArg(state, file))); - state.eval(e, *vSourceExpr); - } + if (file != "") + state.evalFile(lookupFileArg(state, file), *vSourceExpr); else { @@ -255,7 +253,7 @@ std::shared_ptr<Installable> parseInstallable( return installables.front(); } -Buildables toBuildables(ref<Store> store, RealiseMode mode, +Buildables build(ref<Store> store, RealiseMode mode, std::vector<std::shared_ptr<Installable>> installables) { if (mode != Build) @@ -293,7 +291,7 @@ PathSet toStorePaths(ref<Store> store, RealiseMode mode, { PathSet outPaths; - for (auto & b : toBuildables(store, mode, installables)) + for (auto & b : build(store, mode, installables)) for (auto & output : b.outputs) outPaths.insert(output.second); diff --git a/src/nix/local.mk b/src/nix/local.mk index c7d2d328aab5..f76da194467c 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -2,8 +2,10 @@ programs += nix nix_DIR := $(d) -nix_SOURCES := $(wildcard $(d)/*.cc) src/linenoise/linenoise.c +nix_SOURCES := $(wildcard $(d)/*.cc) $(wildcard src/linenoise/*.cpp) nix_LIBS = libexpr libmain libstore libutil libformat +nix_LDFLAGS = -pthread + $(eval $(call install-symlink, nix, $(bindir)/nix-hash)) diff --git a/src/nix/log.cc b/src/nix/log.cc index 966ad8b65087..f07ec4e93a16 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -50,6 +50,7 @@ struct CmdLog : InstallableCommand auto b = installable->toBuildable(); + RunPager pager; for (auto & sub : subs) { auto log = b.drvPath != "" ? 
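
The nix edit change above tokenizes $EDITOR before exec'ing it, so that multi-word values such as "emacs -nw" can work. The following is a minimal standalone sketch of that splitting; it is not the patch's code, and the editor string is just an example.

#include <sstream>
#include <string>
#include <vector>
#include <unistd.h>

int main()
{
    std::string editor = "emacs -nw";              // e.g. the user's $EDITOR
    std::vector<std::string> words;
    std::istringstream ss(editor);
    for (std::string w; ss >> w; ) words.push_back(w);

    std::vector<char *> argv;
    for (auto & w : words) argv.push_back(&w[0]);  // execvp wants mutable char*
    argv.push_back(nullptr);

    execvp(argv[0], argv.data());                  // runs "emacs" with argument "-nw"
    return 1;                                      // only reached if exec fails
}
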
sub->getBuildLog(b.drvPath) : nullptr; for (auto & output : b.outputs) { diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 5a5fa8f62d92..e99622faf472 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -2,10 +2,12 @@ #include "store-api.hh" #include "fs-accessor.hh" #include "nar-accessor.hh" +#include "common-args.hh" +#include "json.hh" using namespace nix; -struct MixLs : virtual Args +struct MixLs : virtual Args, MixJSON { std::string path; @@ -20,7 +22,7 @@ struct MixLs : virtual Args mkFlag('d', "directory", "show directories rather than their contents", &showDirectory); } - void list(ref<FSAccessor> accessor) + void listText(ref<FSAccessor> accessor) { std::function<void(const FSAccessor::Stat &, const Path &, const std::string &, bool)> doPath; @@ -61,10 +63,6 @@ struct MixLs : virtual Args showFile(curPath, relPath); }; - if (path == "/") { - path = ""; - } - auto st = accessor->stat(path); if (st.type == FSAccessor::Type::tMissing) throw Error(format("path '%1%' does not exist") % path); @@ -72,6 +70,17 @@ struct MixLs : virtual Args st.type == FSAccessor::Type::tDirectory ? "." : baseNameOf(path), showDirectory); } + + void list(ref<FSAccessor> accessor) + { + if (path == "/") path = ""; + + if (json) { + JSONPlaceholder jsonRoot(std::cout); + listNar(jsonRoot, accessor, path, recursive); + } else + listText(accessor); + } }; struct CmdLsStore : StoreCommand, MixLs @@ -81,6 +90,16 @@ struct CmdLsStore : StoreCommand, MixLs expectArg("path", &path); } + Examples examples() override + { + return { + Example{ + "To list the contents of a store path in a binary cache:", + "nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10" + }, + }; + } + std::string name() override { return "ls-store"; @@ -107,6 +126,16 @@ struct CmdLsNar : Command, MixLs expectArg("path", &path); } + Examples examples() override + { + return { + Example{ + "To list a specific file in a NAR:", + "nix ls-nar -l hello.nar /bin/hello" + }, + }; + } + std::string name() override { return "ls-nar"; diff --git a/src/nix/main.cc b/src/nix/main.cc index 060402cd08d5..bb107ec7d3f6 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -16,6 +16,8 @@ void chrootHelper(int argc, char * * argv); namespace nix { +std::string programPath; + struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { NixArgs() : MultiCommand(*RegisterCommand::commands), MixCommonArgs("nix") @@ -43,10 +45,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .longName("version") .description("show version information") .handler([&]() { printVersion(programName); }); - - std::string cat = "config"; - settings.convertToArgs(*this, cat); - hiddenCategories.insert(cat); } void printFlags(std::ostream & out) override @@ -82,7 +80,8 @@ void mainWrapped(int argc, char * * argv) initNix(); initGC(); - string programName = baseNameOf(argv[0]); + programPath = argv[0]; + string programName = baseNameOf(programPath); { auto legacy = (*RegisterLegacyCommand::commands)[programName]; @@ -93,6 +92,8 @@ void mainWrapped(int argc, char * * argv) args.parseCmdline(argvToStrings(argc, argv)); + initPlugins(); + if (!args.command) args.showHelpAndExit(); Finally f([]() { stopProgressBar(); }); diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index ca02a4c929be..47caa401d3c9 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -65,7 +65,7 @@ struct CmdPathInfo : StorePathsCommand, MixJSON pathLen = std::max(pathLen, storePath.size()); if (json) { - JSONPlaceholder jsonRoot(std::cout, 
true); + JSONPlaceholder jsonRoot(std::cout); store->pathInfoToJSON(jsonRoot, // FIXME: preserve order? PathSet(storePaths.begin(), storePaths.end()), diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc new file mode 100644 index 000000000000..310942574a2a --- /dev/null +++ b/src/nix/ping-store.cc @@ -0,0 +1,35 @@ +#include "command.hh" +#include "shared.hh" +#include "store-api.hh" + +using namespace nix; + +struct CmdPingStore : StoreCommand +{ + std::string name() override + { + return "ping-store"; + } + + std::string description() override + { + return "test whether a store can be opened"; + } + + Examples examples() override + { + return { + Example{ + "To test whether connecting to a remote Nix store via SSH works:", + "nix ping-store --store ssh://mac1" + }, + }; + } + + void run(ref<Store> store) override + { + store->connect(); + } +}; + +static RegisterCommand r1(make_ref<CmdPingStore>()); diff --git a/src/nix/progress-bar.cc b/src/nix/progress-bar.cc index fb9955190b40..40b905ba3243 100644 --- a/src/nix/progress-bar.cc +++ b/src/nix/progress-bar.cc @@ -3,8 +3,9 @@ #include "sync.hh" #include "store-api.hh" -#include <map> #include <atomic> +#include <map> +#include <thread> namespace nix { @@ -22,44 +23,6 @@ static uint64_t getI(const std::vector<Logger::Field> & fields, size_t n) return fields[n].i; } -/* Truncate a string to 'width' printable characters. ANSI escape - sequences are copied but not included in the character count. Also, - tabs are expanded to spaces. */ -static std::string ansiTruncate(const std::string & s, int width) -{ - if (width <= 0) return s; - - std::string t; - size_t w = 0; - auto i = s.begin(); - - while (w < (size_t) width && i != s.end()) { - if (*i == '\e') { - t += *i++; - if (i != s.end() && *i == '[') { - t += *i++; - while (i != s.end() && (*i < 0x40 || *i > 0x7e)) { - t += *i++; - } - if (i != s.end()) t += *i++; - } - } - - else if (*i == '\t') { - t += ' '; w++; - while (w < (size_t) width && w & 8) { - t += ' '; w++; - } - } - - else { - t += *i++; w++; - } - } - - return t; -} - class ProgressBar : public Logger { private: @@ -101,15 +64,28 @@ private: Sync<State> state_; + std::thread updateThread; + + std::condition_variable quitCV, updateCV; + public: ProgressBar() { + updateThread = std::thread([&]() { + auto state(state_.lock()); + while (state->active) { + state.wait(updateCV); + draw(*state); + state.wait_for(quitCV, std::chrono::milliseconds(50)); + } + }); } ~ProgressBar() { stop(); + updateThread.join(); } void stop() @@ -121,6 +97,8 @@ public: writeToStderr("\r\e[K"); if (status != "") writeToStderr("[" + status + "]\n"); + updateCV.notify_one(); + quitCV.notify_one(); } void log(Verbosity lvl, const FormatOrString & fs) override @@ -132,7 +110,7 @@ public: void log(State & state, Verbosity lvl, const std::string & s) { writeToStderr("\r\e[K" + s + ANSI_NORMAL "\n"); - update(state); + draw(state); } void startActivity(ActivityId act, Verbosity lvl, ActivityType type, @@ -167,7 +145,12 @@ public: if (type == actSubstitute) { auto name = storePathToName(getS(fields, 0)); - i->s = fmt("fetching " ANSI_BOLD "%s" ANSI_NORMAL " from %s", name, getS(fields, 1)); + auto sub = getS(fields, 1); + i->s = fmt( + hasPrefix(sub, "local") + ? 
"copying " ANSI_BOLD "%s" ANSI_NORMAL " from %s" + : "fetching " ANSI_BOLD "%s" ANSI_NORMAL " from %s", + name, sub); } if (type == actQueryPathInfo) { @@ -180,7 +163,7 @@ public: || (type == actCopyPath && hasAncestor(*state, actSubstitute, parent))) i->visible = false; - update(*state); + update(); } /* Check whether an activity has an ancestore with the specified @@ -215,7 +198,7 @@ public: state->its.erase(i); } - update(*state); + update(); } void result(ActivityId act, ResultType type, const std::vector<Field> & fields) override @@ -225,7 +208,7 @@ public: if (type == resFileLinked) { state->filesLinked++; state->bytesLinked += getI(fields, 0); - update(*state); + update(); } else if (type == resBuildLogLine) { @@ -238,25 +221,25 @@ public: info.lastLine = lastLine; state->activities.emplace_back(info); i->second = std::prev(state->activities.end()); - update(*state); + update(); } } else if (type == resUntrustedPath) { state->untrustedPaths++; - update(*state); + update(); } else if (type == resCorruptedPath) { state->corruptedPaths++; - update(*state); + update(); } else if (type == resSetPhase) { auto i = state->its.find(act); assert(i != state->its.end()); i->second->phase = getS(fields, 0); - update(*state); + update(); } else if (type == resProgress) { @@ -267,7 +250,7 @@ public: actInfo.expected = getI(fields, 1); actInfo.running = getI(fields, 2); actInfo.failed = getI(fields, 3); - update(*state); + update(); } else if (type == resSetExpected) { @@ -279,17 +262,16 @@ public: state->activitiesByType[type].expected -= j; j = getI(fields, 1); state->activitiesByType[type].expected += j; - update(*state); + update(); } } void update() { - auto state(state_.lock()); - update(*state); + updateCV.notify_one(); } - void update(State & state) + void draw(State & state) { if (!state.active) return; @@ -323,7 +305,10 @@ public: } } - writeToStderr("\r" + ansiTruncate(line, getWindowSize().second) + "\e[K"); + auto width = getWindowSize().second; + if (width <= 0) std::numeric_limits<decltype(width)>::max(); + + writeToStderr("\r" + filterANSIEscapes(line, false, width) + "\e[K"); } std::string getStatus(State & state) diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 28a8ebc8c499..f84774a53367 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -185,9 +185,20 @@ void NixRepl::mainLoop(const std::vector<std::string> & files) bool NixRepl::getLine(string & input, const std::string &prompt) { char * s = linenoise(prompt.c_str()); - Finally doFree([&]() { linenoiseFree(s); }); - if (!s) return false; + Finally doFree([&]() { free(s); }); + if (!s) { + switch (auto type = linenoiseKeyType()) { + case 1: // ctrl-C + input = ""; + return true; + case 2: // ctrl-D + return false; + default: + throw Error(format("Unexpected linenoise keytype: %1%") % type); + } + } input += s; + input += '\n'; return true; } diff --git a/src/nix/run.cc b/src/nix/run.cc index 6657a86314bf..d04e106e037b 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -16,8 +16,6 @@ using namespace nix; std::string chrootHelperName = "__run_in_chroot"; -extern char * * environ; - struct CmdRun : InstallablesCommand { std::vector<std::string> command = { "bash" }; @@ -30,8 +28,8 @@ struct CmdRun : InstallablesCommand .longName("command") .shortName('c') .description("command and arguments to be executed; defaults to 'bash'") - .arity(ArityAny) .labels({"command", "args"}) + .arity(ArityAny) .handler([&](std::vector<std::string> ss) { if (ss.empty()) throw UsageError("--command requires at least one argument"); command = 
ss; @@ -85,6 +83,10 @@ struct CmdRun : InstallablesCommand "To run GNU Hello:", "nix run nixpkgs.hello -c hello --greeting 'Hi everybody!'" }, + Example{ + "To run GNU Hello in a chroot store:", + "nix run --store ~/my-nix nixpkgs.hello -c hello" + }, }; } @@ -105,7 +107,7 @@ struct CmdRun : InstallablesCommand if (s) kept[var] = s; } - environ = nullptr; + clearEnv(); for (auto & var : kept) setenv(var.first.c_str(), var.second.c_str(), 1); @@ -184,7 +186,7 @@ void chrootHelper(int argc, char * * argv) but that doesn't work in a user namespace yet (Ubuntu has a patch for this: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1478578). */ - if (true /* !pathExists(storeDir) */) { + if (!pathExists(storeDir)) { // FIXME: Use overlayfs? Path tmpDir = createTempDir(); @@ -195,12 +197,15 @@ void chrootHelper(int argc, char * * argv) throw SysError("mounting '%s' on '%s'", realStoreDir, storeDir); for (auto entry : readDirectory("/")) { + auto src = "/" + entry.name; + auto st = lstat(src); + if (!S_ISDIR(st.st_mode)) continue; Path dst = tmpDir + "/" + entry.name; if (pathExists(dst)) continue; if (mkdir(dst.c_str(), 0700) == -1) - throw SysError(format("creating directory '%s'") % dst); - if (mount(("/" + entry.name).c_str(), dst.c_str(), "", MS_BIND | MS_REC, 0) == -1) - throw SysError(format("mounting '%s' on '%s'") % ("/" + entry.name) % dst); + throw SysError("creating directory '%s'", dst); + if (mount(src.c_str(), dst.c_str(), "", MS_BIND | MS_REC, 0) == -1) + throw SysError("mounting '%s' on '%s'", src, dst); } char * cwd = getcwd(0, 0); diff --git a/src/nix/search.cc b/src/nix/search.cc index f458367dcb55..539676698086 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -25,14 +25,14 @@ std::string hilite(const std::string & s, const std::smatch & m) struct CmdSearch : SourceExprCommand, MixJSON { - std::string re; + std::vector<std::string> res; bool writeCache = true; bool useCache = true; CmdSearch() { - expectArg("regex", &re, true); + expectArgs("regex", &res); mkFlag() .longName("update-cache") @@ -68,9 +68,13 @@ struct CmdSearch : SourceExprCommand, MixJSON "nix search blender" }, Example{ - "To search for Firefox and Chromium:", + "To search for Firefox or Chromium:", "nix search 'firefox|chromium'" }, + Example{ + "To search for git and frontend or gui:", + "nix search git 'frontend|gui'" + }, }; } @@ -78,25 +82,38 @@ struct CmdSearch : SourceExprCommand, MixJSON { settings.readOnlyMode = true; - std::regex regex(re, std::regex::extended | std::regex::icase); + // Empty search string should match all packages + // Use "^" here instead of ".*" due to differences in resulting highlighting + // (see #1893 -- libc++ claims empty search string is not in POSIX grammar) + if (res.empty()) { + res.push_back("^"); + } - auto state = getEvalState(); + std::vector<std::regex> regexes; + regexes.reserve(res.size()); - bool first = true; + for (auto &re : res) { + regexes.push_back(std::regex(re, std::regex::extended | std::regex::icase)); + } + + auto state = getEvalState(); - auto jsonOut = json ? std::make_unique<JSONObject>(std::cout, true) : nullptr; + auto jsonOut = json ? 
std::make_unique<JSONObject>(std::cout) : nullptr; auto sToplevel = state->symbols.create("_toplevel"); auto sRecurse = state->symbols.create("recurseForDerivations"); bool fromCache = false; + std::map<std::string, std::string> results; + std::function<void(Value *, std::string, bool, JSONObject *)> doExpr; doExpr = [&](Value * v, std::string attrPath, bool toplevel, JSONObject * cache) { debug("at attribute '%s'", attrPath); try { + uint found = 0; state->forceValue(*v); @@ -110,25 +127,33 @@ struct CmdSearch : SourceExprCommand, MixJSON if (state->isDerivation(*v)) { DrvInfo drv(*state, attrPath, v->attrs); + std::string description; + std::smatch attrPathMatch; + std::smatch descriptionMatch; + std::smatch nameMatch; + std::string name; DrvName parsed(drv.queryName()); - std::smatch attrPathMatch; - std::regex_search(attrPath, attrPathMatch, regex); + for (auto ®ex : regexes) { + std::regex_search(attrPath, attrPathMatch, regex); - auto name = parsed.name; - std::smatch nameMatch; - std::regex_search(name, nameMatch, regex); + name = parsed.name; + std::regex_search(name, nameMatch, regex); - std::string description = drv.queryMetaString("description"); - std::replace(description.begin(), description.end(), '\n', ' '); - std::smatch descriptionMatch; - std::regex_search(description, descriptionMatch, regex); + description = drv.queryMetaString("description"); + std::replace(description.begin(), description.end(), '\n', ' '); + std::regex_search(description, descriptionMatch, regex); + + if (!attrPathMatch.empty() + || !nameMatch.empty() + || !descriptionMatch.empty()) + { + found++; + } + } - if (!attrPathMatch.empty() - || !nameMatch.empty() - || !descriptionMatch.empty()) - { + if (found == res.size()) { if (json) { auto jsonElem = jsonOut->object(attrPath); @@ -138,10 +163,7 @@ struct CmdSearch : SourceExprCommand, MixJSON jsonElem.attr("description", description); } else { - if (!first) std::cout << "\n"; - first = false; - - std::cout << fmt( + results[attrPath] = fmt( "Attribute name: %s\n" "Package name: %s\n" "Version: %s\n" @@ -214,17 +236,35 @@ struct CmdSearch : SourceExprCommand, MixJSON } else { + createDirs(dirOf(jsonCacheFileName)); + Path tmpFile = fmt("%s.tmp.%d", jsonCacheFileName, getpid()); - std::ofstream jsonCacheFile(tmpFile); + std::ofstream jsonCacheFile; - auto cache = writeCache ? std::make_unique<JSONObject>(jsonCacheFile, false) : nullptr; + try { + // iostream considered harmful + jsonCacheFile.exceptions(std::ofstream::failbit); + jsonCacheFile.open(tmpFile); + + auto cache = writeCache ? std::make_unique<JSONObject>(jsonCacheFile, false) : nullptr; + + doExpr(getSourceExpr(*state), "", true, cache.get()); - doExpr(getSourceExpr(*state), "", true, cache.get()); + } catch (std::exception &) { + /* Fun fact: catching std::ios::failure does not work + due to C++11 ABI shenanigans. 
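
In the nix search changes above, a package is printed only when every supplied regex matches at least one of its attribute path, name, or description (the found == res.size() check). A small sketch of that "AND across regexes, OR across fields" predicate follows; the sample data is illustrative only.

#include <iostream>
#include <regex>
#include <string>
#include <vector>

// true iff every regex matches at least one of the given fields
static bool matchesAll(const std::vector<std::regex> & regexes,
                       const std::vector<std::string> & fields)
{
    for (const auto & re : regexes) {
        bool any = false;
        for (const auto & f : fields)
            if (std::regex_search(f, re)) { any = true; break; }
        if (!any) return false;
    }
    return true;
}

int main()
{
    std::vector<std::regex> regexes{
        std::regex("git", std::regex::extended | std::regex::icase),
        std::regex("frontend|gui", std::regex::extended | std::regex::icase)};

    // attribute path, package name, description -- as in: nix search git 'frontend|gui'
    std::vector<std::string> fields{
        "nixpkgs.gitg", "gitg-3.26", "GNOME GUI client to view git repositories"};

    std::cout << (matchesAll(regexes, fields) ? "match" : "no match") << "\n";  // match
    return 0;
}
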
+ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66145 */ + if (!jsonCacheFile) + throw Error("error writing to %s", tmpFile); + } - if (rename(tmpFile.c_str(), jsonCacheFileName.c_str()) == -1) + if (writeCache && rename(tmpFile.c_str(), jsonCacheFileName.c_str()) == -1) throw SysError("cannot rename '%s' to '%s'", tmpFile, jsonCacheFileName); } + + for (auto el : results) std::cout << el.second << "\n"; + } }; diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc index c628c2898d73..c64b12c8dd62 100644 --- a/src/nix/show-config.cc +++ b/src/nix/show-config.cc @@ -26,7 +26,7 @@ struct CmdShowConfig : Command, MixJSON { if (json) { // FIXME: use appropriate JSON types (bool, ints, etc). - JSONObject jsonObj(std::cout, true); + JSONObject jsonObj(std::cout); settings.toJSON(jsonObj); } else { for (auto & s : settings.getSettings()) diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc new file mode 100644 index 000000000000..758bbbc688bc --- /dev/null +++ b/src/nix/upgrade-nix.cc @@ -0,0 +1,131 @@ +#include "command.hh" +#include "store-api.hh" +#include "download.hh" +#include "eval.hh" +#include "attr-path.hh" + +using namespace nix; + +struct CmdUpgradeNix : StoreCommand +{ + Path profileDir; + + CmdUpgradeNix() + { + mkFlag() + .longName("profile") + .shortName('p') + .labels({"profile-dir"}) + .description("the Nix profile to upgrade") + .dest(&profileDir); + } + + std::string name() override + { + return "upgrade-nix"; + } + + std::string description() override + { + return "upgrade Nix to the latest stable version"; + } + + Examples examples() override + { + return { + Example{ + "To upgrade Nix to the latest stable version:", + "nix upgrade-nix" + }, + Example{ + "To upgrade Nix in a specific profile:", + "nix upgrade-nix -p /nix/var/nix/profiles/per-user/alice/profile" + }, + }; + } + + void run(ref<Store> store) override + { + settings.pureEval = true; + + if (profileDir == "") + profileDir = getProfileDir(store); + + printInfo("upgrading Nix in profile '%s'", profileDir); + + Path storePath; + { + Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); + storePath = getLatestNix(store); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", storePath)); + store->ensurePath(storePath); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("verifying that '%s' works...", storePath)); + auto program = storePath + "/bin/nix-env"; + auto s = runProgram(program, false, {"--version"}); + if (s.find("Nix") == std::string::npos) + throw Error("could not verify that '%s' works", program); + } + + { + Activity act(*logger, lvlInfo, actUnknown, fmt("installing '%s' into profile '%s'...", storePath, profileDir)); + runProgram(settings.nixBinDir + "/nix-env", false, + {"--profile", profileDir, "-i", storePath, "--no-sandbox"}); + } + } + + /* Return the profile in which Nix is installed. 
*/ + Path getProfileDir(ref<Store> store) + { + Path where; + + for (auto & dir : tokenizeString<Strings>(getEnv("PATH"), ":")) + if (pathExists(dir + "/nix-env")) { + where = dir; + break; + } + + if (where == "") + throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); + + printInfo("found Nix in '%s'", where); + + if (hasPrefix(where, "/run/current-system")) + throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); + + Path profileDir; + Path userEnv; + + if (baseNameOf(where) != "bin" || + !hasSuffix(userEnv = canonPath(profileDir = dirOf(where), true), "user-environment")) + throw Error("directory '%s' does not appear to be part of a Nix profile", where); + + if (!store->isValidPath(userEnv)) + throw Error("directory '%s' is not in the Nix store", userEnv); + + return profileDir; + } + + /* Return the store path of the latest stable Nix. */ + Path getLatestNix(ref<Store> store) + { + // FIXME: use nixos.org? + auto req = DownloadRequest("https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix"); + auto res = getDownloader()->download(req); + + EvalState state(Strings(), store); + auto v = state.allocValue(); + state.eval(state.parseExprFromString(*res.data, "/no-such-path"), *v); + Bindings & bindings(*state.allocBindings(0)); + auto v2 = findAlongAttrPath(state, settings.thisSystem, bindings, *v); + + return state.forceString(*v2); + } +}; + +static RegisterCommand r1(make_ref<CmdUpgradeNix>()); diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp new file mode 100644 index 000000000000..5b0b0ea5b301 --- /dev/null +++ b/src/nlohmann/json.hpp @@ -0,0 +1,14874 @@ +/* + __ _____ _____ _____ + __| | __| | | | JSON for Modern C++ +| | |__ | | | | | | version 3.0.1 +|_____|_____|_____|_|___| https://github.com/nlohmann/json + +Licensed under the MIT License <http://opensource.org/licenses/MIT>. +Copyright (c) 2013-2017 Niels Lohmann <http://nlohmann.me>. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +#ifndef NLOHMANN_JSON_HPP +#define NLOHMANN_JSON_HPP + +#include <algorithm> // all_of, copy, fill, find, for_each, generate_n, none_of, remove, reverse, transform +#include <array> // array +#include <cassert> // assert +#include <ciso646> // and, not, or +#include <clocale> // lconv, localeconv +#include <cmath> // isfinite, labs, ldexp, signbit +#include <cstddef> // nullptr_t, ptrdiff_t, size_t +#include <cstdint> // int64_t, uint64_t +#include <cstdlib> // abort, strtod, strtof, strtold, strtoul, strtoll, strtoull +#include <cstring> // memcpy, strlen +#include <forward_list> // forward_list +#include <functional> // function, hash, less +#include <initializer_list> // initializer_list +#include <iomanip> // hex +#include <iosfwd> // istream, ostream +#include <iterator> // advance, begin, back_inserter, bidirectional_iterator_tag, distance, end, inserter, iterator, iterator_traits, next, random_access_iterator_tag, reverse_iterator +#include <limits> // numeric_limits +#include <locale> // locale +#include <map> // map +#include <memory> // addressof, allocator, allocator_traits, unique_ptr +#include <numeric> // accumulate +#include <sstream> // stringstream +#include <string> // getline, stoi, string, to_string +#include <type_traits> // add_pointer, conditional, decay, enable_if, false_type, integral_constant, is_arithmetic, is_base_of, is_const, is_constructible, is_convertible, is_default_constructible, is_enum, is_floating_point, is_integral, is_nothrow_move_assignable, is_nothrow_move_constructible, is_pointer, is_reference, is_same, is_scalar, is_signed, remove_const, remove_cv, remove_pointer, remove_reference, true_type, underlying_type +#include <utility> // declval, forward, make_pair, move, pair, swap +#include <valarray> // valarray +#include <vector> // vector + +// exclude unsupported compilers +#if defined(__clang__) + #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 + #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" + #endif +#elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900 + #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" + #endif +#endif + +// disable float-equal warnings on GCC/clang +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + +// disable documentation warnings on clang +#if defined(__clang__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdocumentation" +#endif + +// allow for portable deprecation warnings +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #define JSON_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) + #define JSON_DEPRECATED __declspec(deprecated) +#else + #define JSON_DEPRECATED +#endif + +// allow to disable exceptions +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && not defined(JSON_NOEXCEPTION) + #define JSON_THROW(exception) throw exception + #define JSON_TRY try + #define JSON_CATCH(exception) catch(exception) +#else + #define JSON_THROW(exception) std::abort() + #define JSON_TRY if(true) + #define JSON_CATCH(exception) if(false) +#endif + +// manual branch prediction +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #define JSON_LIKELY(x) __builtin_expect(!!(x), 1) + #define JSON_UNLIKELY(x) 
__builtin_expect(!!(x), 0) +#else + #define JSON_LIKELY(x) x + #define JSON_UNLIKELY(x) x +#endif + +// C++ language standard detection +#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 +#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #define JSON_HAS_CPP_14 +#endif + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ +template<typename = void, typename = void> +struct adl_serializer; + +// forward declaration of basic_json (required to split the class) +template<template<typename, typename, typename...> class ObjectType = std::map, + template<typename, typename...> class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template<typename> class AllocatorType = std::allocator, + template<typename, typename = void> class JSONSerializer = adl_serializer> +class basic_json; + +// Ugly macros to avoid uglier copy-paste when specializing basic_json. They +// may be removed in the future once the class is split. + +#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ + template<template<typename, typename, typename...> class ObjectType, \ + template<typename, typename...> class ArrayType, \ + class StringType, class BooleanType, class NumberIntegerType, \ + class NumberUnsignedType, class NumberFloatType, \ + template<typename> class AllocatorType, \ + template<typename, typename = void> class JSONSerializer> + +#define NLOHMANN_BASIC_JSON_TPL \ + basic_json<ObjectType, ArrayType, StringType, BooleanType, \ + NumberIntegerType, NumberUnsignedType, NumberFloatType, \ + AllocatorType, JSONSerializer> + + +/*! +@brief unnamed namespace with internal helper functions + +This namespace collects some functions that could not be defined inside the +@ref basic_json class. + +@since version 2.1.0 +*/ +namespace detail +{ +//////////////// +// exceptions // +//////////////// + +/*! +@brief general exception of the @ref basic_json class + +This class is an extension of `std::exception` objects with a member @a id for +exception ids. It is used as the base class for all exceptions thrown by the +@ref basic_json class. This class can hence be used as "wildcard" to catch +exceptions. + +Subclasses: +- @ref parse_error for exceptions indicating a parse error +- @ref invalid_iterator for exceptions indicating errors with iterators +- @ref type_error for exceptions indicating executing a member function with + a wrong type +- @ref out_of_range for exceptions indicating access out of the defined range +- @ref other_error for exceptions indicating other library errors + +@internal +@note To have nothrow-copy-constructible exceptions, we internally use + `std::runtime_error` which can cope with arbitrary-length error messages. + Intermediate strings are built with static functions and then passed to + the actual constructor. 
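For orientation, here is a minimal usage sketch of the defaulted basic_json template declared above (illustrative only, not part of this patch; the include path is an assumption about how the vendored header is reached). The nlohmann::json alias defined near the end of this header is simply basic_json<> with the defaults shown: std::map for objects, std::vector for arrays, std::string, bool, int64_t, uint64_t and double for the scalar types.

#include <string>
#include <type_traits>
#include "nlohmann/json.hpp"   // assumed include path for the vendored header

int main()
{
    // nlohmann::json is the default instantiation of the template above
    static_assert(std::is_same<nlohmann::json, nlohmann::basic_json<>>::value,
                  "json is basic_json<> with all defaults");

    nlohmann::json j;
    j["name"] = "nix";            // object, backed by std::map
    j["versions"] = {1, 2, 3};    // array, backed by std::vector
    j["size"] = 2.5;              // number_float, stored as double

    std::string dumped = j.dump();   // serialize back to a std::string
    (void) dumped;
}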
+@endinternal + +@liveexample{The following code shows how arbitrary library exceptions can be +caught.,exception} + +@since version 3.0.0 +*/ +class exception : public std::exception +{ + public: + /// returns the explanatory string + const char* what() const noexcept override + { + return m.what(); + } + + /// the id of the exception + const int id; + + protected: + exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} + + static std::string name(const std::string& ename, int id_) + { + return "[json.exception." + ename + "." + std::to_string(id_) + "] "; + } + + private: + /// an exception object as storage for error messages + std::runtime_error m; +}; + +/*! +@brief exception indicating a parse error + +This exception is thrown by the library when a parse error occurs. Parse errors +can occur during the deserialization of JSON text, CBOR, MessagePack, as well +as when using JSON Patch. + +Member @a byte holds the byte index of the last read character in the input +file. + +Exceptions have ids 1xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. +json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. +json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. +json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. +json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. +json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. +json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. +json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. +json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. +json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. 
+json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. +json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. + +@note For an input with n bytes, 1 is the index of the first character and n+1 + is the index of the terminating null byte or the end of file. This also + holds true when reading a byte vector (CBOR or MessagePack). + +@liveexample{The following code shows how a `parse_error` exception can be +caught.,parse_error} + +@sa @ref exception for the base class of the library exceptions +@sa @ref invalid_iterator for exceptions indicating errors with iterators +@sa @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa @ref out_of_range for exceptions indicating access out of the defined range +@sa @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class parse_error : public exception +{ + public: + /*! + @brief create a parse error exception + @param[in] id_ the id of the exception + @param[in] byte_ the byte index where the error occurred (or 0 if the + position cannot be determined) + @param[in] what_arg the explanatory string + @return parse_error object + */ + static parse_error create(int id_, std::size_t byte_, const std::string& what_arg) + { + std::string w = exception::name("parse_error", id_) + "parse error" + + (byte_ != 0 ? (" at " + std::to_string(byte_)) : "") + + ": " + what_arg; + return parse_error(id_, byte_, w.c_str()); + } + + /*! + @brief byte index of the parse error + + The byte index of the last read character in the input file. + + @note For an input with n bytes, 1 is the index of the first character and + n+1 is the index of the terminating null byte or the end of file. + This also holds true when reading a byte vector (CBOR or MessagePack). + */ + const std::size_t byte; + + private: + parse_error(int id_, std::size_t byte_, const char* what_arg) + : exception(id_, what_arg), byte(byte_) {} +}; + +/*! +@brief exception indicating errors with iterators + +This exception is thrown if iterators passed to a library function do not match +the expected semantics. + +Exceptions have ids 2xx. + +name / id | example message | description +----------------------------------- | --------------- | ------------------------- +json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. +json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. 
+json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. +json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. +json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. +json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. +json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. +json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. +json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. +json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). + +@liveexample{The following code shows how an `invalid_iterator` exception can be +caught.,invalid_iterator} + +@sa @ref exception for the base class of the library exceptions +@sa @ref parse_error for exceptions indicating a parse error +@sa @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa @ref out_of_range for exceptions indicating access out of the defined range +@sa @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class invalid_iterator : public exception +{ + public: + static invalid_iterator create(int id_, const std::string& what_arg) + { + std::string w = exception::name("invalid_iterator", id_) + what_arg; + return invalid_iterator(id_, w.c_str()); + } + + private: + invalid_iterator(int id_, const char* what_arg) + : exception(id_, what_arg) {} +}; + +/*! 
+@brief exception indicating executing a member function with a wrong type + +This exception is thrown in case of a type error; that is, a library function is +executed on a JSON value whose type does not match the expected semantics. + +Exceptions have ids 3xx. + +name / id | example message | description +----------------------------- | --------------- | ------------------------- +json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. +json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. +json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t&. +json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. +json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. +json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. +json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. +json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. +json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. +json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. +json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. +json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. +json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. +json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. +json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. +json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. 
| + +@liveexample{The following code shows how a `type_error` exception can be +caught.,type_error} + +@sa @ref exception for the base class of the library exceptions +@sa @ref parse_error for exceptions indicating a parse error +@sa @ref invalid_iterator for exceptions indicating errors with iterators +@sa @ref out_of_range for exceptions indicating access out of the defined range +@sa @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class type_error : public exception +{ + public: + static type_error create(int id_, const std::string& what_arg) + { + std::string w = exception::name("type_error", id_) + what_arg; + return type_error(id_, w.c_str()); + } + + private: + type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating access out of the defined range + +This exception is thrown in case a library function is called on an input +parameter that exceeds the expected range, for instance in case of array +indices or nonexisting object keys. + +Exceptions have ids 4xx. + +name / id | example message | description +------------------------------- | --------------- | ------------------------- +json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. +json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. +json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. +json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. +json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. +json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF. + +@liveexample{The following code shows how an `out_of_range` exception can be +caught.,out_of_range} + +@sa @ref exception for the base class of the library exceptions +@sa @ref parse_error for exceptions indicating a parse error +@sa @ref invalid_iterator for exceptions indicating errors with iterators +@sa @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class out_of_range : public exception +{ + public: + static out_of_range create(int id_, const std::string& what_arg) + { + std::string w = exception::name("out_of_range", id_) + what_arg; + return out_of_range(id_, w.c_str()); + } + + private: + out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating other library errors + +This exception is thrown in case of errors that cannot be classified with the +other exception types. + +Exceptions have ids 5xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. 
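A short sketch of how these exception classes surface to callers (illustrative only, not part of the vendored header; the include path is assumed). Each class can be caught individually, or all of them at once through the common exception base shown earlier, whose public id member carries the numeric code listed in the tables above.

#include <iostream>
#include "nlohmann/json.hpp"   // assumed include path

using json = nlohmann::json;

int main()
{
    // parse_error: ids 1xx, carries the byte offset of the failure
    try {
        json::parse(R"({"key": )");            // truncated input
    } catch (const json::parse_error& e) {
        std::cout << "parse_error " << e.id << " at byte " << e.byte << '\n';
    }

    // type_error: ids 3xx, e.g. converting a string value to a number
    try {
        json j = "not a number";
        int n = j.get<int>();
        (void) n;
    } catch (const json::type_error& e) {
        std::cout << "type_error " << e.id << ": " << e.what() << '\n';
    }

    // out_of_range: ids 4xx, e.g. at() with a missing key
    try {
        json j = json::object();
        j.at("missing");
    } catch (const json::out_of_range& e) {
        std::cout << "out_of_range " << e.id << ": " << e.what() << '\n';
    }
}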
+ +@sa @ref exception for the base class of the library exceptions +@sa @ref parse_error for exceptions indicating a parse error +@sa @ref invalid_iterator for exceptions indicating errors with iterators +@sa @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa @ref out_of_range for exceptions indicating access out of the defined range + +@liveexample{The following code shows how an `other_error` exception can be +caught.,other_error} + +@since version 3.0.0 +*/ +class other_error : public exception +{ + public: + static other_error create(int id_, const std::string& what_arg) + { + std::string w = exception::name("other_error", id_) + what_arg; + return other_error(id_, w.c_str()); + } + + private: + other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + + + +/////////////////////////// +// JSON type enumeration // +/////////////////////////// + +/*! +@brief the JSON type enumeration + +This enumeration collects the different JSON types. It is internally used to +distinguish the stored values, and the functions @ref basic_json::is_null(), +@ref basic_json::is_object(), @ref basic_json::is_array(), +@ref basic_json::is_string(), @ref basic_json::is_boolean(), +@ref basic_json::is_number() (with @ref basic_json::is_number_integer(), +@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()), +@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and +@ref basic_json::is_structured() rely on it. + +@note There are three enumeration entries (number_integer, number_unsigned, and +number_float), because the library distinguishes these three types for numbers: +@ref basic_json::number_unsigned_t is used for unsigned integers, +@ref basic_json::number_integer_t is used for signed integers, and +@ref basic_json::number_float_t is used for floating-point numbers or to +approximate integers which do not fit in the limits of their respective type. + +@sa @ref basic_json::basic_json(const value_t value_type) -- create a JSON +value with the default value for a given type + +@since version 1.0.0 +*/ +enum class value_t : uint8_t +{ + null, ///< null value + object, ///< object (unordered set of name/value pairs) + array, ///< array (ordered collection of values) + string, ///< string value + boolean, ///< boolean value + number_integer, ///< number value (signed integer) + number_unsigned, ///< number value (unsigned integer) + number_float, ///< number value (floating-point) + discarded ///< discarded by the the parser callback function +}; + +/*! 
+@brief comparison operator for JSON types + +Returns an ordering that is similar to Python: +- order: null < boolean < number < object < array < string +- furthermore, each type is not smaller than itself +- discarded values are not comparable + +@since version 1.0.0 +*/ +inline bool operator<(const value_t lhs, const value_t rhs) noexcept +{ + static constexpr std::array<uint8_t, 8> order = {{ + 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */, + 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */ + } + }; + + const auto l_index = static_cast<std::size_t>(lhs); + const auto r_index = static_cast<std::size_t>(rhs); + return l_index < order.size() and r_index < order.size() and order[l_index] < order[r_index]; +} + + +///////////// +// helpers // +///////////// + +template<typename> struct is_basic_json : std::false_type {}; + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {}; + +// alias templates to reduce boilerplate +template<bool B, typename T = void> +using enable_if_t = typename std::enable_if<B, T>::type; + +template<typename T> +using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type; + +// implementation of C++14 index_sequence and affiliates +// source: https://stackoverflow.com/a/32223343 +template<std::size_t... Ints> +struct index_sequence +{ + using type = index_sequence; + using value_type = std::size_t; + static constexpr std::size_t size() noexcept + { + return sizeof...(Ints); + } +}; + +template<class Sequence1, class Sequence2> +struct merge_and_renumber; + +template<std::size_t... I1, std::size_t... I2> +struct merge_and_renumber<index_sequence<I1...>, index_sequence<I2...>> + : index_sequence < I1..., (sizeof...(I1) + I2)... > {}; + +template<std::size_t N> +struct make_index_sequence + : merge_and_renumber < typename make_index_sequence < N / 2 >::type, + typename make_index_sequence < N - N / 2 >::type > {}; + +template<> struct make_index_sequence<0> : index_sequence<> {}; +template<> struct make_index_sequence<1> : index_sequence<0> {}; + +template<typename... Ts> +using index_sequence_for = make_index_sequence<sizeof...(Ts)>; + +/* +Implementation of two C++17 constructs: conjunction, negation. This is needed +to avoid evaluating all the traits in a condition + +For example: not std::is_same<void, T>::value and has_value_type<T>::value +will not compile when T = void (on MSVC at least). Whereas +conjunction<negation<std::is_same<void, T>>, has_value_type<T>>::value will +stop evaluating if negation<...>::value == false + +Please note that those constructs must be used with caution, since symbols can +become very long quickly (which can slow down compilation and cause MSVC +internal compiler errors). Only use it when you have to (see example ahead). +*/ +template<class...> struct conjunction : std::true_type {}; +template<class B1> struct conjunction<B1> : B1 {}; +template<class B1, class... 
Bn> +struct conjunction<B1, Bn...> : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {}; + +template<class B> struct negation : std::integral_constant<bool, not B::value> {}; + +// dispatch utility (taken from ranges-v3) +template<unsigned N> struct priority_tag : priority_tag < N - 1 > {}; +template<> struct priority_tag<0> {}; + + +////////////////// +// constructors // +////////////////// + +template<value_t> struct external_constructor; + +template<> +struct external_constructor<value_t::boolean> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept + { + j.m_type = value_t::boolean; + j.m_value = b; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::string> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s) + { + j.m_type = value_t::string; + j.m_value = s; + j.assert_invariant(); + } + + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s) + { + j.m_type = value_t::string; + j.m_value = std::move(s); + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::number_float> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept + { + j.m_type = value_t::number_float; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::number_unsigned> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept + { + j.m_type = value_t::number_unsigned; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::number_integer> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept + { + j.m_type = value_t::number_integer; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::array> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr) + { + j.m_type = value_t::array; + j.m_value = arr; + j.assert_invariant(); + } + + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr) + { + j.m_type = value_t::array; + j.m_value = std::move(arr); + j.assert_invariant(); + } + + template<typename BasicJsonType, typename CompatibleArrayType, + enable_if_t<not std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value, + int> = 0> + static void construct(BasicJsonType& j, const CompatibleArrayType& arr) + { + using std::begin; + using std::end; + j.m_type = value_t::array; + j.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr)); + j.assert_invariant(); + } + + template<typename BasicJsonType> + static void construct(BasicJsonType& j, const std::vector<bool>& arr) + { + j.m_type = value_t::array; + j.m_value = value_t::array; + j.m_value.array->reserve(arr.size()); + for (const bool x : arr) + { + j.m_value.array->push_back(x); + } + j.assert_invariant(); + } + + template<typename BasicJsonType, typename T, + enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0> + static void construct(BasicJsonType& j, const std::valarray<T>& arr) + { + j.m_type = value_t::array; + j.m_value = 
value_t::array; + j.m_value.array->resize(arr.size()); + std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin()); + j.assert_invariant(); + } +}; + +template<> +struct external_constructor<value_t::object> +{ + template<typename BasicJsonType> + static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj) + { + j.m_type = value_t::object; + j.m_value = obj; + j.assert_invariant(); + } + + template<typename BasicJsonType> + static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj) + { + j.m_type = value_t::object; + j.m_value = std::move(obj); + j.assert_invariant(); + } + + template<typename BasicJsonType, typename CompatibleObjectType, + enable_if_t<not std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int> = 0> + static void construct(BasicJsonType& j, const CompatibleObjectType& obj) + { + using std::begin; + using std::end; + + j.m_type = value_t::object; + j.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj)); + j.assert_invariant(); + } +}; + + +//////////////////////// +// has_/is_ functions // +//////////////////////// + +/*! +@brief Helper to determine whether there's a key_type for T. + +This helper is used to tell associative containers apart from other containers +such as sequence containers. For instance, `std::map` passes the test as it +contains a `mapped_type`, whereas `std::vector` fails the test. + +@sa http://stackoverflow.com/a/7728728/266378 +@since version 1.0.0, overworked in version 2.0.6 +*/ +#define NLOHMANN_JSON_HAS_HELPER(type) \ + template<typename T> struct has_##type { \ + private: \ + template<typename U, typename = typename U::type> \ + static int detect(U &&); \ + static void detect(...); \ + public: \ + static constexpr bool value = \ + std::is_integral<decltype(detect(std::declval<T>()))>::value; \ + } + +NLOHMANN_JSON_HAS_HELPER(mapped_type); +NLOHMANN_JSON_HAS_HELPER(key_type); +NLOHMANN_JSON_HAS_HELPER(value_type); +NLOHMANN_JSON_HAS_HELPER(iterator); + +#undef NLOHMANN_JSON_HAS_HELPER + + +template<bool B, class RealType, class CompatibleObjectType> +struct is_compatible_object_type_impl : std::false_type {}; + +template<class RealType, class CompatibleObjectType> +struct is_compatible_object_type_impl<true, RealType, CompatibleObjectType> +{ + static constexpr auto value = + std::is_constructible<typename RealType::key_type, typename CompatibleObjectType::key_type>::value and + std::is_constructible<typename RealType::mapped_type, typename CompatibleObjectType::mapped_type>::value; +}; + +template<class BasicJsonType, class CompatibleObjectType> +struct is_compatible_object_type +{ + static auto constexpr value = is_compatible_object_type_impl < + conjunction<negation<std::is_same<void, CompatibleObjectType>>, + has_mapped_type<CompatibleObjectType>, + has_key_type<CompatibleObjectType>>::value, + typename BasicJsonType::object_t, CompatibleObjectType >::value; +}; + +template<typename BasicJsonType, typename T> +struct is_basic_json_nested_type +{ + static auto constexpr value = std::is_same<T, typename BasicJsonType::iterator>::value or + std::is_same<T, typename BasicJsonType::const_iterator>::value or + std::is_same<T, typename BasicJsonType::reverse_iterator>::value or + std::is_same<T, typename BasicJsonType::const_reverse_iterator>::value; +}; + +template<class BasicJsonType, class CompatibleArrayType> +struct is_compatible_array_type +{ + static auto constexpr value = + conjunction<negation<std::is_same<void, 
CompatibleArrayType>>, + negation<is_compatible_object_type< + BasicJsonType, CompatibleArrayType>>, + negation<std::is_constructible<typename BasicJsonType::string_t, + CompatibleArrayType>>, + negation<is_basic_json_nested_type<BasicJsonType, CompatibleArrayType>>, + has_value_type<CompatibleArrayType>, + has_iterator<CompatibleArrayType>>::value; +}; + +template<bool, typename, typename> +struct is_compatible_integer_type_impl : std::false_type {}; + +template<typename RealIntegerType, typename CompatibleNumberIntegerType> +struct is_compatible_integer_type_impl<true, RealIntegerType, CompatibleNumberIntegerType> +{ + // is there an assert somewhere on overflows? + using RealLimits = std::numeric_limits<RealIntegerType>; + using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>; + + static constexpr auto value = + std::is_constructible<RealIntegerType, CompatibleNumberIntegerType>::value and + CompatibleLimits::is_integer and + RealLimits::is_signed == CompatibleLimits::is_signed; +}; + +template<typename RealIntegerType, typename CompatibleNumberIntegerType> +struct is_compatible_integer_type +{ + static constexpr auto value = + is_compatible_integer_type_impl < + std::is_integral<CompatibleNumberIntegerType>::value and + not std::is_same<bool, CompatibleNumberIntegerType>::value, + RealIntegerType, CompatibleNumberIntegerType >::value; +}; + + +// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists +template<typename BasicJsonType, typename T> +struct has_from_json +{ + private: + // also check the return type of from_json + template<typename U, typename = enable_if_t<std::is_same<void, decltype(uncvref_t<U>::from_json( + std::declval<BasicJsonType>(), std::declval<T&>()))>::value>> + static int detect(U&&); + static void detect(...); + + public: + static constexpr bool value = std::is_integral<decltype( + detect(std::declval<typename BasicJsonType::template json_serializer<T, void>>()))>::value; +}; + +// This trait checks if JSONSerializer<T>::from_json(json const&) exists +// this overload is used for non-default-constructible user-defined-types +template<typename BasicJsonType, typename T> +struct has_non_default_from_json +{ + private: + template<typename U, typename = + enable_if_t<std::is_same<T, decltype(uncvref_t<U>::from_json(std::declval<BasicJsonType>()))>::value>> + static int detect(U&&); + static void detect(...); + + public: + static constexpr bool value = std::is_integral<decltype(detect( + std::declval<typename BasicJsonType::template json_serializer<T, void>>()))>::value; +}; + +// This trait checks if BasicJsonType::json_serializer<T>::to_json exists +template<typename BasicJsonType, typename T> +struct has_to_json +{ + private: + template<typename U, typename = decltype(uncvref_t<U>::to_json( + std::declval<BasicJsonType&>(), std::declval<T>()))> + static int detect(U&&); + static void detect(...); + + public: + static constexpr bool value = std::is_integral<decltype(detect( + std::declval<typename BasicJsonType::template json_serializer<T, void>>()))>::value; +}; + + +///////////// +// to_json // +///////////// + +template<typename BasicJsonType, typename T, + enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0> +void to_json(BasicJsonType& j, T b) noexcept +{ + external_constructor<value_t::boolean>::construct(j, b); +} + +template<typename BasicJsonType, typename CompatibleString, + enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0> +void 
to_json(BasicJsonType& j, const CompatibleString& s) +{ + external_constructor<value_t::string>::construct(j, s); +} + +template<typename BasicJsonType> +void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s) +{ + external_constructor<value_t::string>::construct(j, std::move(s)); +} + +template<typename BasicJsonType, typename FloatType, + enable_if_t<std::is_floating_point<FloatType>::value, int> = 0> +void to_json(BasicJsonType& j, FloatType val) noexcept +{ + external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val)); +} + +template<typename BasicJsonType, typename CompatibleNumberUnsignedType, + enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0> +void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept +{ + external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val)); +} + +template<typename BasicJsonType, typename CompatibleNumberIntegerType, + enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0> +void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept +{ + external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val)); +} + +template<typename BasicJsonType, typename EnumType, + enable_if_t<std::is_enum<EnumType>::value, int> = 0> +void to_json(BasicJsonType& j, EnumType e) noexcept +{ + using underlying_type = typename std::underlying_type<EnumType>::type; + external_constructor<value_t::number_integer>::construct(j, static_cast<underlying_type>(e)); +} + +template<typename BasicJsonType> +void to_json(BasicJsonType& j, const std::vector<bool>& e) +{ + external_constructor<value_t::array>::construct(j, e); +} + +template<typename BasicJsonType, typename CompatibleArrayType, + enable_if_t<is_compatible_array_type<BasicJsonType, CompatibleArrayType>::value or + std::is_same<typename BasicJsonType::array_t, CompatibleArrayType>::value, + int> = 0> +void to_json(BasicJsonType& j, const CompatibleArrayType& arr) +{ + external_constructor<value_t::array>::construct(j, arr); +} + +template<typename BasicJsonType, typename T, + enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0> +void to_json(BasicJsonType& j, std::valarray<T> arr) +{ + external_constructor<value_t::array>::construct(j, std::move(arr)); +} + +template<typename BasicJsonType> +void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr) +{ + external_constructor<value_t::array>::construct(j, std::move(arr)); +} + +template<typename BasicJsonType, typename CompatibleObjectType, + enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value, int> = 0> +void to_json(BasicJsonType& j, const CompatibleObjectType& obj) +{ + external_constructor<value_t::object>::construct(j, obj); +} + +template<typename BasicJsonType> +void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) +{ + external_constructor<value_t::object>::construct(j, std::move(obj)); +} + +template<typename BasicJsonType, typename T, std::size_t N, + enable_if_t<not std::is_constructible<typename BasicJsonType::string_t, T (&)[N]>::value, int> = 0> +void to_json(BasicJsonType& j, T (&arr)[N]) +{ + external_constructor<value_t::array>::construct(j, arr); +} + +template<typename BasicJsonType, typename... 
Args> +void to_json(BasicJsonType& j, const std::pair<Args...>& p) +{ + j = {p.first, p.second}; +} + +template<typename BasicJsonType, typename Tuple, std::size_t... Idx> +void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...>) +{ + j = {std::get<Idx>(t)...}; +} + +template<typename BasicJsonType, typename... Args> +void to_json(BasicJsonType& j, const std::tuple<Args...>& t) +{ + to_json_tuple_impl(j, t, index_sequence_for<Args...> {}); +} + +/////////////// +// from_json // +/////////////// + +// overloads for basic_json template parameters +template<typename BasicJsonType, typename ArithmeticType, + enable_if_t<std::is_arithmetic<ArithmeticType>::value and + not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value, + int> = 0> +void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) +{ + switch (static_cast<value_t>(j)) + { + case value_t::number_unsigned: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>()); + break; + } + case value_t::number_integer: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>()); + break; + } + case value_t::number_float: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>()); + break; + } + + default: + JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()))); + } +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b) +{ + if (JSON_UNLIKELY(not j.is_boolean())) + { + JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name()))); + } + b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>(); +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s) +{ + if (JSON_UNLIKELY(not j.is_string())) + { + JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name()))); + } + s = *j.template get_ptr<const typename BasicJsonType::string_t*>(); +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val) +{ + get_arithmetic_value(j, val); +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val) +{ + get_arithmetic_value(j, val); +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val) +{ + get_arithmetic_value(j, val); +} + +template<typename BasicJsonType, typename EnumType, + enable_if_t<std::is_enum<EnumType>::value, int> = 0> +void from_json(const BasicJsonType& j, EnumType& e) +{ + typename std::underlying_type<EnumType>::type val; + get_arithmetic_value(j, val); + e = static_cast<EnumType>(val); +} + +template<typename BasicJsonType> +void from_json(const BasicJsonType& j, typename BasicJsonType::array_t& arr) +{ + if (JSON_UNLIKELY(not j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()))); + } + arr = *j.template get_ptr<const typename BasicJsonType::array_t*>(); +} + +// forward_list doesn't have an insert method +template<typename BasicJsonType, typename T, typename Allocator, + enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0> +void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l) +{ + if 
(JSON_UNLIKELY(not j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()))); + } + std::transform(j.rbegin(), j.rend(), + std::front_inserter(l), [](const BasicJsonType & i) + { + return i.template get<T>(); + }); +} + +// valarray doesn't have an insert method +template<typename BasicJsonType, typename T, + enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0> +void from_json(const BasicJsonType& j, std::valarray<T>& l) +{ + if (JSON_UNLIKELY(not j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()))); + } + l.resize(j.size()); + std::copy(j.m_value.array->begin(), j.m_value.array->end(), std::begin(l)); +} + +template<typename BasicJsonType, typename CompatibleArrayType> +void from_json_array_impl(const BasicJsonType& j, CompatibleArrayType& arr, priority_tag<0> /*unused*/) +{ + using std::end; + + std::transform(j.begin(), j.end(), + std::inserter(arr, end(arr)), [](const BasicJsonType & i) + { + // get<BasicJsonType>() returns *this, this won't call a from_json + // method when value_type is BasicJsonType + return i.template get<typename CompatibleArrayType::value_type>(); + }); +} + +template<typename BasicJsonType, typename CompatibleArrayType> +auto from_json_array_impl(const BasicJsonType& j, CompatibleArrayType& arr, priority_tag<1> /*unused*/) +-> decltype( + arr.reserve(std::declval<typename CompatibleArrayType::size_type>()), + void()) +{ + using std::end; + + arr.reserve(j.size()); + std::transform(j.begin(), j.end(), + std::inserter(arr, end(arr)), [](const BasicJsonType & i) + { + // get<BasicJsonType>() returns *this, this won't call a from_json + // method when value_type is BasicJsonType + return i.template get<typename CompatibleArrayType::value_type>(); + }); +} + +template<typename BasicJsonType, typename T, std::size_t N> +void from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr, priority_tag<2> /*unused*/) +{ + for (std::size_t i = 0; i < N; ++i) + { + arr[i] = j.at(i).template get<T>(); + } +} + +template<typename BasicJsonType, typename CompatibleArrayType, + enable_if_t<is_compatible_array_type<BasicJsonType, CompatibleArrayType>::value and + std::is_convertible<BasicJsonType, typename CompatibleArrayType::value_type>::value and + not std::is_same<typename BasicJsonType::array_t, CompatibleArrayType>::value, int> = 0> +void from_json(const BasicJsonType& j, CompatibleArrayType& arr) +{ + if (JSON_UNLIKELY(not j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()))); + } + + from_json_array_impl(j, arr, priority_tag<2> {}); +} + +template<typename BasicJsonType, typename CompatibleObjectType, + enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value, int> = 0> +void from_json(const BasicJsonType& j, CompatibleObjectType& obj) +{ + if (JSON_UNLIKELY(not j.is_object())) + { + JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name()))); + } + + auto inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>(); + using value_type = typename CompatibleObjectType::value_type; + std::transform( + inner_object->begin(), inner_object->end(), + std::inserter(obj, obj.begin()), + [](typename BasicJsonType::object_t::value_type const & p) + { + return value_type(p.first, p.second.template get<typename CompatibleObjectType::mapped_type>()); + }); +} + +// overload for arithmetic types, not 
chosen for basic_json template arguments +// (BooleanType, etc..); note: Is it really necessary to provide explicit +// overloads for boolean_t etc. in case of a custom BooleanType which is not +// an arithmetic type? +template<typename BasicJsonType, typename ArithmeticType, + enable_if_t < + std::is_arithmetic<ArithmeticType>::value and + not std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value and + not std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value and + not std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value and + not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value, + int> = 0> +void from_json(const BasicJsonType& j, ArithmeticType& val) +{ + switch (static_cast<value_t>(j)) + { + case value_t::number_unsigned: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>()); + break; + } + case value_t::number_integer: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>()); + break; + } + case value_t::number_float: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>()); + break; + } + case value_t::boolean: + { + val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>()); + break; + } + + default: + JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()))); + } +} + +template<typename BasicJsonType, typename A1, typename A2> +void from_json(const BasicJsonType& j, std::pair<A1, A2>& p) +{ + p = {j.at(0).template get<A1>(), j.at(1).template get<A2>()}; +} + +template<typename BasicJsonType, typename Tuple, std::size_t... Idx> +void from_json_tuple_impl(const BasicJsonType& j, Tuple& t, index_sequence<Idx...>) +{ + t = std::make_tuple(j.at(Idx).template get<typename std::tuple_element<Idx, Tuple>::type>()...); +} + +template<typename BasicJsonType, typename... 
Args> +void from_json(const BasicJsonType& j, std::tuple<Args...>& t) +{ + from_json_tuple_impl(j, t, index_sequence_for<Args...> {}); +} + +struct to_json_fn +{ + private: + template<typename BasicJsonType, typename T> + auto call(BasicJsonType& j, T&& val, priority_tag<1> /*unused*/) const noexcept(noexcept(to_json(j, std::forward<T>(val)))) + -> decltype(to_json(j, std::forward<T>(val)), void()) + { + return to_json(j, std::forward<T>(val)); + } + + template<typename BasicJsonType, typename T> + void call(BasicJsonType& /*unused*/, T&& /*unused*/, priority_tag<0> /*unused*/) const noexcept + { + static_assert(sizeof(BasicJsonType) == 0, + "could not find to_json() method in T's namespace"); + +#ifdef _MSC_VER + // MSVC does not show a stacktrace for the above assert + using decayed = uncvref_t<T>; + static_assert(sizeof(typename decayed::force_msvc_stacktrace) == 0, + "forcing MSVC stacktrace to show which T we're talking about."); +#endif + } + + public: + template<typename BasicJsonType, typename T> + void operator()(BasicJsonType& j, T&& val) const + noexcept(noexcept(std::declval<to_json_fn>().call(j, std::forward<T>(val), priority_tag<1> {}))) + { + return call(j, std::forward<T>(val), priority_tag<1> {}); + } +}; + +struct from_json_fn +{ + private: + template<typename BasicJsonType, typename T> + auto call(const BasicJsonType& j, T& val, priority_tag<1> /*unused*/) const + noexcept(noexcept(from_json(j, val))) + -> decltype(from_json(j, val), void()) + { + return from_json(j, val); + } + + template<typename BasicJsonType, typename T> + void call(const BasicJsonType& /*unused*/, T& /*unused*/, priority_tag<0> /*unused*/) const noexcept + { + static_assert(sizeof(BasicJsonType) == 0, + "could not find from_json() method in T's namespace"); +#ifdef _MSC_VER + // MSVC does not show a stacktrace for the above assert + using decayed = uncvref_t<T>; + static_assert(sizeof(typename decayed::force_msvc_stacktrace) == 0, + "forcing MSVC stacktrace to show which T we're talking about."); +#endif + } + + public: + template<typename BasicJsonType, typename T> + void operator()(const BasicJsonType& j, T& val) const + noexcept(noexcept(std::declval<from_json_fn>().call(j, val, priority_tag<1> {}))) + { + return call(j, val, priority_tag<1> {}); + } +}; + +// taken from ranges-v3 +template<typename T> +struct static_const +{ + static constexpr T value{}; +}; + +template<typename T> +constexpr T static_const<T>::value; + +//////////////////// +// input adapters // +//////////////////// + +/*! +@brief abstract input adapter interface + +Produces a stream of std::char_traits<char>::int_type characters from a +std::istream, a buffer, or some other input type. Accepts the return of exactly +one non-EOF character for future input. The int_type characters returned +consist of all valid char values as positive values (typically unsigned char), +plus an EOF value outside that range, specified by the value of the function +std::char_traits<char>::eof(). This value is typically -1, but could be any +arbitrary value which is not a valid char value. +*/ +struct input_adapter_protocol +{ + /// get a character [0,255] or std::char_traits<char>::eof(). + virtual std::char_traits<char>::int_type get_character() = 0; + /// restore the last non-eof() character to input + virtual void unget_character() = 0; + virtual ~input_adapter_protocol() = default; +}; + +/// a type to simplify interfaces +using input_adapter_t = std::shared_ptr<input_adapter_protocol>; + +/*! +Input adapter for a (caching) istream. 
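To illustrate the to_json_fn / from_json_fn customization points defined above: user code provides free to_json / from_json functions in its own namespace, and the dispatchers pick them up through argument-dependent lookup. A minimal sketch (illustrative only; the myapp namespace and point type are invented for the example, and the include path is assumed):

#include <iostream>
#include <map>
#include <string>
#include <vector>
#include "nlohmann/json.hpp"   // assumed include path

using json = nlohmann::json;

namespace myapp {

struct point { double x; double y; };

// free functions in the type's own namespace, found via ADL
void to_json(json& j, const point& p)
{
    j = json{{"x", p.x}, {"y", p.y}};
}

void from_json(const json& j, point& p)
{
    p.x = j.at("x").get<double>();
    p.y = j.at("y").get<double>();
}

} // namespace myapp

int main()
{
    myapp::point p{1.5, -2.0};

    json j = p;                        // serialized through myapp::to_json
    auto q = j.get<myapp::point>();    // deserialized through myapp::from_json

    // standard containers are handled by the compatible-type overloads above
    json arr = std::vector<int>{1, 2, 3};
    json obj = std::map<std::string, int>{{"a", 1}, {"b", 2}};

    std::cout << j << ' ' << arr << ' ' << obj << ' ' << q.y << '\n';
}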
Ignores a UFT Byte Order Mark at +beginning of input. Does not support changing the underlying std::streambuf +in mid-input. Maintains underlying std::istream and std::streambuf to support +subsequent use of standard std::istream operations to process any input +characters following those used in parsing the JSON input. Clears the +std::istream flags; any input errors (e.g., EOF) will be detected by the first +subsequent call for input from the std::istream. +*/ +class input_stream_adapter : public input_adapter_protocol +{ + public: + ~input_stream_adapter() override + { + // clear stream flags; we use underlying streambuf I/O, do not + // maintain ifstream flags + is.clear(); + } + + explicit input_stream_adapter(std::istream& i) + : is(i), sb(*i.rdbuf()) + { + // skip byte order mark + std::char_traits<char>::int_type c; + if ((c = get_character()) == 0xEF) + { + if ((c = get_character()) == 0xBB) + { + if ((c = get_character()) == 0xBF) + { + return; // Ignore BOM + } + else if (c != std::char_traits<char>::eof()) + { + is.unget(); + } + is.putback('\xBB'); + } + else if (c != std::char_traits<char>::eof()) + { + is.unget(); + } + is.putback('\xEF'); + } + else if (c != std::char_traits<char>::eof()) + { + is.unget(); // no byte order mark; process as usual + } + } + + // delete because of pointer members + input_stream_adapter(const input_stream_adapter&) = delete; + input_stream_adapter& operator=(input_stream_adapter&) = delete; + + // std::istream/std::streambuf use std::char_traits<char>::to_int_type, to + // ensure that std::char_traits<char>::eof() and the character 0xFF do not + // end up as the same value, eg. 0xFFFFFFFF. + std::char_traits<char>::int_type get_character() override + { + return sb.sbumpc(); + } + + void unget_character() override + { + sb.sungetc(); // is.unget() avoided for performance + } + + private: + /// the associated input stream + std::istream& is; + std::streambuf& sb; +}; + +/// input adapter for buffer input +class input_buffer_adapter : public input_adapter_protocol +{ + public: + input_buffer_adapter(const char* b, const std::size_t l) + : cursor(b), limit(b + l), start(b) + { + // skip byte order mark + if (l >= 3 and b[0] == '\xEF' and b[1] == '\xBB' and b[2] == '\xBF') + { + cursor += 3; + } + } + + // delete because of pointer members + input_buffer_adapter(const input_buffer_adapter&) = delete; + input_buffer_adapter& operator=(input_buffer_adapter&) = delete; + + std::char_traits<char>::int_type get_character() noexcept override + { + if (JSON_LIKELY(cursor < limit)) + { + return std::char_traits<char>::to_int_type(*(cursor++)); + } + + return std::char_traits<char>::eof(); + } + + void unget_character() noexcept override + { + if (JSON_LIKELY(cursor > start)) + { + --cursor; + } + } + + private: + /// pointer to the current character + const char* cursor; + /// pointer past the last character + const char* limit; + /// pointer to the first character + const char* start; +}; + +class input_adapter +{ + public: + // native support + + /// input adapter for input stream + input_adapter(std::istream& i) + : ia(std::make_shared<input_stream_adapter>(i)) {} + + /// input adapter for input stream + input_adapter(std::istream&& i) + : ia(std::make_shared<input_stream_adapter>(i)) {} + + /// input adapter for buffer + template<typename CharT, + typename std::enable_if< + std::is_pointer<CharT>::value and + std::is_integral<typename std::remove_pointer<CharT>::type>::value and + sizeof(typename std::remove_pointer<CharT>::type) == 1, + int>::type = 0> 
+ input_adapter(CharT b, std::size_t l) + : ia(std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(b), l)) {} + + // derived support + + /// input adapter for string literal + template<typename CharT, + typename std::enable_if< + std::is_pointer<CharT>::value and + std::is_integral<typename std::remove_pointer<CharT>::type>::value and + sizeof(typename std::remove_pointer<CharT>::type) == 1, + int>::type = 0> + input_adapter(CharT b) + : input_adapter(reinterpret_cast<const char*>(b), + std::strlen(reinterpret_cast<const char*>(b))) {} + + /// input adapter for iterator range with contiguous storage + template<class IteratorType, + typename std::enable_if< + std::is_same<typename std::iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value, + int>::type = 0> + input_adapter(IteratorType first, IteratorType last) + { + // assertion to check that the iterator range is indeed contiguous, + // see http://stackoverflow.com/a/35008842/266378 for more discussion + assert(std::accumulate( + first, last, std::pair<bool, int>(true, 0), + [&first](std::pair<bool, int> res, decltype(*first) val) + { + res.first &= (val == *(std::next(std::addressof(*first), res.second++))); + return res; + }).first); + + // assertion to check that each element is 1 byte long + static_assert( + sizeof(typename std::iterator_traits<IteratorType>::value_type) == 1, + "each element in the iterator range must have the size of 1 byte"); + + const auto len = static_cast<size_t>(std::distance(first, last)); + if (JSON_LIKELY(len > 0)) + { + // there is at least one element: use the address of first + ia = std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(&(*first)), len); + } + else + { + // the address of first cannot be used: use nullptr + ia = std::make_shared<input_buffer_adapter>(nullptr, len); + } + } + + /// input adapter for array + template<class T, std::size_t N> + input_adapter(T (&array)[N]) + : input_adapter(std::begin(array), std::end(array)) {} + + /// input adapter for contiguous container + template<class ContiguousContainer, typename + std::enable_if<not std::is_pointer<ContiguousContainer>::value and + std::is_base_of<std::random_access_iterator_tag, typename std::iterator_traits<decltype(std::begin(std::declval<ContiguousContainer const>()))>::iterator_category>::value, + int>::type = 0> + input_adapter(const ContiguousContainer& c) + : input_adapter(std::begin(c), std::end(c)) {} + + operator input_adapter_t() + { + return ia; + } + + private: + /// the actual adapter + input_adapter_t ia = nullptr; +}; + +////////////////////// +// lexer and parser // +////////////////////// + +/*! +@brief lexical analysis + +This class organizes the lexical analysis during JSON deserialization. 
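These adapters are what let the parser accept several input sources through a single interface. A small sketch of the resulting public behaviour (illustrative only, not part of the vendored header; the include path is assumed):

#include <sstream>
#include <string>
#include <vector>
#include "nlohmann/json.hpp"   // assumed include path

using json = nlohmann::json;

int main()
{
    // string literals and std::string go through the buffer adapter
    json a = json::parse("[1, 2, 3]");
    std::string s = R"({"ok": true})";
    json b = json::parse(s);

    // any std::istream goes through the stream adapter; a leading UTF-8
    // byte order mark is skipped
    std::istringstream iss(R"({"from": "stream"})");
    json c = json::parse(iss);

    // contiguous containers of 1-byte elements are also accepted
    std::vector<char> buf = {'[', '4', ',', '5', ']'};
    json d = json::parse(buf);

    (void) a; (void) b; (void) c; (void) d;
}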
+*/ +template<typename BasicJsonType> +class lexer +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + + public: + /// token types for the parser + enum class token_type + { + uninitialized, ///< indicating the scanner is uninitialized + literal_true, ///< the `true` literal + literal_false, ///< the `false` literal + literal_null, ///< the `null` literal + value_string, ///< a string -- use get_string() for actual value + value_unsigned, ///< an unsigned integer -- use get_number_unsigned() for actual value + value_integer, ///< a signed integer -- use get_number_integer() for actual value + value_float, ///< an floating point number -- use get_number_float() for actual value + begin_array, ///< the character for array begin `[` + begin_object, ///< the character for object begin `{` + end_array, ///< the character for array end `]` + end_object, ///< the character for object end `}` + name_separator, ///< the name separator `:` + value_separator, ///< the value separator `,` + parse_error, ///< indicating a parse error + end_of_input, ///< indicating the end of the input buffer + literal_or_value ///< a literal or the begin of a value (only for diagnostics) + }; + + /// return name of values of type token_type (only used for errors) + static const char* token_type_name(const token_type t) noexcept + { + switch (t) + { + case token_type::uninitialized: + return "<uninitialized>"; + case token_type::literal_true: + return "true literal"; + case token_type::literal_false: + return "false literal"; + case token_type::literal_null: + return "null literal"; + case token_type::value_string: + return "string literal"; + case lexer::token_type::value_unsigned: + case lexer::token_type::value_integer: + case lexer::token_type::value_float: + return "number literal"; + case token_type::begin_array: + return "'['"; + case token_type::begin_object: + return "'{'"; + case token_type::end_array: + return "']'"; + case token_type::end_object: + return "'}'"; + case token_type::name_separator: + return "':'"; + case token_type::value_separator: + return "','"; + case token_type::parse_error: + return "<parse error>"; + case token_type::end_of_input: + return "end of input"; + case token_type::literal_or_value: + return "'[', '{', or a literal"; + default: // catch non-enum values + return "unknown token"; // LCOV_EXCL_LINE + } + } + + explicit lexer(detail::input_adapter_t adapter) + : ia(std::move(adapter)), decimal_point_char(get_decimal_point()) {} + + // delete because of pointer members + lexer(const lexer&) = delete; + lexer& operator=(lexer&) = delete; + + private: + ///////////////////// + // locales + ///////////////////// + + /// return the locale-dependent decimal point + static char get_decimal_point() noexcept + { + const auto loc = localeconv(); + assert(loc != nullptr); + return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point); + } + + ///////////////////// + // scan functions + ///////////////////// + + /*! + @brief get codepoint from 4 hex characters following `\u` + + For input "\u c1 c2 c3 c4" the codepoint is: + (c1 * 0x1000) + (c2 * 0x0100) + (c3 * 0x0010) + c4 + = (c1 << 12) + (c2 << 8) + (c3 << 4) + (c4 << 0) + + Furthermore, the possible characters '0'..'9', 'A'..'F', and 'a'..'f' + must be converted to the integers 0x0..0x9, 0xA..0xF, 0xA..0xF, resp. 
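[Editor's note] The lexer caches the locale's decimal separator once (get_decimal_point above) because the strtod family it later relies on is locale-dependent; scan_number substitutes this character for '.' before conversion. A standalone sketch of the same localeconv lookup; the function name is hypothetical.

#include <cassert>
#include <clocale>
#include <iostream>

char current_decimal_point()
{
    const auto* loc = std::localeconv();
    assert(loc != nullptr);
    // treat a missing or empty decimal_point string as the default '.'
    return (loc->decimal_point == nullptr || *loc->decimal_point == '\0')
               ? '.'
               : *loc->decimal_point;
}

int main()
{
    // with e.g. a German locale active (std::setlocale(LC_NUMERIC, "de_DE.UTF-8")),
    // this would print ',' instead of '.'
    std::cout << "decimal point: " << current_decimal_point() << '\n';
}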
The + conversion is done by subtracting the offset (0x30, 0x37, and 0x57) + between the ASCII value of the character and the desired integer value. + + @return codepoint (0x0000..0xFFFF) or -1 in case of an error (e.g. EOF or + non-hex character) + */ + int get_codepoint() + { + // this function only makes sense after reading `\u` + assert(current == 'u'); + int codepoint = 0; + + const auto factors = { 12, 8, 4, 0 }; + for (const auto factor : factors) + { + get(); + + if (current >= '0' and current <= '9') + { + codepoint += ((current - 0x30) << factor); + } + else if (current >= 'A' and current <= 'F') + { + codepoint += ((current - 0x37) << factor); + } + else if (current >= 'a' and current <= 'f') + { + codepoint += ((current - 0x57) << factor); + } + else + { + return -1; + } + } + + assert(0x0000 <= codepoint and codepoint <= 0xFFFF); + return codepoint; + } + + /*! + @brief check if the next byte(s) are inside a given range + + Adds the current byte and, for each passed range, reads a new byte and + checks if it is inside the range. If a violation was detected, set up an + error message and return false. Otherwise, return true. + + @param[in] ranges list of integers; interpreted as list of pairs of + inclusive lower and upper bound, respectively + + @pre The passed list @a ranges must have 2, 4, or 6 elements; that is, + 1, 2, or 3 pairs. This precondition is enforced by an assertion. + + @return true if and only if no range violation was detected + */ + bool next_byte_in_range(std::initializer_list<int> ranges) + { + assert(ranges.size() == 2 or ranges.size() == 4 or ranges.size() == 6); + add(current); + + for (auto range = ranges.begin(); range != ranges.end(); ++range) + { + get(); + if (JSON_LIKELY(*range <= current and current <= *(++range))) + { + add(current); + } + else + { + error_message = "invalid string: ill-formed UTF-8 byte"; + return false; + } + } + + return true; + } + + /*! + @brief scan a string literal + + This function scans a string according to Sect. 7 of RFC 7159. While + scanning, bytes are escaped and copied into buffer yytext. Then the function + returns successfully, yytext is *not* null-terminated (as it may contain \0 + bytes), and yytext.size() is the number of bytes in the string. + + @return token_type::value_string if string could be successfully scanned, + token_type::parse_error otherwise + + @note In case of errors, variable error_message contains a textual + description. 
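[Editor's note] The hex-digit arithmetic in get_codepoint() above can be reproduced in isolation: each of the four digits after `\u` is mapped to its numeric value via the ASCII offsets 0x30/0x37/0x57 and shifted into place by 12, 8, 4, and 0 bits. A standalone sketch; decode_u_escape is a hypothetical name, not part of the header.

#include <iostream>

int decode_u_escape(const char (&hex)[5])  // four hex digits plus NUL
{
    int codepoint = 0;
    const int factors[] = {12, 8, 4, 0};
    for (int i = 0; i < 4; ++i)
    {
        const char c = hex[i];
        if (c >= '0' && c <= '9')
            codepoint += (c - 0x30) << factors[i];
        else if (c >= 'A' && c <= 'F')
            codepoint += (c - 0x37) << factors[i];
        else if (c >= 'a' && c <= 'f')
            codepoint += (c - 0x57) << factors[i];
        else
            return -1;  // not a hex digit
    }
    return codepoint;
}

int main()
{
    std::cout << std::hex << decode_u_escape("20AC") << '\n';  // 20ac (the euro sign)
}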
+ */ + token_type scan_string() + { + // reset yytext (ignore opening quote) + reset(); + + // we entered the function by reading an open quote + assert(current == '\"'); + + while (true) + { + // get next character + switch (get()) + { + // end of file while parsing string + case std::char_traits<char>::eof(): + { + error_message = "invalid string: missing closing quote"; + return token_type::parse_error; + } + + // closing quote + case '\"': + { + return token_type::value_string; + } + + // escapes + case '\\': + { + switch (get()) + { + // quotation mark + case '\"': + add('\"'); + break; + // reverse solidus + case '\\': + add('\\'); + break; + // solidus + case '/': + add('/'); + break; + // backspace + case 'b': + add('\b'); + break; + // form feed + case 'f': + add('\f'); + break; + // line feed + case 'n': + add('\n'); + break; + // carriage return + case 'r': + add('\r'); + break; + // tab + case 't': + add('\t'); + break; + + // unicode escapes + case 'u': + { + const int codepoint1 = get_codepoint(); + int codepoint = codepoint1; // start with codepoint1 + + if (JSON_UNLIKELY(codepoint1 == -1)) + { + error_message = "invalid string: '\\u' must be followed by 4 hex digits"; + return token_type::parse_error; + } + + // check if code point is a high surrogate + if (0xD800 <= codepoint1 and codepoint1 <= 0xDBFF) + { + // expect next \uxxxx entry + if (JSON_LIKELY(get() == '\\' and get() == 'u')) + { + const int codepoint2 = get_codepoint(); + + if (JSON_UNLIKELY(codepoint2 == -1)) + { + error_message = "invalid string: '\\u' must be followed by 4 hex digits"; + return token_type::parse_error; + } + + // check if codepoint2 is a low surrogate + if (JSON_LIKELY(0xDC00 <= codepoint2 and codepoint2 <= 0xDFFF)) + { + // overwrite codepoint + codepoint = + // high surrogate occupies the most significant 22 bits + (codepoint1 << 10) + // low surrogate occupies the least significant 15 bits + + codepoint2 + // there is still the 0xD800, 0xDC00 and 0x10000 noise + // in the result so we have to subtract with: + // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00 + - 0x35FDC00; + } + else + { + error_message = "invalid string: surrogate U+DC00..U+DFFF must be followed by U+DC00..U+DFFF"; + return token_type::parse_error; + } + } + else + { + error_message = "invalid string: surrogate U+DC00..U+DFFF must be followed by U+DC00..U+DFFF"; + return token_type::parse_error; + } + } + else + { + if (JSON_UNLIKELY(0xDC00 <= codepoint1 and codepoint1 <= 0xDFFF)) + { + error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF"; + return token_type::parse_error; + } + } + + // result of the above calculation yields a proper codepoint + assert(0x00 <= codepoint and codepoint <= 0x10FFFF); + + // translate codepoint into bytes + if (codepoint < 0x80) + { + // 1-byte characters: 0xxxxxxx (ASCII) + add(codepoint); + } + else if (codepoint <= 0x7FF) + { + // 2-byte characters: 110xxxxx 10xxxxxx + add(0xC0 | (codepoint >> 6)); + add(0x80 | (codepoint & 0x3F)); + } + else if (codepoint <= 0xFFFF) + { + // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx + add(0xE0 | (codepoint >> 12)); + add(0x80 | ((codepoint >> 6) & 0x3F)); + add(0x80 | (codepoint & 0x3F)); + } + else + { + // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + add(0xF0 | (codepoint >> 18)); + add(0x80 | ((codepoint >> 12) & 0x3F)); + add(0x80 | ((codepoint >> 6) & 0x3F)); + add(0x80 | (codepoint & 0x3F)); + } + + break; + } + + // other characters after escape + default: + error_message = "invalid string: forbidden 
character after backslash"; + return token_type::parse_error; + } + + break; + } + + // invalid control characters + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + { + error_message = "invalid string: control character must be escaped"; + return token_type::parse_error; + } + + // U+0020..U+007F (except U+0022 (quote) and U+005C (backspace)) + case 0x20: + case 0x21: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: + case 0x59: + case 0x5A: + case 0x5B: + case 0x5D: + case 0x5E: + case 0x5F: + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: + case 0x79: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: + { + add(current); + break; + } + + // U+0080..U+07FF: bytes C2..DF 80..BF + case 0xC2: + case 0xC3: + case 0xC4: + case 0xC5: + case 0xC6: + case 0xC7: + case 0xC8: + case 0xC9: + case 0xCA: + case 0xCB: + case 0xCC: + case 0xCD: + case 0xCE: + case 0xCF: + case 0xD0: + case 0xD1: + case 0xD2: + case 0xD3: + case 0xD4: + case 0xD5: + case 0xD6: + case 0xD7: + case 0xD8: + case 0xD9: + case 0xDA: + case 0xDB: + case 0xDC: + case 0xDD: + case 0xDE: + case 0xDF: + { + if (JSON_UNLIKELY(not next_byte_in_range({0x80, 0xBF}))) + { + return token_type::parse_error; + } + break; + } + + // U+0800..U+0FFF: bytes E0 A0..BF 80..BF + case 0xE0: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF + // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xEE: + case 0xEF: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // U+D000..U+D7FF: bytes ED 80..9F 80..BF + case 0xED: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x9F, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // U+10000..U+3FFFF F0 90..BF 80..BF 80..BF + case 0xF0: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // U+40000..U+FFFFF F1..F3 
80..BF 80..BF 80..BF + case 0xF1: + case 0xF2: + case 0xF3: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF + case 0xF4: + { + if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF})))) + { + return token_type::parse_error; + } + break; + } + + // remaining bytes (80..C1 and F5..FF) are ill-formed + default: + { + error_message = "invalid string: ill-formed UTF-8 byte"; + return token_type::parse_error; + } + } + } + } + + static void strtof(float& f, const char* str, char** endptr) noexcept + { + f = std::strtof(str, endptr); + } + + static void strtof(double& f, const char* str, char** endptr) noexcept + { + f = std::strtod(str, endptr); + } + + static void strtof(long double& f, const char* str, char** endptr) noexcept + { + f = std::strtold(str, endptr); + } + + /*! + @brief scan a number literal + + This function scans a string according to Sect. 6 of RFC 7159. + + The function is realized with a deterministic finite state machine derived + from the grammar described in RFC 7159. Starting in state "init", the + input is read and used to determined the next state. Only state "done" + accepts the number. State "error" is a trap state to model errors. In the + table below, "anything" means any character but the ones listed before. + + state | 0 | 1-9 | e E | + | - | . | anything + ---------|----------|----------|----------|---------|---------|----------|----------- + init | zero | any1 | [error] | [error] | minus | [error] | [error] + minus | zero | any1 | [error] | [error] | [error] | [error] | [error] + zero | done | done | exponent | done | done | decimal1 | done + any1 | any1 | any1 | exponent | done | done | decimal1 | done + decimal1 | decimal2 | [error] | [error] | [error] | [error] | [error] | [error] + decimal2 | decimal2 | decimal2 | exponent | done | done | done | done + exponent | any2 | any2 | [error] | sign | sign | [error] | [error] + sign | any2 | any2 | [error] | [error] | [error] | [error] | [error] + any2 | any2 | any2 | done | done | done | done | done + + The state machine is realized with one label per state (prefixed with + "scan_number_") and `goto` statements between them. The state machine + contains cycles, but any cycle can be left when EOF is read. Therefore, + the function is guaranteed to terminate. + + During scanning, the read bytes are stored in yytext. This string is + then converted to a signed integer, an unsigned integer, or a + floating-point number. + + @return token_type::value_unsigned, token_type::value_integer, or + token_type::value_float if number could be successfully scanned, + token_type::parse_error otherwise + + @note The scanner is independent of the current locale. Internally, the + locale's decimal point is used instead of `.` to work with the + locale-dependent converters. 
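[Editor's note] For \u escapes outside the Basic Multilingual Plane, scan_string() above combines a high and a low surrogate with the constant 0x35FDC00 and then emits one to four UTF-8 bytes. The same arithmetic pulled out into a standalone sketch; codepoint_to_utf8 is a hypothetical helper, not the library's.

#include <cassert>
#include <iostream>
#include <string>

std::string codepoint_to_utf8(int cp)
{
    std::string out;
    if (cp < 0x80)                       // 1 byte: 0xxxxxxx
    {
        out += static_cast<char>(cp);
    }
    else if (cp <= 0x7FF)                // 2 bytes: 110xxxxx 10xxxxxx
    {
        out += static_cast<char>(0xC0 | (cp >> 6));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    }
    else if (cp <= 0xFFFF)               // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
    {
        out += static_cast<char>(0xE0 | (cp >> 12));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    }
    else                                 // 4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
    {
        out += static_cast<char>(0xF0 | (cp >> 18));
        out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    }
    return out;
}

int main()
{
    // "\uD83D\uDE00": combine high and low surrogate exactly as the lexer does
    const int hi = 0xD83D, lo = 0xDE00;
    const int cp = (hi << 10) + lo - 0x35FDC00;  // == 0x1F600
    assert(cp == 0x1F600);
    std::cout << codepoint_to_utf8(cp) << '\n';
}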
+ */ + token_type scan_number() + { + // reset yytext to store the number's bytes + reset(); + + // the type of the parsed number; initially set to unsigned; will be + // changed if minus sign, decimal point or exponent is read + token_type number_type = token_type::value_unsigned; + + // state (init): we just found out we need to scan a number + switch (current) + { + case '-': + { + add(current); + goto scan_number_minus; + } + + case '0': + { + add(current); + goto scan_number_zero; + } + + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_any1; + } + + default: + { + // all other characters are rejected outside scan_number() + assert(false); // LCOV_EXCL_LINE + } + } + +scan_number_minus: + // state: we just parsed a leading minus sign + number_type = token_type::value_integer; + switch (get()) + { + case '0': + { + add(current); + goto scan_number_zero; + } + + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_any1; + } + + default: + { + error_message = "invalid number; expected digit after '-'"; + return token_type::parse_error; + } + } + +scan_number_zero: + // state: we just parse a zero (maybe with a leading minus sign) + switch (get()) + { + case '.': + { + add(decimal_point_char); + goto scan_number_decimal1; + } + + case 'e': + case 'E': + { + add(current); + goto scan_number_exponent; + } + + default: + goto scan_number_done; + } + +scan_number_any1: + // state: we just parsed a number 0-9 (maybe with a leading minus sign) + switch (get()) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_any1; + } + + case '.': + { + add(decimal_point_char); + goto scan_number_decimal1; + } + + case 'e': + case 'E': + { + add(current); + goto scan_number_exponent; + } + + default: + goto scan_number_done; + } + +scan_number_decimal1: + // state: we just parsed a decimal point + number_type = token_type::value_float; + switch (get()) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_decimal2; + } + + default: + { + error_message = "invalid number; expected digit after '.'"; + return token_type::parse_error; + } + } + +scan_number_decimal2: + // we just parsed at least one number after a decimal point + switch (get()) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_decimal2; + } + + case 'e': + case 'E': + { + add(current); + goto scan_number_exponent; + } + + default: + goto scan_number_done; + } + +scan_number_exponent: + // we just parsed an exponent + number_type = token_type::value_float; + switch (get()) + { + case '+': + case '-': + { + add(current); + goto scan_number_sign; + } + + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_any2; + } + + default: + { + error_message = + "invalid number; expected '+', '-', or digit after exponent"; + return token_type::parse_error; + } + } + +scan_number_sign: + // we just parsed an exponent sign + switch (get()) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case 
'8': + case '9': + { + add(current); + goto scan_number_any2; + } + + default: + { + error_message = "invalid number; expected digit after exponent sign"; + return token_type::parse_error; + } + } + +scan_number_any2: + // we just parsed a number after the exponent or exponent sign + switch (get()) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + { + add(current); + goto scan_number_any2; + } + + default: + goto scan_number_done; + } + +scan_number_done: + // unget the character after the number (we only read it to know that + // we are done scanning a number) + unget(); + + char* endptr = nullptr; + errno = 0; + + // try to parse integers first and fall back to floats + if (number_type == token_type::value_unsigned) + { + const auto x = std::strtoull(yytext.data(), &endptr, 10); + + // we checked the number format before + assert(endptr == yytext.data() + yytext.size()); + + if (errno == 0) + { + value_unsigned = static_cast<number_unsigned_t>(x); + if (value_unsigned == x) + { + return token_type::value_unsigned; + } + } + } + else if (number_type == token_type::value_integer) + { + const auto x = std::strtoll(yytext.data(), &endptr, 10); + + // we checked the number format before + assert(endptr == yytext.data() + yytext.size()); + + if (errno == 0) + { + value_integer = static_cast<number_integer_t>(x); + if (value_integer == x) + { + return token_type::value_integer; + } + } + } + + // this code is reached if we parse a floating-point number or if an + // integer conversion above failed + strtof(value_float, yytext.data(), &endptr); + + // we checked the number format before + assert(endptr == yytext.data() + yytext.size()); + + return token_type::value_float; + } + + /*! + @param[in] literal_text the literal text to expect + @param[in] length the length of the passed literal text + @param[in] return_type the token type to return on success + */ + token_type scan_literal(const char* literal_text, const std::size_t length, + token_type return_type) + { + assert(current == literal_text[0]); + for (std::size_t i = 1; i < length; ++i) + { + if (JSON_UNLIKELY(get() != literal_text[i])) + { + error_message = "invalid literal"; + return token_type::parse_error; + } + } + return return_type; + } + + ///////////////////// + // input management + ///////////////////// + + /// reset yytext; current character is beginning of token + void reset() noexcept + { + yytext.clear(); + token_string.clear(); + token_string.push_back(std::char_traits<char>::to_char_type(current)); + } + + /* + @brief get next character from the input + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns a + `std::char_traits<char>::eof()` in that case. Stores the scanned characters + for use in error messages. 
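[Editor's note] After the state machine accepts a number, the lexer above tries the cheapest representation first: strtoull or strtoll, accepted only when errno stayed clear and the value survives the cast into the target number type; everything else falls back to the strtof/strtod/strtold overloads. A simplified standalone sketch, where the sign of the first character stands in for the token type the real scanner already knows, and fixed int64_t/uint64_t/double replace the basic_json number types.

#include <cerrno>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>

void convert(const std::string& yytext)
{
    char* endptr = nullptr;
    errno = 0;

    if (yytext[0] != '-')
    {
        const unsigned long long x = std::strtoull(yytext.c_str(), &endptr, 10);
        const bool consumed_all = (endptr == yytext.c_str() + yytext.size());
        if (errno == 0 && consumed_all && static_cast<std::uint64_t>(x) == x)
        {
            std::cout << "unsigned: " << x << '\n';
            return;
        }
    }
    else
    {
        const long long x = std::strtoll(yytext.c_str(), &endptr, 10);
        const bool consumed_all = (endptr == yytext.c_str() + yytext.size());
        if (errno == 0 && consumed_all && static_cast<std::int64_t>(x) == x)
        {
            std::cout << "integer: " << x << '\n';
            return;
        }
    }

    // overflow (errno == ERANGE), a decimal point, or an exponent: use floating point
    errno = 0;
    std::cout << "float: " << std::strtod(yytext.c_str(), &endptr) << '\n';
}

int main()
{
    convert("42");                     // unsigned: 42
    convert("-7");                     // integer: -7
    convert("3.14");                   // float: 3.14
    convert("123456789012345678901");  // float: 1.23457e+20
}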
+ + @return character read from the input + */ + std::char_traits<char>::int_type get() + { + ++chars_read; + current = ia->get_character(); + if (JSON_LIKELY(current != std::char_traits<char>::eof())) + { + token_string.push_back(std::char_traits<char>::to_char_type(current)); + } + return current; + } + + /// unget current character (return it again on next get) + void unget() + { + --chars_read; + if (JSON_LIKELY(current != std::char_traits<char>::eof())) + { + ia->unget_character(); + assert(token_string.size() != 0); + token_string.pop_back(); + } + } + + /// add a character to yytext + void add(int c) + { + yytext.push_back(std::char_traits<char>::to_char_type(c)); + } + + public: + ///////////////////// + // value getters + ///////////////////// + + /// return integer value + constexpr number_integer_t get_number_integer() const noexcept + { + return value_integer; + } + + /// return unsigned integer value + constexpr number_unsigned_t get_number_unsigned() const noexcept + { + return value_unsigned; + } + + /// return floating-point value + constexpr number_float_t get_number_float() const noexcept + { + return value_float; + } + + /// return current string value (implicitly resets the token; useful only once) + std::string move_string() + { + return std::move(yytext); + } + + ///////////////////// + // diagnostics + ///////////////////// + + /// return position of last read token + constexpr std::size_t get_position() const noexcept + { + return chars_read; + } + + /// return the last read token (for errors only). Will never contain EOF + /// (an arbitrary value that is not a valid char value, often -1), because + /// 255 may legitimately occur. May contain NUL, which should be escaped. + std::string get_token_string() const + { + // escape control characters + std::string result; + for (const auto c : token_string) + { + if ('\x00' <= c and c <= '\x1F') + { + // escape control characters + std::stringstream ss; + ss << "<U+" << std::setw(4) << std::uppercase << std::setfill('0') + << std::hex << static_cast<int>(c) << ">"; + result += ss.str(); + } + else + { + // add character as is + result.push_back(c); + } + } + + return result; + } + + /// return syntax error message + constexpr const char* get_error_message() const noexcept + { + return error_message; + } + + ///////////////////// + // actual scanner + ///////////////////// + + token_type scan() + { + // read next character and ignore whitespace + do + { + get(); + } + while (current == ' ' or current == '\t' or current == '\n' or current == '\r'); + + switch (current) + { + // structural characters + case '[': + return token_type::begin_array; + case ']': + return token_type::end_array; + case '{': + return token_type::begin_object; + case '}': + return token_type::end_object; + case ':': + return token_type::name_separator; + case ',': + return token_type::value_separator; + + // literals + case 't': + return scan_literal("true", 4, token_type::literal_true); + case 'f': + return scan_literal("false", 5, token_type::literal_false); + case 'n': + return scan_literal("null", 4, token_type::literal_null); + + // string + case '\"': + return scan_string(); + + // number + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + return scan_number(); + + // end of input (the null byte is needed when parsing from + // string literals) + case '\0': + case std::char_traits<char>::eof(): + return token_type::end_of_input; + + // error + default: + 
error_message = "invalid literal"; + return token_type::parse_error; + } + } + + private: + /// input adapter + detail::input_adapter_t ia = nullptr; + + /// the current character + std::char_traits<char>::int_type current = std::char_traits<char>::eof(); + + /// the number of characters read + std::size_t chars_read = 0; + + /// raw input token string (for error messages) + std::vector<char> token_string {}; + + /// buffer for variable-length tokens (numbers, strings) + std::string yytext {}; + + /// a description of occurred lexer errors + const char* error_message = ""; + + // number values + number_integer_t value_integer = 0; + number_unsigned_t value_unsigned = 0; + number_float_t value_float = 0; + + /// the decimal point + const char decimal_point_char = '.'; +}; + +/*! +@brief syntax analysis + +This class implements a recursive decent parser. +*/ +template<typename BasicJsonType> +class parser +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using lexer_t = lexer<BasicJsonType>; + using token_type = typename lexer_t::token_type; + + public: + enum class parse_event_t : uint8_t + { + /// the parser read `{` and started to process a JSON object + object_start, + /// the parser read `}` and finished processing a JSON object + object_end, + /// the parser read `[` and started to process a JSON array + array_start, + /// the parser read `]` and finished processing a JSON array + array_end, + /// the parser read a key of a value in an object + key, + /// the parser finished reading a JSON value + value + }; + + using parser_callback_t = + std::function<bool(int depth, parse_event_t event, BasicJsonType& parsed)>; + + /// a parser reading from an input adapter + explicit parser(detail::input_adapter_t adapter, + const parser_callback_t cb = nullptr, + const bool allow_exceptions_ = true) + : callback(cb), m_lexer(adapter), allow_exceptions(allow_exceptions_) + {} + + /*! + @brief public parser interface + + @param[in] strict whether to expect the last token to be EOF + @param[in,out] result parsed JSON value + + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + */ + void parse(const bool strict, BasicJsonType& result) + { + // read first token + get_token(); + + parse_internal(true, result); + result.assert_invariant(); + + // in strict mode, input must be completely read + if (strict) + { + get_token(); + expect(token_type::end_of_input); + } + + // in case of an error, return discarded value + if (errored) + { + result = value_t::discarded; + return; + } + + // set top-level value to null if it was discarded by the callback + // function + if (result.is_discarded()) + { + result = nullptr; + } + } + + /*! + @brief public accept interface + + @param[in] strict whether to expect the last token to be EOF + @return whether the input is a proper JSON text + */ + bool accept(const bool strict = true) + { + // read first token + get_token(); + + if (not accept_internal()) + { + return false; + } + + // strict => last token must be EOF + return not strict or (get_token() == token_type::end_of_input); + } + + private: + /*! 
+ @brief the actual parser + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + */ + void parse_internal(bool keep, BasicJsonType& result) + { + // never parse after a parse error was detected + assert(not errored); + + // start with a discarded value + if (not result.is_discarded()) + { + result.m_value.destroy(result.m_type); + result.m_type = value_t::discarded; + } + + switch (last_token) + { + case token_type::begin_object: + { + if (keep) + { + if (callback) + { + keep = callback(depth++, parse_event_t::object_start, result); + } + + if (not callback or keep) + { + // explicitly set result to object to cope with {} + result.m_type = value_t::object; + result.m_value = value_t::object; + } + } + + // read next token + get_token(); + + // closing } -> we are done + if (last_token == token_type::end_object) + { + if (keep and callback and not callback(--depth, parse_event_t::object_end, result)) + { + result.m_value.destroy(result.m_type); + result.m_type = value_t::discarded; + } + break; + } + + // parse values + std::string key; + BasicJsonType value; + while (true) + { + // store key + if (not expect(token_type::value_string)) + { + return; + } + key = m_lexer.move_string(); + + bool keep_tag = false; + if (keep) + { + if (callback) + { + BasicJsonType k(key); + keep_tag = callback(depth, parse_event_t::key, k); + } + else + { + keep_tag = true; + } + } + + // parse separator (:) + get_token(); + if (not expect(token_type::name_separator)) + { + return; + } + + // parse and add value + get_token(); + value.m_value.destroy(value.m_type); + value.m_type = value_t::discarded; + parse_internal(keep, value); + + if (JSON_UNLIKELY(errored)) + { + return; + } + + if (keep and keep_tag and not value.is_discarded()) + { + result.m_value.object->emplace(std::move(key), std::move(value)); + } + + // comma -> next value + get_token(); + if (last_token == token_type::value_separator) + { + get_token(); + continue; + } + + // closing } + if (not expect(token_type::end_object)) + { + return; + } + break; + } + + if (keep and callback and not callback(--depth, parse_event_t::object_end, result)) + { + result.m_value.destroy(result.m_type); + result.m_type = value_t::discarded; + } + break; + } + + case token_type::begin_array: + { + if (keep) + { + if (callback) + { + keep = callback(depth++, parse_event_t::array_start, result); + } + + if (not callback or keep) + { + // explicitly set result to array to cope with [] + result.m_type = value_t::array; + result.m_value = value_t::array; + } + } + + // read next token + get_token(); + + // closing ] -> we are done + if (last_token == token_type::end_array) + { + if (callback and not callback(--depth, parse_event_t::array_end, result)) + { + result.m_value.destroy(result.m_type); + result.m_type = value_t::discarded; + } + break; + } + + // parse values + BasicJsonType value; + while (true) + { + // parse value + value.m_value.destroy(value.m_type); + value.m_type = value_t::discarded; + parse_internal(keep, value); + + if (JSON_UNLIKELY(errored)) + { + return; + } + + if (keep and not value.is_discarded()) + { + result.m_value.array->push_back(std::move(value)); + } + + // comma -> next value + get_token(); + if (last_token == token_type::value_separator) + { + get_token(); + continue; + } + + // closing ] + if (not expect(token_type::end_array)) + { + return; + } + break; + } + + if (keep and callback and not callback(--depth, 
parse_event_t::array_end, result)) + { + result.m_value.destroy(result.m_type); + result.m_type = value_t::discarded; + } + break; + } + + case token_type::literal_null: + { + result.m_type = value_t::null; + break; + } + + case token_type::value_string: + { + result.m_type = value_t::string; + result.m_value = m_lexer.move_string(); + break; + } + + case token_type::literal_true: + { + result.m_type = value_t::boolean; + result.m_value = true; + break; + } + + case token_type::literal_false: + { + result.m_type = value_t::boolean; + result.m_value = false; + break; + } + + case token_type::value_unsigned: + { + result.m_type = value_t::number_unsigned; + result.m_value = m_lexer.get_number_unsigned(); + break; + } + + case token_type::value_integer: + { + result.m_type = value_t::number_integer; + result.m_value = m_lexer.get_number_integer(); + break; + } + + case token_type::value_float: + { + result.m_type = value_t::number_float; + result.m_value = m_lexer.get_number_float(); + + // throw in case of infinity or NAN + if (JSON_UNLIKELY(not std::isfinite(result.m_value.number_float))) + { + if (allow_exceptions) + { + JSON_THROW(out_of_range::create(406, "number overflow parsing '" + + m_lexer.get_token_string() + "'")); + } + expect(token_type::uninitialized); + } + break; + } + + case token_type::parse_error: + { + // using "uninitialized" to avoid "expected" message + if (not expect(token_type::uninitialized)) + { + return; + } + break; // LCOV_EXCL_LINE + } + + default: + { + // the last token was unexpected; we expected a value + if (not expect(token_type::literal_or_value)) + { + return; + } + break; // LCOV_EXCL_LINE + } + } + + if (keep and callback and not callback(depth, parse_event_t::value, result)) + { + result.m_type = value_t::discarded; + } + } + + /*! + @brief the actual acceptor + + @invariant 1. The last token is not yet processed. Therefore, the caller + of this function must make sure a token has been read. + 2. When this function returns, the last token is processed. + That is, the last read character was already considered. + + This invariant makes sure that no token needs to be "unput". 
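[Editor's note] The parse()/accept() pair above is normally reached through the public basic_json interface. A usage sketch, assuming the vendored header can be included as "json.hpp" (the exact include path in this tree may differ): accept() only validates and never throws, while a parser_callback_t can veto parts of the document during parse().

#include <iostream>
#include "json.hpp"

using nlohmann::json;

int main()
{
    // accept(): pure syntax check, builds no value
    std::cout << std::boolalpha
              << json::accept("[1, 2, 3]") << '\n'   // true
              << json::accept("[1, 2, 3")  << '\n';  // false

    // parse() with a callback: drop every object member named "debug"
    json::parser_callback_t cb =
        [](int /*depth*/, json::parse_event_t event, json& parsed)
    {
        if (event == json::parse_event_t::key && parsed == json("debug"))
        {
            return false;  // discard this key and its value
        }
        return true;
    };

    const json filtered = json::parse(R"({"x": 1, "debug": true})", cb);
    std::cout << filtered.dump() << '\n';  // {"x":1}
}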
+ */ + bool accept_internal() + { + switch (last_token) + { + case token_type::begin_object: + { + // read next token + get_token(); + + // closing } -> we are done + if (last_token == token_type::end_object) + { + return true; + } + + // parse values + while (true) + { + // parse key + if (last_token != token_type::value_string) + { + return false; + } + + // parse separator (:) + get_token(); + if (last_token != token_type::name_separator) + { + return false; + } + + // parse value + get_token(); + if (not accept_internal()) + { + return false; + } + + // comma -> next value + get_token(); + if (last_token == token_type::value_separator) + { + get_token(); + continue; + } + + // closing } + return (last_token == token_type::end_object); + } + } + + case token_type::begin_array: + { + // read next token + get_token(); + + // closing ] -> we are done + if (last_token == token_type::end_array) + { + return true; + } + + // parse values + while (true) + { + // parse value + if (not accept_internal()) + { + return false; + } + + // comma -> next value + get_token(); + if (last_token == token_type::value_separator) + { + get_token(); + continue; + } + + // closing ] + return (last_token == token_type::end_array); + } + } + + case token_type::value_float: + { + // reject infinity or NAN + return std::isfinite(m_lexer.get_number_float()); + } + + case token_type::literal_false: + case token_type::literal_null: + case token_type::literal_true: + case token_type::value_integer: + case token_type::value_string: + case token_type::value_unsigned: + return true; + + default: // the last token was unexpected + return false; + } + } + + /// get next token from lexer + token_type get_token() + { + return (last_token = m_lexer.scan()); + } + + /*! + @throw parse_error.101 if expected token did not occur + */ + bool expect(token_type t) + { + if (JSON_UNLIKELY(t != last_token)) + { + errored = true; + expected = t; + if (allow_exceptions) + { + throw_exception(); + } + else + { + return false; + } + } + + return true; + } + + [[noreturn]] void throw_exception() const + { + std::string error_msg = "syntax error - "; + if (last_token == token_type::parse_error) + { + error_msg += std::string(m_lexer.get_error_message()) + "; last read: '" + + m_lexer.get_token_string() + "'"; + } + else + { + error_msg += "unexpected " + std::string(lexer_t::token_type_name(last_token)); + } + + if (expected != token_type::uninitialized) + { + error_msg += "; expected " + std::string(lexer_t::token_type_name(expected)); + } + + JSON_THROW(parse_error::create(101, m_lexer.get_position(), error_msg)); + } + + private: + /// current level of recursion + int depth = 0; + /// callback function + const parser_callback_t callback = nullptr; + /// the type of the last read token + token_type last_token = token_type::uninitialized; + /// the lexer + lexer_t m_lexer; + /// whether a syntax error occurred + bool errored = false; + /// possible reason for the syntax error + token_type expected = token_type::uninitialized; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; +}; + +/////////////// +// iterators // +/////////////// + +/*! +@brief an iterator for primitive JSON types + +This class models an iterator for primitive JSON types (boolean, number, +string). It's only purpose is to allow the iterator/const_iterator classes +to "iterate" over primitive values. Internally, the iterator is modeled by +a `difference_type` variable. 
Value begin_value (`0`) models the begin, +end_value (`1`) models past the end. +*/ +class primitive_iterator_t +{ + public: + using difference_type = std::ptrdiff_t; + + constexpr difference_type get_value() const noexcept + { + return m_it; + } + + /// set iterator to a defined beginning + void set_begin() noexcept + { + m_it = begin_value; + } + + /// set iterator to a defined past the end + void set_end() noexcept + { + m_it = end_value; + } + + /// return whether the iterator can be dereferenced + constexpr bool is_begin() const noexcept + { + return m_it == begin_value; + } + + /// return whether the iterator is at end + constexpr bool is_end() const noexcept + { + return m_it == end_value; + } + + friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it == rhs.m_it; + } + + friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it < rhs.m_it; + } + + primitive_iterator_t operator+(difference_type i) + { + auto result = *this; + result += i; + return result; + } + + friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it - rhs.m_it; + } + + friend std::ostream& operator<<(std::ostream& os, primitive_iterator_t it) + { + return os << it.m_it; + } + + primitive_iterator_t& operator++() + { + ++m_it; + return *this; + } + + primitive_iterator_t const operator++(int) + { + auto result = *this; + m_it++; + return result; + } + + primitive_iterator_t& operator--() + { + --m_it; + return *this; + } + + primitive_iterator_t const operator--(int) + { + auto result = *this; + m_it--; + return result; + } + + primitive_iterator_t& operator+=(difference_type n) + { + m_it += n; + return *this; + } + + primitive_iterator_t& operator-=(difference_type n) + { + m_it -= n; + return *this; + } + + private: + static constexpr difference_type begin_value = 0; + static constexpr difference_type end_value = begin_value + 1; + + /// iterator as signed integer type + difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)(); +}; + +/*! +@brief an iterator value + +@note This structure could easily be a union, but MSVC currently does not allow +unions members with complex constructors, see https://github.com/nlohmann/json/pull/105. +*/ +template<typename BasicJsonType> struct internal_iterator +{ + /// iterator for JSON objects + typename BasicJsonType::object_t::iterator object_iterator {}; + /// iterator for JSON arrays + typename BasicJsonType::array_t::iterator array_iterator {}; + /// generic iterator for all other types + primitive_iterator_t primitive_iterator {}; +}; + +template<typename IteratorType> class iteration_proxy; + +/*! +@brief a template for a bidirectional iterator for the @ref basic_json class + +This class implements a both iterators (iterator and const_iterator) for the +@ref basic_json class. + +@note An iterator is called *initialized* when a pointer to a JSON value has + been set (e.g., by a constructor or a copy assignment). If the iterator is + default-constructed, it is *uninitialized* and most methods are undefined. + **The library uses assertions to detect calls on uninitialized iterators.** + +@requirement The class satisfies the following concept requirements: +- +[BidirectionalIterator](http://en.cppreference.com/w/cpp/concept/BidirectionalIterator): + The iterator that can be moved can be moved in both directions (i.e. + incremented and decremented). 
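[Editor's note] The begin/end model above makes every primitive JSON value (boolean, number, string) a range of exactly one element; null, as set_begin() further below shows, is an empty range. A brief usage sketch through the public interface, again assuming the header is available as "json.hpp".

#include <iostream>
#include <iterator>
#include "json.hpp"

using nlohmann::json;

int main()
{
    json number = 42;
    json null_value;  // null

    std::cout << std::distance(number.begin(), number.end()) << '\n';          // 1
    std::cout << std::distance(null_value.begin(), null_value.end()) << '\n';  // 0

    for (const auto& v : number)
    {
        std::cout << v << '\n';  // 42 -- dereferencing yields the value itself
    }
}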
+ +@since version 1.0.0, simplified in version 2.0.9, change to bidirectional + iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593) +*/ +template<typename BasicJsonType> +class iter_impl +{ + /// allow basic_json to access private members + friend iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>; + friend BasicJsonType; + friend iteration_proxy<iter_impl>; + + using object_t = typename BasicJsonType::object_t; + using array_t = typename BasicJsonType::array_t; + // make sure BasicJsonType is basic_json or const basic_json + static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value, + "iter_impl only accepts (const) basic_json"); + + public: + + /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17. + /// The C++ Standard has never required user-defined iterators to derive from std::iterator. + /// A user-defined iterator should provide publicly accessible typedefs named + /// iterator_category, value_type, difference_type, pointer, and reference. + /// Note that value_type is required to be non-const, even for constant iterators. + using iterator_category = std::bidirectional_iterator_tag; + + /// the type of the values when the iterator is dereferenced + using value_type = typename BasicJsonType::value_type; + /// a type to represent differences between iterators + using difference_type = typename BasicJsonType::difference_type; + /// defines a pointer to the type iterated over (value_type) + using pointer = typename std::conditional<std::is_const<BasicJsonType>::value, + typename BasicJsonType::const_pointer, + typename BasicJsonType::pointer>::type; + /// defines a reference to the type iterated over (value_type) + using reference = + typename std::conditional<std::is_const<BasicJsonType>::value, + typename BasicJsonType::const_reference, + typename BasicJsonType::reference>::type; + + /// default constructor + iter_impl() = default; + + /*! + @brief constructor for a given JSON instance + @param[in] object pointer to a JSON object for this iterator + @pre object != nullptr + @post The iterator is initialized; i.e. `m_object != nullptr`. + */ + explicit iter_impl(pointer object) noexcept : m_object(object) + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = typename object_t::iterator(); + break; + } + + case value_t::array: + { + m_it.array_iterator = typename array_t::iterator(); + break; + } + + default: + { + m_it.primitive_iterator = primitive_iterator_t(); + break; + } + } + } + + /*! + @note The conventional copy constructor and copy assignment are implicitly + defined. Combined with the following converting constructor and + assignment, they support: (1) copy from iterator to iterator, (2) + copy from const iterator to const iterator, and (3) conversion from + iterator to const iterator. However conversion from const iterator + to iterator is not defined. + */ + + /*! + @brief converting constructor + @param[in] other non-const iterator to copy from + @note It is not checked whether @a other is initialized. + */ + iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept + : m_object(other.m_object), m_it(other.m_it) {} + + /*! 
+ @brief converting assignment + @param[in,out] other non-const iterator to copy from + @return const/non-const iterator + @note It is not checked whether @a other is initialized. + */ + iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept + { + m_object = other.m_object; + m_it = other.m_it; + return *this; + } + + private: + /*! + @brief set the iterator to the first value + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + void set_begin() noexcept + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = m_object->m_value.object->begin(); + break; + } + + case value_t::array: + { + m_it.array_iterator = m_object->m_value.array->begin(); + break; + } + + case value_t::null: + { + // set to end so begin()==end() is true: null is empty + m_it.primitive_iterator.set_end(); + break; + } + + default: + { + m_it.primitive_iterator.set_begin(); + break; + } + } + } + + /*! + @brief set the iterator past the last value + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + void set_end() noexcept + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = m_object->m_value.object->end(); + break; + } + + case value_t::array: + { + m_it.array_iterator = m_object->m_value.array->end(); + break; + } + + default: + { + m_it.primitive_iterator.set_end(); + break; + } + } + } + + public: + /*! + @brief return a reference to the value pointed to by the iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + reference operator*() const + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + assert(m_it.object_iterator != m_object->m_value.object->end()); + return m_it.object_iterator->second; + } + + case value_t::array: + { + assert(m_it.array_iterator != m_object->m_value.array->end()); + return *m_it.array_iterator; + } + + case value_t::null: + JSON_THROW(invalid_iterator::create(214, "cannot get value")); + + default: + { + if (JSON_LIKELY(m_it.primitive_iterator.is_begin())) + { + return *m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value")); + } + } + } + + /*! + @brief dereference the iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + pointer operator->() const + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + assert(m_it.object_iterator != m_object->m_value.object->end()); + return &(m_it.object_iterator->second); + } + + case value_t::array: + { + assert(m_it.array_iterator != m_object->m_value.array->end()); + return &*m_it.array_iterator; + } + + default: + { + if (JSON_LIKELY(m_it.primitive_iterator.is_begin())) + { + return m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value")); + } + } + } + + /*! + @brief post-increment (it++) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl const operator++(int) + { + auto result = *this; + ++(*this); + return result; + } + + /*! + @brief pre-increment (++it) + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
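[Editor's note] operator*() and operator->() above refuse to dereference when nothing is there: a null value or a past-the-end position raises invalid_iterator with id 214 ("cannot get value"). A usage sketch under the same include-path assumption as before.

#include <iostream>
#include "json.hpp"

using nlohmann::json;

int main()
{
    json null_value;  // null: begin() == end(), nothing to dereference

    try
    {
        std::cout << *null_value.begin() << '\n';
    }
    catch (const json::invalid_iterator& e)
    {
        // [json.exception.invalid_iterator.214] cannot get value
        std::cout << e.what() << '\n';
    }
}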
+ */ + iter_impl& operator++() + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + std::advance(m_it.object_iterator, 1); + break; + } + + case value_t::array: + { + std::advance(m_it.array_iterator, 1); + break; + } + + default: + { + ++m_it.primitive_iterator; + break; + } + } + + return *this; + } + + /*! + @brief post-decrement (it--) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl const operator--(int) + { + auto result = *this; + --(*this); + return result; + } + + /*! + @brief pre-decrement (--it) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator--() + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + std::advance(m_it.object_iterator, -1); + break; + } + + case value_t::array: + { + std::advance(m_it.array_iterator, -1); + break; + } + + default: + { + --m_it.primitive_iterator; + break; + } + } + + return *this; + } + + /*! + @brief comparison: equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator==(const iter_impl& other) const + { + // if objects are not the same, the comparison is undefined + if (JSON_UNLIKELY(m_object != other.m_object)) + { + JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers")); + } + + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + return (m_it.object_iterator == other.m_it.object_iterator); + + case value_t::array: + return (m_it.array_iterator == other.m_it.array_iterator); + + default: + return (m_it.primitive_iterator == other.m_it.primitive_iterator); + } + } + + /*! + @brief comparison: not equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator!=(const iter_impl& other) const + { + return not operator==(other); + } + + /*! + @brief comparison: smaller + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator<(const iter_impl& other) const + { + // if objects are not the same, the comparison is undefined + if (JSON_UNLIKELY(m_object != other.m_object)) + { + JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers")); + } + + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators")); + + case value_t::array: + return (m_it.array_iterator < other.m_it.array_iterator); + + default: + return (m_it.primitive_iterator < other.m_it.primitive_iterator); + } + } + + /*! + @brief comparison: less than or equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator<=(const iter_impl& other) const + { + return not other.operator < (*this); + } + + /*! + @brief comparison: greater than + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator>(const iter_impl& other) const + { + return not operator<=(other); + } + + /*! + @brief comparison: greater than or equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator>=(const iter_impl& other) const + { + return not operator<(other); + } + + /*! + @brief add to iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
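[Editor's note] The comparison operators above enforce two rules that a usage sketch makes concrete: iterators into different JSON values must not be compared (invalid_iterator.212), and object iterators have no defined order (invalid_iterator.213).

#include <iostream>
#include "json.hpp"

using nlohmann::json;

int main()
{
    json a = {1, 2, 3};
    json b = {4, 5, 6};
    try
    {
        std::cout << (a.begin() == b.begin()) << '\n';
    }
    catch (const json::invalid_iterator& e)
    {
        // ...invalid_iterator.212] cannot compare iterators of different containers
        std::cout << e.what() << '\n';
    }

    json obj = {{"a", 1}, {"b", 2}};
    try
    {
        std::cout << (obj.begin() < obj.end()) << '\n';
    }
    catch (const json::invalid_iterator& e)
    {
        // ...invalid_iterator.213] cannot compare order of object iterators
        std::cout << e.what() << '\n';
    }
}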
+ */ + iter_impl& operator+=(difference_type i) + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators")); + + case value_t::array: + { + std::advance(m_it.array_iterator, i); + break; + } + + default: + { + m_it.primitive_iterator += i; + break; + } + } + + return *this; + } + + /*! + @brief subtract from iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator-=(difference_type i) + { + return operator+=(-i); + } + + /*! + @brief add to iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl operator+(difference_type i) const + { + auto result = *this; + result += i; + return result; + } + + /*! + @brief addition of distance and iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + friend iter_impl operator+(difference_type i, const iter_impl& it) + { + auto result = it; + result += i; + return result; + } + + /*! + @brief subtract from iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl operator-(difference_type i) const + { + auto result = *this; + result -= i; + return result; + } + + /*! + @brief return difference + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + difference_type operator-(const iter_impl& other) const + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators")); + + case value_t::array: + return m_it.array_iterator - other.m_it.array_iterator; + + default: + return m_it.primitive_iterator - other.m_it.primitive_iterator; + } + } + + /*! + @brief access to successor + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + reference operator[](difference_type n) const + { + assert(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators")); + + case value_t::array: + return *std::next(m_it.array_iterator, n); + + case value_t::null: + JSON_THROW(invalid_iterator::create(214, "cannot get value")); + + default: + { + if (JSON_LIKELY(m_it.primitive_iterator.get_value() == -n)) + { + return *m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value")); + } + } + } + + /*! + @brief return the key of an object iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + typename object_t::key_type key() const + { + assert(m_object != nullptr); + + if (JSON_LIKELY(m_object->is_object())) + { + return m_it.object_iterator->first; + } + + JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators")); + } + + /*! + @brief return the value of an iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
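[Editor's note] The arithmetic members and key() above only make sense for some container types: offsets and operator[] require array iterators (otherwise invalid_iterator.209/208), and key() requires an object iterator (otherwise invalid_iterator.207). A usage sketch:

#include <iostream>
#include "json.hpp"

using nlohmann::json;

int main()
{
    json arr = {"a", "b", "c", "d"};
    auto it = arr.begin();
    it += 2;
    std::cout << *it << '\n';                        // "c"
    std::cout << it[1] << '\n';                      // "d"
    std::cout << (arr.end() - arr.begin()) << '\n';  // 4

    json obj = {{"answer", 42}};
    std::cout << obj.begin().key() << " = " << obj.begin().value() << '\n';

    try
    {
        obj.begin() += 1;  // offsets with object iterators are rejected
    }
    catch (const json::invalid_iterator& e)
    {
        // ...invalid_iterator.209] cannot use offsets with object iterators
        std::cout << e.what() << '\n';
    }
}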
+ */ + reference value() const + { + return operator*(); + } + + private: + /// associated JSON instance + pointer m_object = nullptr; + /// the actual iterator of the associated instance + internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it = {}; +}; + +/// proxy class for the iterator_wrapper functions +template<typename IteratorType> class iteration_proxy +{ + private: + /// helper class for iteration + class iteration_proxy_internal + { + private: + /// the iterator + IteratorType anchor; + /// an index for arrays (used to create key names) + std::size_t array_index = 0; + + public: + explicit iteration_proxy_internal(IteratorType it) noexcept : anchor(it) {} + + /// dereference operator (needed for range-based for) + iteration_proxy_internal& operator*() + { + return *this; + } + + /// increment operator (needed for range-based for) + iteration_proxy_internal& operator++() + { + ++anchor; + ++array_index; + + return *this; + } + + /// inequality operator (needed for range-based for) + bool operator!=(const iteration_proxy_internal& o) const noexcept + { + return anchor != o.anchor; + } + + /// return key of the iterator + std::string key() const + { + assert(anchor.m_object != nullptr); + + switch (anchor.m_object->type()) + { + // use integer array index as key + case value_t::array: + return std::to_string(array_index); + + // use key from the object + case value_t::object: + return anchor.key(); + + // use an empty key for all primitive types + default: + return ""; + } + } + + /// return value of the iterator + typename IteratorType::reference value() const + { + return anchor.value(); + } + }; + + /// the container to iterate + typename IteratorType::reference container; + + public: + /// construct iteration proxy from a container + explicit iteration_proxy(typename IteratorType::reference cont) + : container(cont) {} + + /// return iterator begin (needed for range-based for) + iteration_proxy_internal begin() noexcept + { + return iteration_proxy_internal(container.begin()); + } + + /// return iterator end (needed for range-based for) + iteration_proxy_internal end() noexcept + { + return iteration_proxy_internal(container.end()); + } +}; + +/*! +@brief a template for a reverse iterator class + +@tparam Base the base iterator type to reverse. Valid types are @ref +iterator (to create @ref reverse_iterator) and @ref const_iterator (to +create @ref const_reverse_iterator). + +@requirement The class satisfies the following concept requirements: +- +[BidirectionalIterator](http://en.cppreference.com/w/cpp/concept/BidirectionalIterator): + The iterator that can be moved can be moved in both directions (i.e. + incremented and decremented). +- [OutputIterator](http://en.cppreference.com/w/cpp/concept/OutputIterator): + It is possible to write to the pointed-to element (only if @a Base is + @ref iterator). 
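[Editor's note] json_reverse_iterator, defined right after this comment block, wraps std::reverse_iterator and keeps key()/value() usable; rbegin()/rend() on a basic_json hand it out. A short usage sketch under the same include-path assumption:

#include <iostream>
#include "json.hpp"

using nlohmann::json;

int main()
{
    json arr = {1, 2, 3};
    for (auto rit = arr.rbegin(); rit != arr.rend(); ++rit)
    {
        std::cout << *rit << ' ';  // 3 2 1
    }
    std::cout << '\n';

    json obj = {{"a", 1}, {"z", 26}};
    for (auto rit = obj.rbegin(); rit != obj.rend(); ++rit)
    {
        std::cout << rit.key() << '=' << rit.value() << ' ';  // z=26 a=1
    }
    std::cout << '\n';
}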
+ +@since version 1.0.0 +*/ +template<typename Base> +class json_reverse_iterator : public std::reverse_iterator<Base> +{ + public: + using difference_type = std::ptrdiff_t; + /// shortcut to the reverse iterator adapter + using base_iterator = std::reverse_iterator<Base>; + /// the reference type for the pointed-to element + using reference = typename Base::reference; + + /// create reverse iterator from iterator + json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept + : base_iterator(it) {} + + /// create reverse iterator from base class + json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} + + /// post-increment (it++) + json_reverse_iterator const operator++(int) + { + return static_cast<json_reverse_iterator>(base_iterator::operator++(1)); + } + + /// pre-increment (++it) + json_reverse_iterator& operator++() + { + return static_cast<json_reverse_iterator&>(base_iterator::operator++()); + } + + /// post-decrement (it--) + json_reverse_iterator const operator--(int) + { + return static_cast<json_reverse_iterator>(base_iterator::operator--(1)); + } + + /// pre-decrement (--it) + json_reverse_iterator& operator--() + { + return static_cast<json_reverse_iterator&>(base_iterator::operator--()); + } + + /// add to iterator + json_reverse_iterator& operator+=(difference_type i) + { + return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i)); + } + + /// add to iterator + json_reverse_iterator operator+(difference_type i) const + { + return static_cast<json_reverse_iterator>(base_iterator::operator+(i)); + } + + /// subtract from iterator + json_reverse_iterator operator-(difference_type i) const + { + return static_cast<json_reverse_iterator>(base_iterator::operator-(i)); + } + + /// return difference + difference_type operator-(const json_reverse_iterator& other) const + { + return base_iterator(*this) - base_iterator(other); + } + + /// access to successor + reference operator[](difference_type n) const + { + return *(this->operator+(n)); + } + + /// return the key of an object iterator + auto key() const -> decltype(std::declval<Base>().key()) + { + auto it = --this->base(); + return it.key(); + } + + /// return the value of an iterator + reference value() const + { + auto it = --this->base(); + return it.operator * (); + } +}; + +///////////////////// +// output adapters // +///////////////////// + +/// abstract output adapter interface +template<typename CharType> struct output_adapter_protocol +{ + virtual void write_character(CharType c) = 0; + virtual void write_characters(const CharType* s, std::size_t length) = 0; + virtual ~output_adapter_protocol() = default; +}; + +/// a type to simplify interfaces +template<typename CharType> +using output_adapter_t = std::shared_ptr<output_adapter_protocol<CharType>>; + +/// output adapter for byte vectors +template<typename CharType> +class output_vector_adapter : public output_adapter_protocol<CharType> +{ + public: + explicit output_vector_adapter(std::vector<CharType>& vec) : v(vec) {} + + void write_character(CharType c) override + { + v.push_back(c); + } + + void write_characters(const CharType* s, std::size_t length) override + { + std::copy(s, s + length, std::back_inserter(v)); + } + + private: + std::vector<CharType>& v; +}; + +/// output adapter for output streams +template<typename CharType> +class output_stream_adapter : public output_adapter_protocol<CharType> +{ + public: + explicit output_stream_adapter(std::basic_ostream<CharType>& s) : stream(s) {} + + void 
write_character(CharType c) override + { + stream.put(c); + } + + void write_characters(const CharType* s, std::size_t length) override + { + stream.write(s, static_cast<std::streamsize>(length)); + } + + private: + std::basic_ostream<CharType>& stream; +}; + +/// output adapter for basic_string +template<typename CharType> +class output_string_adapter : public output_adapter_protocol<CharType> +{ + public: + explicit output_string_adapter(std::basic_string<CharType>& s) : str(s) {} + + void write_character(CharType c) override + { + str.push_back(c); + } + + void write_characters(const CharType* s, std::size_t length) override + { + str.append(s, length); + } + + private: + std::basic_string<CharType>& str; +}; + +template<typename CharType> +class output_adapter +{ + public: + output_adapter(std::vector<CharType>& vec) + : oa(std::make_shared<output_vector_adapter<CharType>>(vec)) {} + + output_adapter(std::basic_ostream<CharType>& s) + : oa(std::make_shared<output_stream_adapter<CharType>>(s)) {} + + output_adapter(std::basic_string<CharType>& s) + : oa(std::make_shared<output_string_adapter<CharType>>(s)) {} + + operator output_adapter_t<CharType>() + { + return oa; + } + + private: + output_adapter_t<CharType> oa = nullptr; +}; + +////////////////////////////// +// binary reader and writer // +////////////////////////////// + +/*! +@brief deserialization of CBOR and MessagePack values +*/ +template<typename BasicJsonType> +class binary_reader +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + + public: + /*! + @brief create a binary reader + + @param[in] adapter input adapter to read from + */ + explicit binary_reader(input_adapter_t adapter) : ia(std::move(adapter)) + { + assert(ia); + } + + /*! + @brief create a JSON value from CBOR input + + @param[in] strict whether to expect the input to be consumed completed + @return JSON value created from CBOR input + + @throw parse_error.110 if input ended unexpectedly or the end of file was + not reached when @a strict was set to true + @throw parse_error.112 if unsupported byte was read + */ + BasicJsonType parse_cbor(const bool strict) + { + const auto res = parse_cbor_internal(); + if (strict) + { + get(); + check_eof(true); + } + return res; + } + + /*! + @brief create a JSON value from MessagePack input + + @param[in] strict whether to expect the input to be consumed completed + @return JSON value created from MessagePack input + + @throw parse_error.110 if input ended unexpectedly or the end of file was + not reached when @a strict was set to true + @throw parse_error.112 if unsupported byte was read + */ + BasicJsonType parse_msgpack(const bool strict) + { + const auto res = parse_msgpack_internal(); + if (strict) + { + get(); + check_eof(true); + } + return res; + } + + /*! + @brief determine system byte order + + @return true if and only if system's byte order is little endian + + @note from http://stackoverflow.com/a/1001328/266378 + */ + static constexpr bool little_endianess(int num = 1) noexcept + { + return (*reinterpret_cast<char*>(&num) == 1); + } + + private: + /*! + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + */ + BasicJsonType parse_cbor_internal(const bool get_char = true) + { + switch (get_char ? 
get() : current) + { + // EOF + case std::char_traits<char>::eof(): + JSON_THROW(parse_error::create(110, chars_read, "unexpected end of input")); + + // Integer 0x00..0x17 (0..23) + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + return static_cast<number_unsigned_t>(current); + + case 0x18: // Unsigned integer (one-byte uint8_t follows) + return get_number<uint8_t>(); + + case 0x19: // Unsigned integer (two-byte uint16_t follows) + return get_number<uint16_t>(); + + case 0x1A: // Unsigned integer (four-byte uint32_t follows) + return get_number<uint32_t>(); + + case 0x1B: // Unsigned integer (eight-byte uint64_t follows) + return get_number<uint64_t>(); + + // Negative integer -1-0x00..-1-0x17 (-1..-24) + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + return static_cast<int8_t>(0x20 - 1 - current); + + case 0x38: // Negative integer (one-byte uint8_t follows) + { + // must be uint8_t ! + return static_cast<number_integer_t>(-1) - get_number<uint8_t>(); + } + + case 0x39: // Negative integer -1-n (two-byte uint16_t follows) + { + return static_cast<number_integer_t>(-1) - get_number<uint16_t>(); + } + + case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) + { + return static_cast<number_integer_t>(-1) - get_number<uint32_t>(); + } + + case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) + { + return static_cast<number_integer_t>(-1) - + static_cast<number_integer_t>(get_number<uint64_t>()); + } + + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + case 0x7F: // UTF-8 string (indefinite length) + { + return get_cbor_string(); + } + + // array (0x00..0x17 data items follow) + case 0x80: + case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: + { + return get_cbor_array(current & 0x1F); + } + + case 0x98: // array (one-byte uint8_t for n follows) + { + return get_cbor_array(get_number<uint8_t>()); + } + + case 0x99: // array (two-byte uint16_t for n follow) + { + return get_cbor_array(get_number<uint16_t>()); + } + + case 0x9A: // array (four-byte uint32_t for n follow) + { + return get_cbor_array(get_number<uint32_t>()); + } + + case 0x9B: // array (eight-byte uint64_t for n follow) + { + return get_cbor_array(get_number<uint64_t>()); + } + + case 0x9F: // array 
(indefinite length) + { + BasicJsonType result = value_t::array; + while (get() != 0xFF) + { + result.push_back(parse_cbor_internal(false)); + } + return result; + } + + // map (0x00..0x17 pairs of data items follow) + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + { + return get_cbor_object(current & 0x1F); + } + + case 0xB8: // map (one-byte uint8_t for n follows) + { + return get_cbor_object(get_number<uint8_t>()); + } + + case 0xB9: // map (two-byte uint16_t for n follow) + { + return get_cbor_object(get_number<uint16_t>()); + } + + case 0xBA: // map (four-byte uint32_t for n follow) + { + return get_cbor_object(get_number<uint32_t>()); + } + + case 0xBB: // map (eight-byte uint64_t for n follow) + { + return get_cbor_object(get_number<uint64_t>()); + } + + case 0xBF: // map (indefinite length) + { + BasicJsonType result = value_t::object; + while (get() != 0xFF) + { + auto key = get_cbor_string(); + result[key] = parse_cbor_internal(); + } + return result; + } + + case 0xF4: // false + { + return false; + } + + case 0xF5: // true + { + return true; + } + + case 0xF6: // null + { + return value_t::null; + } + + case 0xF9: // Half-Precision Float (two-byte IEEE 754) + { + const int byte1 = get(); + check_eof(); + const int byte2 = get(); + check_eof(); + + // code from RFC 7049, Appendix D, Figure 3: + // As half-precision floating-point numbers were only added + // to IEEE 754 in 2008, today's programming platforms often + // still only have limited support for them. It is very + // easy to include at least decoding support for them even + // without such support. An example of a small decoder for + // half-precision floating-point numbers in the C language + // is shown in Fig. 3. + const int half = (byte1 << 8) + byte2; + const int exp = (half >> 10) & 0x1F; + const int mant = half & 0x3FF; + double val; + if (exp == 0) + { + val = std::ldexp(mant, -24); + } + else if (exp != 31) + { + val = std::ldexp(mant + 1024, exp - 25); + } + else + { + val = (mant == 0) ? std::numeric_limits<double>::infinity() + : std::numeric_limits<double>::quiet_NaN(); + } + return (half & 0x8000) != 0 ? 
-val : val; + } + + case 0xFA: // Single-Precision Float (four-byte IEEE 754) + { + return get_number<float>(); + } + + case 0xFB: // Double-Precision Float (eight-byte IEEE 754) + { + return get_number<double>(); + } + + default: // anything else (0xFF is handled inside the other types) + { + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; + JSON_THROW(parse_error::create(112, chars_read, "error reading CBOR; last byte: 0x" + ss.str())); + } + } + } + + BasicJsonType parse_msgpack_internal() + { + switch (get()) + { + // EOF + case std::char_traits<char>::eof(): + JSON_THROW(parse_error::create(110, chars_read, "unexpected end of input")); + + // positive fixint + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: + case 0x59: + case 0x5A: + case 0x5B: + case 0x5C: + case 0x5D: + case 0x5E: + case 0x5F: + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: + case 0x79: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: + return static_cast<number_unsigned_t>(current); + + // fixmap + case 0x80: + case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + { + return get_msgpack_object(current & 0x0F); + } + + // fixarray + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: + case 0x98: + case 0x99: + case 0x9A: + case 0x9B: + case 0x9C: + case 0x9D: + case 0x9E: + case 0x9F: + { + return get_msgpack_array(current & 0x0F); + } + + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + return get_msgpack_string(); + + case 0xC0: // nil + return value_t::null; + + case 0xC2: // false + return false; + + case 
0xC3: // true + return true; + + case 0xCA: // float 32 + return get_number<float>(); + + case 0xCB: // float 64 + return get_number<double>(); + + case 0xCC: // uint 8 + return get_number<uint8_t>(); + + case 0xCD: // uint 16 + return get_number<uint16_t>(); + + case 0xCE: // uint 32 + return get_number<uint32_t>(); + + case 0xCF: // uint 64 + return get_number<uint64_t>(); + + case 0xD0: // int 8 + return get_number<int8_t>(); + + case 0xD1: // int 16 + return get_number<int16_t>(); + + case 0xD2: // int 32 + return get_number<int32_t>(); + + case 0xD3: // int 64 + return get_number<int64_t>(); + + case 0xD9: // str 8 + case 0xDA: // str 16 + case 0xDB: // str 32 + return get_msgpack_string(); + + case 0xDC: // array 16 + { + return get_msgpack_array(get_number<uint16_t>()); + } + + case 0xDD: // array 32 + { + return get_msgpack_array(get_number<uint32_t>()); + } + + case 0xDE: // map 16 + { + return get_msgpack_object(get_number<uint16_t>()); + } + + case 0xDF: // map 32 + { + return get_msgpack_object(get_number<uint32_t>()); + } + + // positive fixint + case 0xE0: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xED: + case 0xEE: + case 0xEF: + case 0xF0: + case 0xF1: + case 0xF2: + case 0xF3: + case 0xF4: + case 0xF5: + case 0xF6: + case 0xF7: + case 0xF8: + case 0xF9: + case 0xFA: + case 0xFB: + case 0xFC: + case 0xFD: + case 0xFE: + case 0xFF: + return static_cast<int8_t>(current); + + default: // anything else + { + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; + JSON_THROW(parse_error::create(112, chars_read, + "error reading MessagePack; last byte: 0x" + ss.str())); + } + } + } + + /*! + @brief get next character from the input + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns a -'ve valued + `std::char_traits<char>::eof()` in that case. + + @return character read from the input + */ + int get() + { + ++chars_read; + return (current = ia->get_character()); + } + + /* + @brief read a number from the input + + @tparam NumberType the type of the number + + @return number of type @a NumberType + + @note This function needs to respect the system's endianess, because + bytes in CBOR and MessagePack are stored in network order (big + endian) and therefore need reordering on little endian systems. + + @throw parse_error.110 if input has less than `sizeof(NumberType)` bytes + */ + template<typename NumberType> NumberType get_number() + { + // step 1: read input into array with system's byte order + std::array<uint8_t, sizeof(NumberType)> vec; + for (std::size_t i = 0; i < sizeof(NumberType); ++i) + { + get(); + check_eof(); + + // reverse byte order prior to conversion if necessary + if (is_little_endian) + { + vec[sizeof(NumberType) - i - 1] = static_cast<uint8_t>(current); + } + else + { + vec[i] = static_cast<uint8_t>(current); // LCOV_EXCL_LINE + } + } + + // step 2: convert array into number of type T and return + NumberType result; + std::memcpy(&result, vec.data(), sizeof(NumberType)); + return result; + } + + /*! + @brief create a string by reading characters from the input + + @param[in] len number of bytes to read + + @note We can not reserve @a len bytes for the result, because @a len + may be too large. Usually, @ref check_eof() detects the end of + the input before we run out of string memory. 
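A hedged sketch of how the CBOR decoding in `binary_reader` (including the half-precision branch above) is reached through the public `from_cbor` helper assumed to be defined later in this header; the byte sequences follow RFC 7049:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    // {"foo": 42}: 0xA1 = map with 1 pair, 0x63 "foo" = 3-byte text string,
    // 0x18 0x2A = unsigned integer 42 in the one-byte-follows form
    const std::vector<uint8_t> bytes = {0xA1, 0x63, 'f', 'o', 'o', 0x18, 0x2A};
    const json j = json::from_cbor(bytes);
    assert(j == json({{"foo", 42}}));

    // 0xF9 0x3C 0x00 is the half-precision encoding of 1.0,
    // decoded by the RFC 7049 Appendix D code above
    const std::vector<uint8_t> half = {0xF9, 0x3C, 0x00};
    assert(json::from_cbor(half) == json(1.0));
}
```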
+ + @return string created by reading @a len bytes + + @throw parse_error.110 if input has less than @a len bytes + */ + template<typename NumberType> + std::string get_string(const NumberType len) + { + std::string result; + std::generate_n(std::back_inserter(result), len, [this]() + { + get(); + check_eof(); + return static_cast<char>(current); + }); + return result; + } + + /*! + @brief reads a CBOR string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. + Additionally, CBOR's strings with indefinite lengths are supported. + + @return string + + @throw parse_error.110 if input ended + @throw parse_error.113 if an unexpected byte is read + */ + std::string get_cbor_string() + { + check_eof(); + + switch (current) + { + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + { + return get_string(current & 0x1F); + } + + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + { + return get_string(get_number<uint8_t>()); + } + + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + { + return get_string(get_number<uint16_t>()); + } + + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + { + return get_string(get_number<uint32_t>()); + } + + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + { + return get_string(get_number<uint64_t>()); + } + + case 0x7F: // UTF-8 string (indefinite length) + { + std::string result; + while (get() != 0xFF) + { + check_eof(); + result.push_back(static_cast<char>(current)); + } + return result; + } + + default: + { + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; + JSON_THROW(parse_error::create(113, chars_read, "expected a CBOR string; last byte: 0x" + ss.str())); + } + } + } + + template<typename NumberType> + BasicJsonType get_cbor_array(const NumberType len) + { + BasicJsonType result = value_t::array; + std::generate_n(std::back_inserter(*result.m_value.array), len, [this]() + { + return parse_cbor_internal(); + }); + return result; + } + + template<typename NumberType> + BasicJsonType get_cbor_object(const NumberType len) + { + BasicJsonType result = value_t::object; + std::generate_n(std::inserter(*result.m_value.object, + result.m_value.object->end()), + len, [this]() + { + get(); + auto key = get_cbor_string(); + auto val = parse_cbor_internal(); + return std::make_pair(std::move(key), std::move(val)); + }); + return result; + } + + /*! + @brief reads a MessagePack string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. 
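A corresponding MessagePack sketch, assuming the public `from_msgpack` helper defined later in this header; the second call illustrates the `strict` end-of-input check performed by `parse_msgpack`:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    // {"foo": 42}: 0x81 = fixmap with 1 pair, 0xA3 "foo" = 3-byte fixstr,
    // 0x2A = positive fixint 42
    const std::vector<uint8_t> bytes = {0x81, 0xA3, 'f', 'o', 'o', 0x2A};
    assert(json::from_msgpack(bytes) == json({{"foo", 42}}));

    // a stray trailing byte makes the strict end-of-input check throw
    const std::vector<uint8_t> trailing = {0xC0, 0x00};
    try
    {
        json::from_msgpack(trailing);   // parse_error.110: expected end of input
    }
    catch (json::parse_error&) {}
}
```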
+ + @return string + + @throw parse_error.110 if input ended + @throw parse_error.113 if an unexpected byte is read + */ + std::string get_msgpack_string() + { + check_eof(); + + switch (current) + { + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + { + return get_string(current & 0x1F); + } + + case 0xD9: // str 8 + { + return get_string(get_number<uint8_t>()); + } + + case 0xDA: // str 16 + { + return get_string(get_number<uint16_t>()); + } + + case 0xDB: // str 32 + { + return get_string(get_number<uint32_t>()); + } + + default: + { + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << current; + JSON_THROW(parse_error::create(113, chars_read, + "expected a MessagePack string; last byte: 0x" + ss.str())); + } + } + } + + template<typename NumberType> + BasicJsonType get_msgpack_array(const NumberType len) + { + BasicJsonType result = value_t::array; + std::generate_n(std::back_inserter(*result.m_value.array), len, [this]() + { + return parse_msgpack_internal(); + }); + return result; + } + + template<typename NumberType> + BasicJsonType get_msgpack_object(const NumberType len) + { + BasicJsonType result = value_t::object; + std::generate_n(std::inserter(*result.m_value.object, + result.m_value.object->end()), + len, [this]() + { + get(); + auto key = get_msgpack_string(); + auto val = parse_msgpack_internal(); + return std::make_pair(std::move(key), std::move(val)); + }); + return result; + } + + /*! + @brief check if input ended + @throw parse_error.110 if input ended + */ + void check_eof(const bool expect_eof = false) const + { + if (expect_eof) + { + if (JSON_UNLIKELY(current != std::char_traits<char>::eof())) + { + JSON_THROW(parse_error::create(110, chars_read, "expected end of input")); + } + } + else + { + if (JSON_UNLIKELY(current == std::char_traits<char>::eof())) + { + JSON_THROW(parse_error::create(110, chars_read, "unexpected end of input")); + } + } + } + + private: + /// input adapter + input_adapter_t ia = nullptr; + + /// the current character + int current = std::char_traits<char>::eof(); + + /// the number of characters read + std::size_t chars_read = 0; + + /// whether we can assume little endianess + const bool is_little_endian = little_endianess(); +}; + +/*! +@brief serialization to CBOR and MessagePack values +*/ +template<typename BasicJsonType, typename CharType> +class binary_writer +{ + public: + /*! + @brief create a binary writer + + @param[in] adapter output adapter to write to + */ + explicit binary_writer(output_adapter_t<CharType> adapter) : oa(adapter) + { + assert(oa); + } + + /*! + @brief[in] j JSON value to serialize + */ + void write_cbor(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: + { + oa->write_character(static_cast<CharType>(0xF6)); + break; + } + + case value_t::boolean: + { + oa->write_character(j.m_value.boolean + ? static_cast<CharType>(0xF5) + : static_cast<CharType>(0xF4)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // CBOR does not differentiate between positive signed + // integers and unsigned integers. 
Therefore, we used the + // code from the value_t::number_unsigned case here. + if (j.m_value.number_integer <= 0x17) + { + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits<uint8_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x18)); + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits<uint16_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x19)); + write_number(static_cast<uint16_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits<uint32_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x1A)); + write_number(static_cast<uint32_t>(j.m_value.number_integer)); + } + else + { + oa->write_character(static_cast<CharType>(0x1B)); + write_number(static_cast<uint64_t>(j.m_value.number_integer)); + } + } + else + { + // The conversions below encode the sign in the first + // byte, and the value is converted to a positive number. + const auto positive_number = -1 - j.m_value.number_integer; + if (j.m_value.number_integer >= -24) + { + write_number(static_cast<uint8_t>(0x20 + positive_number)); + } + else if (positive_number <= (std::numeric_limits<uint8_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x38)); + write_number(static_cast<uint8_t>(positive_number)); + } + else if (positive_number <= (std::numeric_limits<uint16_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x39)); + write_number(static_cast<uint16_t>(positive_number)); + } + else if (positive_number <= (std::numeric_limits<uint32_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x3A)); + write_number(static_cast<uint32_t>(positive_number)); + } + else + { + oa->write_character(static_cast<CharType>(0x3B)); + write_number(static_cast<uint64_t>(positive_number)); + } + } + break; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned <= 0x17) + { + write_number(static_cast<uint8_t>(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x18)); + write_number(static_cast<uint8_t>(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x19)); + write_number(static_cast<uint16_t>(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) + { + oa->write_character(static_cast<CharType>(0x1A)); + write_number(static_cast<uint32_t>(j.m_value.number_unsigned)); + } + else + { + oa->write_character(static_cast<CharType>(0x1B)); + write_number(static_cast<uint64_t>(j.m_value.number_unsigned)); + } + break; + } + + case value_t::number_float: // Double-Precision Float + { + oa->write_character(static_cast<CharType>(0xFB)); + write_number(j.m_value.number_float); + break; + } + + case value_t::string: + { + // step 1: write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 0x17) + { + write_number(static_cast<uint8_t>(0x60 + N)); + } + else if (N <= 0xFF) + { + oa->write_character(static_cast<CharType>(0x78)); + write_number(static_cast<uint8_t>(N)); + } + else if (N <= 0xFFFF) + { + oa->write_character(static_cast<CharType>(0x79)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 0xFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0x7A)); + 
write_number(static_cast<uint32_t>(N)); + } + // LCOV_EXCL_START + else if (N <= 0xFFFFFFFFFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0x7B)); + write_number(static_cast<uint64_t>(N)); + } + // LCOV_EXCL_STOP + + // step 2: write the string + oa->write_characters( + reinterpret_cast<const CharType*>(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 0x17) + { + write_number(static_cast<uint8_t>(0x80 + N)); + } + else if (N <= 0xFF) + { + oa->write_character(static_cast<CharType>(0x98)); + write_number(static_cast<uint8_t>(N)); + } + else if (N <= 0xFFFF) + { + oa->write_character(static_cast<CharType>(0x99)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 0xFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0x9A)); + write_number(static_cast<uint32_t>(N)); + } + // LCOV_EXCL_START + else if (N <= 0xFFFFFFFFFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0x9B)); + write_number(static_cast<uint64_t>(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_cbor(el); + } + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + const auto N = j.m_value.object->size(); + if (N <= 0x17) + { + write_number(static_cast<uint8_t>(0xA0 + N)); + } + else if (N <= 0xFF) + { + oa->write_character(static_cast<CharType>(0xB8)); + write_number(static_cast<uint8_t>(N)); + } + else if (N <= 0xFFFF) + { + oa->write_character(static_cast<CharType>(0xB9)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 0xFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0xBA)); + write_number(static_cast<uint32_t>(N)); + } + // LCOV_EXCL_START + else if (N <= 0xFFFFFFFFFFFFFFFF) + { + oa->write_character(static_cast<CharType>(0xBB)); + write_number(static_cast<uint64_t>(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_cbor(el.first); + write_cbor(el.second); + } + break; + } + + default: + break; + } + } + + /*! + @brief[in] j JSON value to serialize + */ + void write_msgpack(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: // nil + { + oa->write_character(static_cast<CharType>(0xC0)); + break; + } + + case value_t::boolean: // true and false + { + oa->write_character(j.m_value.boolean + ? static_cast<CharType>(0xC3) + : static_cast<CharType>(0xC2)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // MessagePack does not differentiate between positive + // signed integers and unsigned integers. Therefore, we used + // the code from the value_t::number_unsigned case here. 
+ if (j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)()) + { + // uint 8 + oa->write_character(static_cast<CharType>(0xCC)); + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)()) + { + // uint 16 + oa->write_character(static_cast<CharType>(0xCD)); + write_number(static_cast<uint16_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) + { + // uint 32 + oa->write_character(static_cast<CharType>(0xCE)); + write_number(static_cast<uint32_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)()) + { + // uint 64 + oa->write_character(static_cast<CharType>(0xCF)); + write_number(static_cast<uint64_t>(j.m_value.number_integer)); + } + } + else + { + if (j.m_value.number_integer >= -32) + { + // negative fixnum + write_number(static_cast<int8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits<int8_t>::min)() and + j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)()) + { + // int 8 + oa->write_character(static_cast<CharType>(0xD0)); + write_number(static_cast<int8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits<int16_t>::min)() and + j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)()) + { + // int 16 + oa->write_character(static_cast<CharType>(0xD1)); + write_number(static_cast<int16_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits<int32_t>::min)() and + j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)()) + { + // int 32 + oa->write_character(static_cast<CharType>(0xD2)); + write_number(static_cast<int32_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits<int64_t>::min)() and + j.m_value.number_integer <= (std::numeric_limits<int64_t>::max)()) + { + // int 64 + oa->write_character(static_cast<CharType>(0xD3)); + write_number(static_cast<int64_t>(j.m_value.number_integer)); + } + } + break; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)()) + { + // uint 8 + oa->write_character(static_cast<CharType>(0xCC)); + write_number(static_cast<uint8_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)()) + { + // uint 16 + oa->write_character(static_cast<CharType>(0xCD)); + write_number(static_cast<uint16_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)()) + { + // uint 32 + oa->write_character(static_cast<CharType>(0xCE)); + write_number(static_cast<uint32_t>(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)()) + { + // uint 64 + oa->write_character(static_cast<CharType>(0xCF)); + write_number(static_cast<uint64_t>(j.m_value.number_integer)); + } + break; + } + + case value_t::number_float: // float 64 + { + oa->write_character(static_cast<CharType>(0xCB)); + write_number(j.m_value.number_float); + break; + } + + case value_t::string: + { + // step 1: 
write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 31) + { + // fixstr + write_number(static_cast<uint8_t>(0xA0 | N)); + } + else if (N <= 255) + { + // str 8 + oa->write_character(static_cast<CharType>(0xD9)); + write_number(static_cast<uint8_t>(N)); + } + else if (N <= 65535) + { + // str 16 + oa->write_character(static_cast<CharType>(0xDA)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 4294967295) + { + // str 32 + oa->write_character(static_cast<CharType>(0xDB)); + write_number(static_cast<uint32_t>(N)); + } + + // step 2: write the string + oa->write_characters( + reinterpret_cast<const CharType*>(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 15) + { + // fixarray + write_number(static_cast<uint8_t>(0x90 | N)); + } + else if (N <= 0xFFFF) + { + // array 16 + oa->write_character(static_cast<CharType>(0xDC)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 0xFFFFFFFF) + { + // array 32 + oa->write_character(static_cast<CharType>(0xDD)); + write_number(static_cast<uint32_t>(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_msgpack(el); + } + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + const auto N = j.m_value.object->size(); + if (N <= 15) + { + // fixmap + write_number(static_cast<uint8_t>(0x80 | (N & 0xF))); + } + else if (N <= 65535) + { + // map 16 + oa->write_character(static_cast<CharType>(0xDE)); + write_number(static_cast<uint16_t>(N)); + } + else if (N <= 4294967295) + { + // map 32 + oa->write_character(static_cast<CharType>(0xDF)); + write_number(static_cast<uint32_t>(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_msgpack(el.first); + write_msgpack(el.second); + } + break; + } + + default: + break; + } + } + + private: + /* + @brief write a number to output input + + @param[in] n number of type @a NumberType + @tparam NumberType the type of the number + + @note This function needs to respect the system's endianess, because bytes + in CBOR and MessagePack are stored in network order (big endian) and + therefore need reordering on little endian systems. + */ + template<typename NumberType> void write_number(NumberType n) + { + // step 1: write number to array of length NumberType + std::array<CharType, sizeof(NumberType)> vec; + std::memcpy(vec.data(), &n, sizeof(NumberType)); + + // step 2: write array to output (with possible reordering) + if (is_little_endian) + { + // reverse byte order prior to conversion if necessary + std::reverse(vec.begin(), vec.end()); + } + + oa->write_characters(vec.data(), sizeof(NumberType)); + } + + private: + /// whether we can assume little endianess + const bool is_little_endian = binary_reader<BasicJsonType>::little_endianess(); + + /// the output + output_adapter_t<CharType> oa = nullptr; +}; + +/////////////////// +// serialization // +/////////////////// + +template<typename BasicJsonType> +class serializer +{ + using string_t = typename BasicJsonType::string_t; + using number_float_t = typename BasicJsonType::number_float_t; + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + public: + /*! 
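Before the serializer, a small sketch of how the size-based control-byte selection in `binary_writer` above surfaces through the `to_cbor`/`to_msgpack` helpers assumed to be defined later in this header:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    // CBOR: a 5-character string fits the short form, header 0x60 + 5 = 0x65
    const auto c = json::to_cbor(json("hello"));
    assert(c.size() == 6 && c[0] == 0x65);

    // MessagePack: 300 fits neither a positive fixint nor uint 8, so the
    // writer emits the uint 16 marker 0xCD followed by big-endian payload bytes
    const auto m = json::to_msgpack(json(300));
    assert((m == std::vector<uint8_t>{0xCD, 0x01, 0x2C}));
}
```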
+ @param[in] s output stream to serialize to + @param[in] ichar indentation character to use + */ + serializer(output_adapter_t<char> s, const char ichar) + : o(std::move(s)), loc(std::localeconv()), + thousands_sep(loc->thousands_sep == nullptr ? '\0' : * (loc->thousands_sep)), + decimal_point(loc->decimal_point == nullptr ? '\0' : * (loc->decimal_point)), + indent_char(ichar), indent_string(512, indent_char) {} + + // delete because of pointer members + serializer(const serializer&) = delete; + serializer& operator=(const serializer&) = delete; + + /*! + @brief internal implementation of the serialization function + + This function is called by the public member function dump and organizes + the serialization internally. The indentation level is propagated as + additional parameter. In case of arrays and objects, the function is + called recursively. + + - strings and object keys are escaped using `escape_string()` + - integer numbers are converted implicitly via `operator<<` + - floating-point numbers are converted to a string using `"%g"` format + + @param[in] val value to serialize + @param[in] pretty_print whether the output shall be pretty-printed + @param[in] indent_step the indent level + @param[in] current_indent the current indent level (only used internally) + */ + void dump(const BasicJsonType& val, const bool pretty_print, + const bool ensure_ascii, + const unsigned int indent_step, + const unsigned int current_indent = 0) + { + switch (val.m_type) + { + case value_t::object: + { + if (val.m_value.object->empty()) + { + o->write_characters("{}", 2); + return; + } + + if (pretty_print) + { + o->write_characters("{\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + assert(i != val.m_value.object->cend()); + assert(std::next(i) == val.m_value.object->cend()); + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character('}'); + } + else + { + o->write_character('{'); + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + } + + // last element + assert(i != val.m_value.object->cend()); + assert(std::next(i) == val.m_value.object->cend()); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + + o->write_character('}'); + } + + return; + } + + case value_t::array: + { + if (val.m_value.array->empty()) 
+ { + o->write_characters("[]", 2); + return; + } + + if (pretty_print) + { + o->write_characters("[\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + dump(*i, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + assert(not val.m_value.array->empty()); + o->write_characters(indent_string.c_str(), new_indent); + dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character(']'); + } + else + { + o->write_character('['); + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + dump(*i, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + } + + // last element + assert(not val.m_value.array->empty()); + dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent); + + o->write_character(']'); + } + + return; + } + + case value_t::string: + { + o->write_character('\"'); + dump_escaped(*val.m_value.string, ensure_ascii); + o->write_character('\"'); + return; + } + + case value_t::boolean: + { + if (val.m_value.boolean) + { + o->write_characters("true", 4); + } + else + { + o->write_characters("false", 5); + } + return; + } + + case value_t::number_integer: + { + dump_integer(val.m_value.number_integer); + return; + } + + case value_t::number_unsigned: + { + dump_integer(val.m_value.number_unsigned); + return; + } + + case value_t::number_float: + { + dump_float(val.m_value.number_float); + return; + } + + case value_t::discarded: + { + o->write_characters("<discarded>", 11); + return; + } + + case value_t::null: + { + o->write_characters("null", 4); + return; + } + } + } + + private: + /*! + @brief returns the number of expected bytes following in UTF-8 string + + @param[in] u the first byte of a UTF-8 string + @return the number of expected bytes following + */ + static constexpr std::size_t bytes_following(const uint8_t u) + { + return ((u <= 127) ? 0 + : ((192 <= u and u <= 223) ? 1 + : ((224 <= u and u <= 239) ? 2 + : ((240 <= u and u <= 247) ? 3 : std::string::npos)))); + } + + /*! + @brief calculates the extra space to escape a JSON string + + @param[in] s the string to escape + @param[in] ensure_ascii whether to escape non-ASCII characters with + \uXXXX sequences + @return the number of characters required to escape string @a s + + @complexity Linear in the length of string @a s. 
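The `dump` dispatcher above drives both compact and pretty-printed output. A usage sketch through the public `dump()` member, assuming its usual `(indent, indent_char, ensure_ascii)` parameters:

```cpp
#include <iostream>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    const json j = {{"pi", 3.141}, {"happy", true}, {"list", {1, 2, 3}}};

    std::cout << j.dump() << '\n';          // compact, single line
    std::cout << j.dump(4) << '\n';         // pretty-printed, 4-space indent
    std::cout << j.dump(2, '\t') << '\n';   // tab characters as indentation
}
```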
+ */ + static std::size_t extra_space(const string_t& s, + const bool ensure_ascii) noexcept + { + std::size_t res = 0; + + for (std::size_t i = 0; i < s.size(); ++i) + { + switch (s[i]) + { + // control characters that can be escaped with a backslash + case '"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + { + // from c (1 byte) to \x (2 bytes) + res += 1; + break; + } + + // control characters that need \uxxxx escaping + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x0B: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + { + // from c (1 byte) to \uxxxx (6 bytes) + res += 5; + break; + } + + default: + { + if (ensure_ascii and (s[i] & 0x80 or s[i] == 0x7F)) + { + const auto bytes = bytes_following(static_cast<uint8_t>(s[i])); + // invalid characters will be detected by throw_if_invalid_utf8 + assert (bytes != std::string::npos); + + if (bytes == 3) + { + // codepoints that need 4 bytes (i.e., 3 additional + // bytes) in UTF-8 need a surrogate pair when \u + // escaping is used: from 4 bytes to \uxxxx\uxxxx + // (12 bytes) + res += (12 - bytes - 1); + } + else + { + // from x bytes to \uxxxx (6 bytes) + res += (6 - bytes - 1); + } + + // skip the additional bytes + i += bytes; + } + break; + } + } + } + + return res; + } + + static void escape_codepoint(int codepoint, string_t& result, std::size_t& pos) + { + // expecting a proper codepoint + assert(0x00 <= codepoint and codepoint <= 0x10FFFF); + + // the last written character was the backslash before the 'u' + assert(result[pos] == '\\'); + + // write the 'u' + result[++pos] = 'u'; + + // convert a number 0..15 to its hex representation (0..f) + static const std::array<char, 16> hexify = + { + { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' + } + }; + + if (codepoint < 0x10000) + { + // codepoints U+0000..U+FFFF can be represented as \uxxxx. + result[++pos] = hexify[(codepoint >> 12) & 0x0F]; + result[++pos] = hexify[(codepoint >> 8) & 0x0F]; + result[++pos] = hexify[(codepoint >> 4) & 0x0F]; + result[++pos] = hexify[codepoint & 0x0F]; + } + else + { + // codepoints U+10000..U+10FFFF need a surrogate pair to be + // represented as \uxxxx\uxxxx. + // http://www.unicode.org/faq/utf_bom.html#utf16-4 + codepoint -= 0x10000; + const int high_surrogate = 0xD800 | ((codepoint >> 10) & 0x3FF); + const int low_surrogate = 0xDC00 | (codepoint & 0x3FF); + result[++pos] = hexify[(high_surrogate >> 12) & 0x0F]; + result[++pos] = hexify[(high_surrogate >> 8) & 0x0F]; + result[++pos] = hexify[(high_surrogate >> 4) & 0x0F]; + result[++pos] = hexify[high_surrogate & 0x0F]; + ++pos; // backslash is already in output + result[++pos] = 'u'; + result[++pos] = hexify[(low_surrogate >> 12) & 0x0F]; + result[++pos] = hexify[(low_surrogate >> 8) & 0x0F]; + result[++pos] = hexify[(low_surrogate >> 4) & 0x0F]; + result[++pos] = hexify[low_surrogate & 0x0F]; + } + + ++pos; + } + + /*! + @brief dump escaped string + + Escape a string by replacing certain special characters by a sequence of an + escape character (backslash) and another character and other control + characters by a sequence of "\u" followed by a four-digit hex + representation. The escaped string is written to output stream @a o. 
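`extra_space` and `escape_codepoint` together size and fill the escaped string produced by `dump_escaped` below. A sketch of the observable effect of the `ensure_ascii` flag, assuming `dump()` exposes it as its third parameter and that the source file is saved as UTF-8:

```cpp
#include <cassert>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    const json j = "ä \"quoted\"\n";

    // by default only quotes, backslashes and control characters are escaped
    assert(j.dump() == "\"ä \\\"quoted\\\"\\n\"");

    // ensure_ascii additionally escapes every non-ASCII code point as \uxxxx
    assert(j.dump(-1, ' ', true) == "\"\\u00e4 \\\"quoted\\\"\\n\"");
}
```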
+ + @param[in] s the string to escape + @param[in] ensure_ascii whether to escape non-ASCII characters with + \uXXXX sequences + + @complexity Linear in the length of string @a s. + */ + void dump_escaped(const string_t& s, const bool ensure_ascii) const + { + throw_if_invalid_utf8(s); + + const auto space = extra_space(s, ensure_ascii); + if (space == 0) + { + o->write_characters(s.c_str(), s.size()); + return; + } + + // create a result string of necessary size + string_t result(s.size() + space, '\\'); + std::size_t pos = 0; + + for (std::size_t i = 0; i < s.size(); ++i) + { + switch (s[i]) + { + case '"': // quotation mark (0x22) + { + result[pos + 1] = '"'; + pos += 2; + break; + } + + case '\\': // reverse solidus (0x5C) + { + // nothing to change + pos += 2; + break; + } + + case '\b': // backspace (0x08) + { + result[pos + 1] = 'b'; + pos += 2; + break; + } + + case '\f': // formfeed (0x0C) + { + result[pos + 1] = 'f'; + pos += 2; + break; + } + + case '\n': // newline (0x0A) + { + result[pos + 1] = 'n'; + pos += 2; + break; + } + + case '\r': // carriage return (0x0D) + { + result[pos + 1] = 'r'; + pos += 2; + break; + } + + case '\t': // horizontal tab (0x09) + { + result[pos + 1] = 't'; + pos += 2; + break; + } + + default: + { + // escape control characters (0x00..0x1F) or, if + // ensure_ascii parameter is used, non-ASCII characters + if ((0x00 <= s[i] and s[i] <= 0x1F) or + (ensure_ascii and (s[i] & 0x80 or s[i] == 0x7F))) + { + const auto bytes = bytes_following(static_cast<uint8_t>(s[i])); + // invalid characters will be detected by throw_if_invalid_utf8 + assert (bytes != std::string::npos); + + // check that the additional bytes are present + assert(i + bytes < s.size()); + + // to use \uxxxx escaping, we first need to calculate + // the codepoint from the UTF-8 bytes + int codepoint = 0; + + // bytes is unsigned type: + assert(bytes <= 3); + switch (bytes) + { + case 0: + { + codepoint = s[i] & 0xFF; + break; + } + + case 1: + { + codepoint = ((s[i] & 0x3F) << 6) + + (s[i + 1] & 0x7F); + break; + } + + case 2: + { + codepoint = ((s[i] & 0x1F) << 12) + + ((s[i + 1] & 0x7F) << 6) + + (s[i + 2] & 0x7F); + break; + } + + case 3: + { + codepoint = ((s[i] & 0xF) << 18) + + ((s[i + 1] & 0x7F) << 12) + + ((s[i + 2] & 0x7F) << 6) + + (s[i + 3] & 0x7F); + break; + } + + default: + break; // LCOV_EXCL_LINE + } + + escape_codepoint(codepoint, result, pos); + i += bytes; + } + else + { + // all other characters are added as-is + result[pos++] = s[i]; + } + break; + } + } + } + + assert(pos == result.size()); + o->write_characters(result.c_str(), result.size()); + } + + /*! + @brief dump an integer + + Dump a given integer to output stream @a o. Works internally with + @a number_buffer. 
+ + @param[in] x integer number (signed or unsigned) to dump + @tparam NumberType either @a number_integer_t or @a number_unsigned_t + */ + template<typename NumberType, detail::enable_if_t< + std::is_same<NumberType, number_unsigned_t>::value or + std::is_same<NumberType, number_integer_t>::value, + int> = 0> + void dump_integer(NumberType x) + { + // special case for "0" + if (x == 0) + { + o->write_character('0'); + return; + } + + const bool is_negative = (x <= 0) and (x != 0); // see issue #755 + std::size_t i = 0; + + while (x != 0) + { + // spare 1 byte for '\0' + assert(i < number_buffer.size() - 1); + + const auto digit = std::labs(static_cast<long>(x % 10)); + number_buffer[i++] = static_cast<char>('0' + digit); + x /= 10; + } + + if (is_negative) + { + // make sure there is capacity for the '-' + assert(i < number_buffer.size() - 2); + number_buffer[i++] = '-'; + } + + std::reverse(number_buffer.begin(), number_buffer.begin() + i); + o->write_characters(number_buffer.data(), i); + } + + /*! + @brief dump a floating-point number + + Dump a given floating-point number to output stream @a o. Works internally + with @a number_buffer. + + @param[in] x floating-point number to dump + */ + void dump_float(number_float_t x) + { + // NaN / inf + if (not std::isfinite(x) or std::isnan(x)) + { + o->write_characters("null", 4); + return; + } + + // get number of digits for a text -> float -> text round-trip + static constexpr auto d = std::numeric_limits<number_float_t>::digits10; + + // the actual conversion + std::ptrdiff_t len = snprintf(number_buffer.data(), number_buffer.size(), "%.*g", d, x); + + // negative value indicates an error + assert(len > 0); + // check if buffer was large enough + assert(static_cast<std::size_t>(len) < number_buffer.size()); + + // erase thousands separator + if (thousands_sep != '\0') + { + const auto end = std::remove(number_buffer.begin(), + number_buffer.begin() + len, thousands_sep); + std::fill(end, number_buffer.end(), '\0'); + assert((end - number_buffer.begin()) <= len); + len = (end - number_buffer.begin()); + } + + // convert decimal point to '.' + if (decimal_point != '\0' and decimal_point != '.') + { + const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); + if (dec_pos != number_buffer.end()) + { + *dec_pos = '.'; + } + } + + o->write_characters(number_buffer.data(), static_cast<std::size_t>(len)); + + // determine if need to append ".0" + const bool value_is_int_like = + std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1, + [](char c) + { + return (c == '.' or c == 'e'); + }); + + if (value_is_int_like) + { + o->write_characters(".0", 2); + } + } + + /*! + @brief check whether a string is UTF-8 encoded + + The function checks each byte of a string whether it is UTF-8 encoded. The + result of the check is stored in the @a state parameter. The function must + be called initially with state 0 (accept). State 1 means the string must + be rejected, because the current byte is not allowed. If the string is + completely processed, but the state is non-zero, the string ended + prematurely; that is, the last byte indicated more bytes should have + followed. + + @param[in,out] state the state of the decoding + @param[in] byte next byte to decode + + @note The function has been edited: a std::array is used and the code + point is not calculated. 
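`dump_float` above produces locale-independent output (thousands separators are stripped, the decimal point is normalized to `.`), appends `.0` to integral-looking values so they re-parse as floats, and maps non-finite numbers to `null`. A sketch:

```cpp
#include <cassert>
#include <limits>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    // integral-looking floats keep a trailing ".0"
    assert(json(42.0).dump() == "42.0");

    // NaN and infinity have no JSON representation and are emitted as null
    assert(json(std::numeric_limits<double>::quiet_NaN()).dump() == "null");
    assert(json(std::numeric_limits<double>::infinity()).dump() == "null");
}
```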
+ + @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de> + @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + */ + static void decode(uint8_t& state, const uint8_t byte) + { + static const std::array<uint8_t, 400> utf8d = + { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF + 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF + 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 + } + }; + + const uint8_t type = utf8d[byte]; + state = utf8d[256u + state * 16u + type]; + } + + /*! 
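The decoder table above feeds the UTF-8 validation shown next; the practical consequence is that serializing a string value holding malformed UTF-8 fails with `type_error.316`. A sketch:

```cpp
#include <iostream>
#include <string>
#include "json.hpp"   // illustrative path to this vendored header

using json = nlohmann::json;

int main()
{
    // 0xF0 announces a four-byte sequence, but 0x28 is not a continuation byte
    const json j = std::string("\xF0\x28");

    try
    {
        j.dump();                        // validation runs during serialization
    }
    catch (json::type_error& e)
    {
        std::cout << e.what() << '\n';   // [json.exception.type_error.316] invalid UTF-8 byte ...
    }
}
```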
+ @brief throw an exception if a string is not UTF-8 encoded + + @param[in] str UTF-8 string to check + @throw type_error.316 if passed string is not UTF-8 encoded + + @since version 3.0.0 + */ + static void throw_if_invalid_utf8(const std::string& str) + { + // start with state 0 (= accept) + uint8_t state = 0; + + for (size_t i = 0; i < str.size(); ++i) + { + const auto byte = static_cast<uint8_t>(str[i]); + decode(state, byte); + if (state == 1) + { + // state 1 means reject + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << static_cast<int>(byte); + JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + ss.str())); + } + } + + if (state != 0) + { + // we finish reading, but do not accept: string was incomplete + std::stringstream ss; + ss << std::setw(2) << std::uppercase << std::setfill('0') << std::hex << static_cast<int>(static_cast<uint8_t>(str.back())); + JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + ss.str())); + } + } + + private: + /// the output of the serializer + output_adapter_t<char> o = nullptr; + + /// a (hopefully) large enough character buffer + std::array<char, 64> number_buffer{{}}; + + /// the locale + const std::lconv* loc = nullptr; + /// the locale's thousand separator character + const char thousands_sep = '\0'; + /// the locale's decimal point character + const char decimal_point = '\0'; + + /// the indentation character + const char indent_char; + + /// the indentation string + string_t indent_string; +}; + +template<typename BasicJsonType> +class json_ref +{ + public: + using value_type = BasicJsonType; + + json_ref(value_type&& value) + : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true) + {} + + json_ref(const value_type& value) + : value_ref(const_cast<value_type*>(&value)), is_rvalue(false) + {} + + json_ref(std::initializer_list<json_ref> init) + : owned_value(init), value_ref(&owned_value), is_rvalue(true) + {} + + template<class... Args> + json_ref(Args&& ... args) + : owned_value(std::forward<Args>(args)...), value_ref(&owned_value), is_rvalue(true) + {} + + // class should be movable only + json_ref(json_ref&&) = default; + json_ref(const json_ref&) = delete; + json_ref& operator=(const json_ref&) = delete; + + value_type moved_or_copied() const + { + if (is_rvalue) + { + return std::move(*value_ref); + } + return *value_ref; + } + + value_type const& operator*() const + { + return *static_cast<value_type const*>(value_ref); + } + + value_type const* operator->() const + { + return static_cast<value_type const*>(value_ref); + } + + private: + mutable value_type owned_value = nullptr; + value_type* value_ref = nullptr; + const bool is_rvalue; +}; + +} // namespace detail + +/// namespace to hold default `to_json` / `from_json` functions +namespace +{ +constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value; +constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value; +} + + +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](http://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template<typename, typename> +struct adl_serializer +{ + /*! + @brief convert a JSON value to any value type + + This function is usually called by the `get()` function of the + @ref basic_json class (either explicit or via conversion operators). 
+ + @param[in] j JSON value to read from + @param[in,out] val value to write to + */ + template<typename BasicJsonType, typename ValueType> + static void from_json(BasicJsonType&& j, ValueType& val) noexcept( + noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val))) + { + ::nlohmann::from_json(std::forward<BasicJsonType>(j), val); + } + + /*! + @brief convert any value type to a JSON value + + This function is usually called by the constructors of the @ref basic_json + class. + + @param[in,out] j JSON value to write to + @param[in] val value to read from + */ + template<typename BasicJsonType, typename ValueType> + static void to_json(BasicJsonType& j, ValueType&& val) noexcept( + noexcept(::nlohmann::to_json(j, std::forward<ValueType>(val)))) + { + ::nlohmann::to_json(j, std::forward<ValueType>(val)); + } +}; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +class json_pointer +{ + /// allow basic_json to access private members + NLOHMANN_BASIC_JSON_TPL_DECLARATION + friend class basic_json; + + public: + /*! + @brief create JSON pointer + + Create a JSON pointer according to the syntax described in + [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3). + + @param[in] s string representing the JSON pointer; if omitted, the empty + string is assumed which references the whole JSON value + + @throw parse_error.107 if the given JSON pointer @a s is nonempty and + does not begin with a slash (`/`); see example below + + @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s + is not followed by `0` (representing `~`) or `1` (representing `/`); + see example below + + @liveexample{The example shows the construction several valid JSON + pointers as well as the exceptional behavior.,json_pointer} + + @since version 2.0.0 + */ + explicit json_pointer(const std::string& s = "") : reference_tokens(split(s)) {} + + /*! + @brief return a string representation of the JSON pointer + + @invariant For each JSON pointer `ptr`, it holds: + @code {.cpp} + ptr == json_pointer(ptr.to_string()); + @endcode + + @return a string representation of the JSON pointer + + @liveexample{The example shows the result of `to_string`., + json_pointer__to_string} + + @since version 2.0.0 + */ + std::string to_string() const noexcept + { + return std::accumulate(reference_tokens.begin(), reference_tokens.end(), + std::string{}, + [](const std::string & a, const std::string & b) + { + return a + "/" + escape(b); + }); + } + + /// @copydoc to_string() + operator std::string() const + { + return to_string(); + } + + /*! + @param[in] s reference token to be converted into an array index + + @return integer representation of @a s + + @throw out_of_range.404 if string @a s could not be converted to an integer + */ + static int array_index(const std::string& s) + { + size_t processed_chars = 0; + const int res = std::stoi(s, &processed_chars); + + // check if the string was completely read + if (JSON_UNLIKELY(processed_chars != s.size())) + { + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'")); + } + + return res; + } + + private: + /*! 
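A user-defined type is typically hooked into the `adl_serializer` mechanism above by providing `to_json`/`from_json` in the type's own namespace, where ADL finds them. A sketch (the type `ns::person` is made up for illustration):
@code {.cpp}
#include <string>
#include <nlohmann/json.hpp>

namespace ns
{
struct person
{
    std::string name;
    int age;
};

// found via ADL by adl_serializer<person>
void to_json(nlohmann::json& j, const person& p)
{
    j = nlohmann::json{{"name", p.name}, {"age", p.age}};
}

void from_json(const nlohmann::json& j, person& p)
{
    p.name = j.at("name").get<std::string>();
    p.age  = j.at("age").get<int>();
}
} // namespace ns

int main()
{
    ns::person p{"Ada Lovelace", 36};
    nlohmann::json j = p;                   // uses ns::to_json
    ns::person q = j.get<ns::person>();     // uses ns::from_json
    (void) q;
}
@endcode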
+ @brief remove and return last reference pointer + @throw out_of_range.405 if JSON pointer has no parent + */ + std::string pop_back() + { + if (JSON_UNLIKELY(is_root())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); + } + + auto last = reference_tokens.back(); + reference_tokens.pop_back(); + return last; + } + + /// return whether pointer points to the root document + bool is_root() const + { + return reference_tokens.empty(); + } + + json_pointer top() const + { + if (JSON_UNLIKELY(is_root())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); + } + + json_pointer result = *this; + result.reference_tokens = {reference_tokens[0]}; + return result; + } + + /*! + @brief create and return a reference to the pointed to value + + @complexity Linear in the number of reference tokens. + + @throw parse_error.109 if array index is not a number + @throw type_error.313 if value cannot be unflattened + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + NLOHMANN_BASIC_JSON_TPL& get_and_create(NLOHMANN_BASIC_JSON_TPL& j) const; + + /*! + @brief return a reference to the pointed to value + + @note This version does not throw if a value is not present, but tries to + create nested values instead. For instance, calling this function + with pointer `"/this/that"` on a null value is equivalent to calling + `operator[]("this").operator[]("that")` on that value, effectively + changing the null value to an object. + + @param[in] ptr a JSON value + + @return reference to the JSON value pointed to by the JSON pointer + + @complexity Linear in the length of the JSON pointer. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + NLOHMANN_BASIC_JSON_TPL& get_unchecked(NLOHMANN_BASIC_JSON_TPL* ptr) const; + + /*! + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + NLOHMANN_BASIC_JSON_TPL& get_checked(NLOHMANN_BASIC_JSON_TPL* ptr) const; + + /*! + @brief return a const reference to the pointed to value + + @param[in] ptr a JSON value + + @return const reference to the JSON value pointed to by the JSON + pointer + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + const NLOHMANN_BASIC_JSON_TPL& get_unchecked(const NLOHMANN_BASIC_JSON_TPL* ptr) const; + + /*! + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + const NLOHMANN_BASIC_JSON_TPL& get_checked(const NLOHMANN_BASIC_JSON_TPL* ptr) const; + + /*! + @brief split the string input to reference tokens + + @note This function is only called by the json_pointer constructor. + All exceptions below are documented there. 
+ + @throw parse_error.107 if the pointer is nonempty and does not begin with '/' + @throw parse_error.108 if character '~' is not followed by '0' or '1' + */ + static std::vector<std::string> split(const std::string& reference_string) + { + std::vector<std::string> result; + + // special case: empty reference string -> no reference tokens + if (reference_string.empty()) + { + return result; + } + + // check if nonempty reference string begins with slash + if (JSON_UNLIKELY(reference_string[0] != '/')) + { + JSON_THROW(detail::parse_error::create(107, 1, + "JSON pointer must be empty or begin with '/' - was: '" + + reference_string + "'")); + } + + // extract the reference tokens: + // - slash: position of the last read slash (or end of string) + // - start: position after the previous slash + for ( + // search for the first slash after the first character + std::size_t slash = reference_string.find_first_of('/', 1), + // set the beginning of the first reference token + start = 1; + // we can stop if start == string::npos+1 = 0 + start != 0; + // set the beginning of the next reference token + // (will eventually be 0 if slash == std::string::npos) + start = slash + 1, + // find next slash + slash = reference_string.find_first_of('/', start)) + { + // use the text between the beginning of the reference token + // (start) and the last slash (slash). + auto reference_token = reference_string.substr(start, slash - start); + + // check reference tokens are properly escaped + for (std::size_t pos = reference_token.find_first_of('~'); + pos != std::string::npos; + pos = reference_token.find_first_of('~', pos + 1)) + { + assert(reference_token[pos] == '~'); + + // ~ must be followed by 0 or 1 + if (JSON_UNLIKELY(pos == reference_token.size() - 1 or + (reference_token[pos + 1] != '0' and + reference_token[pos + 1] != '1'))) + { + JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'")); + } + } + + // finally, store the reference token + unescape(reference_token); + result.push_back(reference_token); + } + + return result; + } + + /*! + @brief replace all occurrences of a substring by another string + + @param[in,out] s the string to manipulate; changed so that all + occurrences of @a f are replaced with @a t + @param[in] f the substring to replace with @a t + @param[in] t the string to replace @a f + + @pre The search string @a f must not be empty. **This precondition is + enforced with an assertion.** + + @since version 2.0.0 + */ + static void replace_substring(std::string& s, const std::string& f, + const std::string& t) + { + assert(not f.empty()); + for (auto pos = s.find(f); // find first occurrence of f + pos != std::string::npos; // make sure f was found + s.replace(pos, f.size(), t), // replace with t, and + pos = s.find(f, pos + t.size())) // find next occurrence of f + {} + } + + /// escape "~" to "~0" and "/" to "~1" + static std::string escape(std::string s) + { + replace_substring(s, "~", "~0"); + replace_substring(s, "/", "~1"); + return s; + } + + /// unescape "~1" to "/" and "~0" to "~" (order is important!) + static void unescape(std::string& s) + { + replace_substring(s, "~1", "/"); + replace_substring(s, "~0", "~"); + } + + /*! + @param[in] reference_string the reference string to the current value + @param[in] value the value to consider + @param[in,out] result the result object to insert values to + + @note Empty objects or arrays are flattened to `null`.
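Putting the splitting and escaping rules above together, a brief usage sketch:
@code {.cpp}
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    using json = nlohmann::json;

    json j = {{"numbers", {1, 2, 3}}, {"a/b", {{"~tilde", true}}}};

    // plain tokens address nested values
    assert(j.at(json::json_pointer("/numbers/0")) == 1);

    // '/' and '~' inside a key are written as "~1" and "~0" in the pointer
    assert(j.at(json::json_pointer("/a~1b/~0tilde")) == true);
}
@endcode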
+ */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + static void flatten(const std::string& reference_string, + const NLOHMANN_BASIC_JSON_TPL& value, + NLOHMANN_BASIC_JSON_TPL& result); + + /*! + @param[in] value flattened JSON + + @return unflattened JSON + + @throw parse_error.109 if array index is not a number + @throw type_error.314 if value is not an object + @throw type_error.315 if object values are not primitive + @throw type_error.313 if value cannot be unflattened + */ + NLOHMANN_BASIC_JSON_TPL_DECLARATION + static NLOHMANN_BASIC_JSON_TPL + unflatten(const NLOHMANN_BASIC_JSON_TPL& value); + + friend bool operator==(json_pointer const& lhs, + json_pointer const& rhs) noexcept; + + friend bool operator!=(json_pointer const& lhs, + json_pointer const& rhs) noexcept; + + /// the reference tokens + std::vector<std::string> reference_tokens; +}; + +/*! +@brief a class to store JSON values + +@tparam ObjectType type for JSON objects (`std::map` by default; will be used +in @ref object_t) +@tparam ArrayType type for JSON arrays (`std::vector` by default; will be used +in @ref array_t) +@tparam StringType type for JSON strings and object keys (`std::string` by +default; will be used in @ref string_t) +@tparam BooleanType type for JSON booleans (`bool` by default; will be used +in @ref boolean_t) +@tparam NumberIntegerType type for JSON integer numbers (`int64_t` by +default; will be used in @ref number_integer_t) +@tparam NumberUnsignedType type for JSON unsigned integer numbers (@c +`uint64_t` by default; will be used in @ref number_unsigned_t) +@tparam NumberFloatType type for JSON floating-point numbers (`double` by +default; will be used in @ref number_float_t) +@tparam AllocatorType type of the allocator to use (`std::allocator` by +default) +@tparam JSONSerializer the serializer to resolve internal calls to `to_json()` +and `from_json()` (@ref adl_serializer by default) + +@requirement The class satisfies the following concept requirements: +- Basic + - [DefaultConstructible](http://en.cppreference.com/w/cpp/concept/DefaultConstructible): + JSON values can be default constructed. The result will be a JSON null + value. + - [MoveConstructible](http://en.cppreference.com/w/cpp/concept/MoveConstructible): + A JSON value can be constructed from an rvalue argument. + - [CopyConstructible](http://en.cppreference.com/w/cpp/concept/CopyConstructible): + A JSON value can be copy-constructed from an lvalue expression. + - [MoveAssignable](http://en.cppreference.com/w/cpp/concept/MoveAssignable): + A JSON value van be assigned from an rvalue argument. + - [CopyAssignable](http://en.cppreference.com/w/cpp/concept/CopyAssignable): + A JSON value can be copy-assigned from an lvalue expression. + - [Destructible](http://en.cppreference.com/w/cpp/concept/Destructible): + JSON values can be destructed. +- Layout + - [StandardLayoutType](http://en.cppreference.com/w/cpp/concept/StandardLayoutType): + JSON values have + [standard layout](http://en.cppreference.com/w/cpp/language/data_members#Standard_layout): + All non-static data members are private and standard layout types, the + class has no virtual functions or (virtual) base classes. +- Library-wide + - [EqualityComparable](http://en.cppreference.com/w/cpp/concept/EqualityComparable): + JSON values can be compared with `==`, see @ref + operator==(const_reference,const_reference). 
+ - [LessThanComparable](http://en.cppreference.com/w/cpp/concept/LessThanComparable): + JSON values can be compared with `<`, see @ref + operator<(const_reference,const_reference). + - [Swappable](http://en.cppreference.com/w/cpp/concept/Swappable): + Any JSON lvalue or rvalue of can be swapped with any lvalue or rvalue of + other compatible types, using unqualified function call @ref swap(). + - [NullablePointer](http://en.cppreference.com/w/cpp/concept/NullablePointer): + JSON values can be compared against `std::nullptr_t` objects which are used + to model the `null` value. +- Container + - [Container](http://en.cppreference.com/w/cpp/concept/Container): + JSON values can be used like STL containers and provide iterator access. + - [ReversibleContainer](http://en.cppreference.com/w/cpp/concept/ReversibleContainer); + JSON values can be used like STL containers and provide reverse iterator + access. + +@invariant The member variables @a m_value and @a m_type have the following +relationship: +- If `m_type == value_t::object`, then `m_value.object != nullptr`. +- If `m_type == value_t::array`, then `m_value.array != nullptr`. +- If `m_type == value_t::string`, then `m_value.string != nullptr`. +The invariants are checked by member function assert_invariant(). + +@internal +@note ObjectType trick from http://stackoverflow.com/a/9860911 +@endinternal + +@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange +Format](http://rfc7159.net/rfc7159) + +@since version 1.0.0 + +@nosubgrouping +*/ +NLOHMANN_BASIC_JSON_TPL_DECLARATION +class basic_json +{ + private: + template<detail::value_t> friend struct detail::external_constructor; + friend ::nlohmann::json_pointer; + friend ::nlohmann::detail::parser<basic_json>; + friend ::nlohmann::detail::serializer<basic_json>; + template<typename BasicJsonType> + friend class ::nlohmann::detail::iter_impl; + template<typename BasicJsonType, typename CharType> + friend class ::nlohmann::detail::binary_writer; + template<typename BasicJsonType> + friend class ::nlohmann::detail::binary_reader; + + /// workaround type for MSVC + using basic_json_t = NLOHMANN_BASIC_JSON_TPL; + + // convenience aliases for types residing in namespace detail; + using lexer = ::nlohmann::detail::lexer<basic_json>; + using parser = ::nlohmann::detail::parser<basic_json>; + + using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t; + template<typename BasicJsonType> + using internal_iterator = ::nlohmann::detail::internal_iterator<BasicJsonType>; + template<typename BasicJsonType> + using iter_impl = ::nlohmann::detail::iter_impl<BasicJsonType>; + template<typename Iterator> + using iteration_proxy = ::nlohmann::detail::iteration_proxy<Iterator>; + template<typename Base> using json_reverse_iterator = ::nlohmann::detail::json_reverse_iterator<Base>; + + template<typename CharType> + using output_adapter_t = ::nlohmann::detail::output_adapter_t<CharType>; + + using binary_reader = ::nlohmann::detail::binary_reader<basic_json>; + template<typename CharType> using binary_writer = ::nlohmann::detail::binary_writer<basic_json, CharType>; + + using serializer = ::nlohmann::detail::serializer<basic_json>; + + public: + using value_t = detail::value_t; + /// @copydoc nlohmann::json_pointer + using json_pointer = ::nlohmann::json_pointer; + template<typename T, typename SFINAE> + using json_serializer = JSONSerializer<T, SFINAE>; + /// helper type for initializer lists of basic_json values + using initializer_list_t = 
std::initializer_list<detail::json_ref<basic_json>>; + + //////////////// + // exceptions // + //////////////// + + /// @name exceptions + /// Classes to implement user-defined exceptions. + /// @{ + + /// @copydoc detail::exception + using exception = detail::exception; + /// @copydoc detail::parse_error + using parse_error = detail::parse_error; + /// @copydoc detail::invalid_iterator + using invalid_iterator = detail::invalid_iterator; + /// @copydoc detail::type_error + using type_error = detail::type_error; + /// @copydoc detail::out_of_range + using out_of_range = detail::out_of_range; + /// @copydoc detail::other_error + using other_error = detail::other_error; + + /// @} + + + ///////////////////// + // container types // + ///////////////////// + + /// @name container types + /// The canonic container types to use @ref basic_json like any other STL + /// container. + /// @{ + + /// the type of elements in a basic_json container + using value_type = basic_json; + + /// the type of an element reference + using reference = value_type&; + /// the type of an element const reference + using const_reference = const value_type&; + + /// a type to represent differences between iterators + using difference_type = std::ptrdiff_t; + /// a type to represent container sizes + using size_type = std::size_t; + + /// the allocator type + using allocator_type = AllocatorType<basic_json>; + + /// the type of an element pointer + using pointer = typename std::allocator_traits<allocator_type>::pointer; + /// the type of an element const pointer + using const_pointer = typename std::allocator_traits<allocator_type>::const_pointer; + + /// an iterator for a basic_json container + using iterator = iter_impl<basic_json>; + /// a const iterator for a basic_json container + using const_iterator = iter_impl<const basic_json>; + /// a reverse iterator for a basic_json container + using reverse_iterator = json_reverse_iterator<typename basic_json::iterator>; + /// a const reverse iterator for a basic_json container + using const_reverse_iterator = json_reverse_iterator<typename basic_json::const_iterator>; + + /// @} + + + /*! + @brief returns the allocator associated with the container + */ + static allocator_type get_allocator() + { + return allocator_type(); + } + + /*! + @brief returns version information on the library + + This function returns a JSON object with information about the library, + including the version number and information on the platform and compiler. + + @return JSON object holding version information + key | description + ----------- | --------------- + `compiler` | Information on the used compiler. It is an object with the following keys: `c++` (the used C++ standard), `family` (the compiler family; possible values are `clang`, `icc`, `gcc`, `ilecpp`, `msvc`, `pgcpp`, `sunpro`, and `unknown`), and `version` (the compiler version). + `copyright` | The copyright line for the library as string. + `name` | The name of the library as string. + `platform` | The used platform as string. Possible values are `win32`, `linux`, `apple`, `unix`, and `unknown`. + `url` | The URL of the project as string. + `version` | The version of the library. It is an object with the following keys: `major`, `minor`, and `patch` as defined by [Semantic Versioning](http://semver.org), and `string` (the version string). 
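The object described in the table above can simply be dumped, for instance to record which library build was compiled in (a sketch):
@code {.cpp}
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    // prints name, version, platform and compiler details as pretty-printed JSON
    std::cout << nlohmann::json::meta().dump(4) << std::endl;
}
@endcode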
+ + @liveexample{The following code shows an example output of the `meta()` + function.,meta} + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @complexity Constant. + + @since 2.1.0 + */ + static basic_json meta() + { + basic_json result; + + result["copyright"] = "(C) 2013-2017 Niels Lohmann"; + result["name"] = "JSON for Modern C++"; + result["url"] = "https://github.com/nlohmann/json"; + result["version"] = + { + {"string", "3.0.1"}, {"major", 3}, {"minor", 0}, {"patch", 1} + }; + +#ifdef _WIN32 + result["platform"] = "win32"; +#elif defined __linux__ + result["platform"] = "linux"; +#elif defined __APPLE__ + result["platform"] = "apple"; +#elif defined __unix__ + result["platform"] = "unix"; +#else + result["platform"] = "unknown"; +#endif + +#if defined(__ICC) || defined(__INTEL_COMPILER) + result["compiler"] = {{"family", "icc"}, {"version", __INTEL_COMPILER}}; +#elif defined(__clang__) + result["compiler"] = {{"family", "clang"}, {"version", __clang_version__}}; +#elif defined(__GNUC__) || defined(__GNUG__) + result["compiler"] = {{"family", "gcc"}, {"version", std::to_string(__GNUC__) + "." + std::to_string(__GNUC_MINOR__) + "." + std::to_string(__GNUC_PATCHLEVEL__)}}; +#elif defined(__HP_cc) || defined(__HP_aCC) + result["compiler"] = "hp" +#elif defined(__IBMCPP__) + result["compiler"] = {{"family", "ilecpp"}, {"version", __IBMCPP__}}; +#elif defined(_MSC_VER) + result["compiler"] = {{"family", "msvc"}, {"version", _MSC_VER}}; +#elif defined(__PGI) + result["compiler"] = {{"family", "pgcpp"}, {"version", __PGI}}; +#elif defined(__SUNPRO_CC) + result["compiler"] = {{"family", "sunpro"}, {"version", __SUNPRO_CC}}; +#else + result["compiler"] = {{"family", "unknown"}, {"version", "unknown"}}; +#endif + +#ifdef __cplusplus + result["compiler"]["c++"] = std::to_string(__cplusplus); +#else + result["compiler"]["c++"] = "unknown"; +#endif + return result; + } + + + /////////////////////////// + // JSON value data types // + /////////////////////////// + + /// @name JSON value data types + /// The data types to store a JSON value. These types are derived from + /// the template arguments passed to class @ref basic_json. + /// @{ + +#if defined(JSON_HAS_CPP_14) + // Use transparent comparator if possible, combined with perfect forwarding + // on find() and count() calls prevents unnecessary string construction. + using object_comparator_t = std::less<>; +#else + using object_comparator_t = std::less<StringType>; +#endif + + /*! + @brief a type for an object + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON objects as follows: + > An object is an unordered collection of zero or more name/value pairs, + > where a name is a string and a value is a string, number, boolean, null, + > object, or array. + + To store objects in C++, a type is defined by the template parameters + described below. + + @tparam ObjectType the container to store objects (e.g., `std::map` or + `std::unordered_map`) + @tparam StringType the type of the keys or names (e.g., `std::string`). + The comparison function `std::less<StringType>` is used to order elements + inside the container. 
+ @tparam AllocatorType the allocator to use for objects (e.g., + `std::allocator`) + + #### Default type + + With the default values for @a ObjectType (`std::map`), @a StringType + (`std::string`), and @a AllocatorType (`std::allocator`), the default + value for @a object_t is: + + @code {.cpp} + std::map< + std::string, // key_type + basic_json, // value_type + std::less<std::string>, // key_compare + std::allocator<std::pair<const std::string, basic_json>> // allocator_type + > + @endcode + + #### Behavior + + The choice of @a object_t influences the behavior of the JSON class. With + the default type, objects have the following behavior: + + - When all names are unique, objects will be interoperable in the sense + that all software implementations receiving that object will agree on + the name-value mappings. + - When the names within an object are not unique, later stored name/value + pairs overwrite previously stored name/value pairs, leaving the used + names unique. For instance, `{"key": 1}` and `{"key": 2, "key": 1}` will + be treated as equal and both stored as `{"key": 1}`. + - Internally, name/value pairs are stored in lexicographical order of the + names. Objects will also be serialized (see @ref dump) in this order. + For instance, `{"b": 1, "a": 2}` and `{"a": 2, "b": 1}` will be stored + and serialized as `{"a": 2, "b": 1}`. + - When comparing objects, the order of the name/value pairs is irrelevant. + This makes objects interoperable in the sense that they will not be + affected by these differences. For instance, `{"b": 1, "a": 2}` and + `{"a": 2, "b": 1}` will be treated as equal. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the maximum depth of nesting. + + In this class, the object's limit of nesting is not explicitly constrained. + However, a maximum depth of nesting may be introduced by the compiler or + runtime environment. A theoretical limit can be queried by calling the + @ref max_size function of a JSON object. + + #### Storage + + Objects are stored as pointers in a @ref basic_json type. That is, for any + access to object values, a pointer of type `object_t*` must be + dereferenced. + + @sa @ref array_t -- type for an array value + + @since version 1.0.0 + + @note The order name/value pairs are added to the object is *not* + preserved by the library. Therefore, iterating an object may return + name/value pairs in a different order than they were originally stored. In + fact, keys will be traversed in alphabetical order as `std::map` with + `std::less` is used by default. Please note this behavior conforms to [RFC + 7159](http://rfc7159.net/rfc7159), because any order implements the + specified "unordered" nature of JSON objects. + */ + using object_t = ObjectType<StringType, + basic_json, + object_comparator_t, + AllocatorType<std::pair<const StringType, + basic_json>>>; + + /*! + @brief a type for an array + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON arrays as follows: + > An array is an ordered sequence of zero or more values. + + To store objects in C++, a type is defined by the template parameters + explained below. 
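The object ordering and duplicate-key behavior described above, in short (a sketch):
@code {.cpp}
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    using json = nlohmann::json;

    // keys are kept in std::map order, not in insertion order
    json j = {{"b", 1}, {"a", 2}};
    assert(j.dump() == "{\"a\":2,\"b\":1}");

    // when parsing, a later duplicate overwrites an earlier one
    json k = json::parse("{\"key\": 2, \"key\": 1}");
    assert(k["key"] == 1);
}
@endcode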
+ + @tparam ArrayType container type to store arrays (e.g., `std::vector` or + `std::list`) + @tparam AllocatorType allocator to use for arrays (e.g., `std::allocator`) + + #### Default type + + With the default values for @a ArrayType (`std::vector`) and @a + AllocatorType (`std::allocator`), the default value for @a array_t is: + + @code {.cpp} + std::vector< + basic_json, // value_type + std::allocator<basic_json> // allocator_type + > + @endcode + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the maximum depth of nesting. + + In this class, the array's limit of nesting is not explicitly constrained. + However, a maximum depth of nesting may be introduced by the compiler or + runtime environment. A theoretical limit can be queried by calling the + @ref max_size function of a JSON array. + + #### Storage + + Arrays are stored as pointers in a @ref basic_json type. That is, for any + access to array values, a pointer of type `array_t*` must be dereferenced. + + @sa @ref object_t -- type for an object value + + @since version 1.0.0 + */ + using array_t = ArrayType<basic_json, AllocatorType<basic_json>>; + + /*! + @brief a type for a string + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON strings as follows: + > A string is a sequence of zero or more Unicode characters. + + To store objects in C++, a type is defined by the template parameter + described below. Unicode values are split by the JSON class into + byte-sized characters during deserialization. + + @tparam StringType the container to store strings (e.g., `std::string`). + Note this container is used for keys/names in objects, see @ref object_t. + + #### Default type + + With the default values for @a StringType (`std::string`), the default + value for @a string_t is: + + @code {.cpp} + std::string + @endcode + + #### Encoding + + Strings are stored in UTF-8 encoding. Therefore, functions like + `std::string::size()` or `std::string::length()` return the number of + bytes in the string rather than the number of characters or glyphs. + + #### String comparison + + [RFC 7159](http://rfc7159.net/rfc7159) states: + > Software implementations are typically required to test names of object + > members for equality. Implementations that transform the textual + > representation into sequences of Unicode code units and then perform the + > comparison numerically, code unit by code unit, are interoperable in the + > sense that implementations will agree in all cases on equality or + > inequality of two strings. For example, implementations that compare + > strings with escaped characters unconverted may incorrectly find that + > `"a\\b"` and `"a\u005Cb"` are not equal. + + This implementation is interoperable as it does compare strings code unit + by code unit. + + #### Storage + + String values are stored as pointers in a @ref basic_json type. That is, + for any access to string values, a pointer of type `string_t*` must be + dereferenced. + + @since version 1.0.0 + */ + using string_t = StringType; + + /*! + @brief a type for a boolean + + [RFC 7159](http://rfc7159.net/rfc7159) implicitly describes a boolean as a + type which differentiates the two literals `true` and `false`. + + To store objects in C++, a type is defined by the template parameter @a + BooleanType which chooses the type to use. 
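Because strings (@ref string_t above) are stored as UTF-8 byte sequences, size-related queries count bytes rather than characters (a sketch):
@code {.cpp}
#include <cassert>
#include <string>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = "\xE2\x82\xAC";            // U+20AC EURO SIGN, three UTF-8 bytes
    assert(j.get<std::string>().size() == 3);     // byte count, not character count
}
@endcode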
+ + #### Default type + + With the default values for @a BooleanType (`bool`), the default value for + @a boolean_t is: + + @code {.cpp} + bool + @endcode + + #### Storage + + Boolean values are stored directly inside a @ref basic_json type. + + @since version 1.0.0 + */ + using boolean_t = BooleanType; + + /*! + @brief a type for a number (integer) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. + However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store integer numbers in C++, a type is defined by the template + parameter @a NumberIntegerType which chooses the type to use. + + #### Default type + + With the default values for @a NumberIntegerType (`int64_t`), the default + value for @a number_integer_t is: + + @code {.cpp} + int64_t + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in integer literals lead to an interpretation as octal + number. Internally, the value will be stored as decimal number. For + instance, the C++ integer literal `010` will be serialized to `8`. + During deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the range and precision of numbers. + + When the default type is used, the maximal integer number that can be + stored is `9223372036854775807` (INT64_MAX) and the minimal integer number + that can be stored is `-9223372036854775808` (INT64_MIN). Integer numbers + that are out of range will yield over/underflow when used in a + constructor. During deserialization, too large or small integer numbers + will be automatically be stored as @ref number_unsigned_t or @ref + number_float_t. + + [RFC 7159](http://rfc7159.net/rfc7159) further states: + > Note that when such software is used, numbers that are integers and are + > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense + > that implementations will agree exactly on their numeric values. + + As this range is a subrange of the exactly supported range [INT64_MIN, + INT64_MAX], this class's integer type is interoperable. + + #### Storage + + Integer number values are stored directly inside a @ref basic_json type. + + @sa @ref number_float_t -- type for number values (floating-point) + + @sa @ref number_unsigned_t -- type for number values (unsigned integer) + + @since version 1.0.0 + */ + using number_integer_t = NumberIntegerType; + + /*! + @brief a type for a number (unsigned) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. 
A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. + However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store unsigned integer numbers in C++, a type is defined by the + template parameter @a NumberUnsignedType which chooses the type to use. + + #### Default type + + With the default values for @a NumberUnsignedType (`uint64_t`), the + default value for @a number_unsigned_t is: + + @code {.cpp} + uint64_t + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in integer literals lead to an interpretation as octal + number. Internally, the value will be stored as decimal number. For + instance, the C++ integer literal `010` will be serialized to `8`. + During deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the range and precision of numbers. + + When the default type is used, the maximal integer number that can be + stored is `18446744073709551615` (UINT64_MAX) and the minimal integer + number that can be stored is `0`. Integer numbers that are out of range + will yield over/underflow when used in a constructor. During + deserialization, too large or small integer numbers will be automatically + be stored as @ref number_integer_t or @ref number_float_t. + + [RFC 7159](http://rfc7159.net/rfc7159) further states: + > Note that when such software is used, numbers that are integers and are + > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense + > that implementations will agree exactly on their numeric values. + + As this range is a subrange (when considered in conjunction with the + number_integer_t type) of the exactly supported range [0, UINT64_MAX], + this class's integer type is interoperable. + + #### Storage + + Integer number values are stored directly inside a @ref basic_json type. + + @sa @ref number_float_t -- type for number values (floating-point) + @sa @ref number_integer_t -- type for number values (integer) + + @since version 2.0.0 + */ + using number_unsigned_t = NumberUnsignedType; + + /*! + @brief a type for a number (floating-point) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. 
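How the three number types described above cooperate during parsing (a sketch):
@code {.cpp}
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    using json = nlohmann::json;

    json j = json::parse("[-1, 18446744073709551615, 1.5]");

    assert(j[0].is_number_integer());    // negative values use number_integer_t
    assert(j[1].is_number_unsigned());   // UINT64_MAX only fits number_unsigned_t
    assert(j[2].is_number_float());      // values with a fraction use number_float_t
}
@endcode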
+ However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store floating-point numbers in C++, a type is defined by the template + parameter @a NumberFloatType which chooses the type to use. + + #### Default type + + With the default values for @a NumberFloatType (`double`), the default + value for @a number_float_t is: + + @code {.cpp} + double + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in floating-point literals will be ignored. Internally, + the value will be stored as decimal number. For instance, the C++ + floating-point literal `01.2` will be serialized to `1.2`. During + deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) states: + > This specification allows implementations to set limits on the range and + > precision of numbers accepted. Since software that implements IEEE + > 754-2008 binary64 (double precision) numbers is generally available and + > widely used, good interoperability can be achieved by implementations + > that expect no more precision or range than these provide, in the sense + > that implementations will approximate JSON numbers within the expected + > precision. + + This implementation does exactly follow this approach, as it uses double + precision floating-point numbers. Note values smaller than + `-1.79769313486232e+308` and values greater than `1.79769313486232e+308` + will be stored as NaN internally and be serialized to `null`. + + #### Storage + + Floating-point number values are stored directly inside a @ref basic_json + type. + + @sa @ref number_integer_t -- type for number values (integer) + + @sa @ref number_unsigned_t -- type for number values (unsigned integer) + + @since version 1.0.0 + */ + using number_float_t = NumberFloatType; + + /// @} + + private: + + /// helper for exception-safe object creation + template<typename T, typename... Args> + static T* create(Args&& ... args) + { + AllocatorType<T> alloc; + using AllocatorTraits = std::allocator_traits<AllocatorType<T>>; + + auto deleter = [&](T * object) + { + AllocatorTraits::deallocate(alloc, object, 1); + }; + std::unique_ptr<T, decltype(deleter)> object(AllocatorTraits::allocate(alloc, 1), deleter); + AllocatorTraits::construct(alloc, object.get(), std::forward<Args>(args)...); + assert(object != nullptr); + return object.release(); + } + + //////////////////////// + // JSON value storage // + //////////////////////// + + /*! + @brief a JSON value + + The actual storage for a JSON value of the @ref basic_json class. This + union combines the different storage types for the JSON value types + defined in @ref value_t. + + JSON type | value_t type | used type + --------- | --------------- | ------------------------ + object | object | pointer to @ref object_t + array | array | pointer to @ref array_t + string | string | pointer to @ref string_t + boolean | boolean | @ref boolean_t + number | number_integer | @ref number_integer_t + number | number_unsigned | @ref number_unsigned_t + number | number_float | @ref number_float_t + null | null | *no value is stored* + + @note Variable-length types (objects, arrays, and strings) are stored as + pointers. 
The size of the union should not exceed 64 bits if the default + value types are used. + + @since version 1.0.0 + */ + union json_value + { + /// object (stored with pointer to save storage) + object_t* object; + /// array (stored with pointer to save storage) + array_t* array; + /// string (stored with pointer to save storage) + string_t* string; + /// boolean + boolean_t boolean; + /// number (integer) + number_integer_t number_integer; + /// number (unsigned integer) + number_unsigned_t number_unsigned; + /// number (floating-point) + number_float_t number_float; + + /// default constructor (for null values) + json_value() = default; + /// constructor for booleans + json_value(boolean_t v) noexcept : boolean(v) {} + /// constructor for numbers (integer) + json_value(number_integer_t v) noexcept : number_integer(v) {} + /// constructor for numbers (unsigned) + json_value(number_unsigned_t v) noexcept : number_unsigned(v) {} + /// constructor for numbers (floating-point) + json_value(number_float_t v) noexcept : number_float(v) {} + /// constructor for empty values of a given type + json_value(value_t t) + { + switch (t) + { + case value_t::object: + { + object = create<object_t>(); + break; + } + + case value_t::array: + { + array = create<array_t>(); + break; + } + + case value_t::string: + { + string = create<string_t>(""); + break; + } + + case value_t::boolean: + { + boolean = boolean_t(false); + break; + } + + case value_t::number_integer: + { + number_integer = number_integer_t(0); + break; + } + + case value_t::number_unsigned: + { + number_unsigned = number_unsigned_t(0); + break; + } + + case value_t::number_float: + { + number_float = number_float_t(0.0); + break; + } + + case value_t::null: + { + object = nullptr; // silence warning, see #821 + break; + } + + default: + { + object = nullptr; // silence warning, see #821 + if (JSON_UNLIKELY(t == value_t::null)) + { + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.0.1")); // LCOV_EXCL_LINE + } + break; + } + } + } + + /// constructor for strings + json_value(const string_t& value) + { + string = create<string_t>(value); + } + + /// constructor for rvalue strings + json_value(string_t&& value) + { + string = create<string_t>(std::move(value)); + } + + /// constructor for objects + json_value(const object_t& value) + { + object = create<object_t>(value); + } + + /// constructor for rvalue objects + json_value(object_t&& value) + { + object = create<object_t>(std::move(value)); + } + + /// constructor for arrays + json_value(const array_t& value) + { + array = create<array_t>(value); + } + + /// constructor for rvalue arrays + json_value(array_t&& value) + { + array = create<array_t>(std::move(value)); + } + + void destroy(value_t t) + { + switch (t) + { + case value_t::object: + { + AllocatorType<object_t> alloc; + std::allocator_traits<decltype(alloc)>::destroy(alloc, object); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, object, 1); + break; + } + + case value_t::array: + { + AllocatorType<array_t> alloc; + std::allocator_traits<decltype(alloc)>::destroy(alloc, array); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, array, 1); + break; + } + + case value_t::string: + { + AllocatorType<string_t> alloc; + std::allocator_traits<decltype(alloc)>::destroy(alloc, string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, string, 1); + break; + } + + default: + { + break; + } + } + } + }; + + /*! 
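Externally, the type tag that discriminates this union is visible through `type()` and the `is_*()` predicates (a sketch):
@code {.cpp}
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    using json = nlohmann::json;

    json j;                                          // default-constructed -> null, no payload allocated
    assert(j.type() == json::value_t::null);

    j = 42;                                          // scalar payload stored directly in the union
    assert(j.type() == json::value_t::number_integer);

    j = "some text";                                 // string payload lives behind a pointer
    assert(j.type() == json::value_t::string);
}
@endcode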
+ @brief checks the class invariants + + This function asserts the class invariants. It needs to be called at the + end of every constructor to make sure that created objects respect the + invariant. Furthermore, it has to be called each time the type of a JSON + value is changed, because the invariant expresses a relationship between + @a m_type and @a m_value. + */ + void assert_invariant() const + { + assert(m_type != value_t::object or m_value.object != nullptr); + assert(m_type != value_t::array or m_value.array != nullptr); + assert(m_type != value_t::string or m_value.string != nullptr); + } + + public: + ////////////////////////// + // JSON parser callback // + ////////////////////////// + + /*! + @brief parser event types + + The parser callback distinguishes the following events: + - `object_start`: the parser read `{` and started to process a JSON object + - `key`: the parser read a key of a value in an object + - `object_end`: the parser read `}` and finished processing a JSON object + - `array_start`: the parser read `[` and started to process a JSON array + - `array_end`: the parser read `]` and finished processing a JSON array + - `value`: the parser finished reading a JSON value + + @image html callback_events.png "Example when certain parse events are triggered" + + @sa @ref parser_callback_t for more information and examples + */ + using parse_event_t = typename parser::parse_event_t; + + /*! + @brief per-element parser callback type + + With a parser callback function, the result of parsing a JSON text can be + influenced. When passed to @ref parse, it is called on certain events + (passed as @ref parse_event_t via parameter @a event) with a set recursion + depth @a depth and context JSON value @a parsed. The return value of the + callback function is a boolean indicating whether the element that emitted + the callback shall be kept or not. + + We distinguish six scenarios (determined by the event type) in which the + callback function can be called. The following table describes the values + of the parameters @a depth, @a event, and @a parsed. + + parameter @a event | description | parameter @a depth | parameter @a parsed + ------------------ | ----------- | ------------------ | ------------------- + parse_event_t::object_start | the parser read `{` and started to process a JSON object | depth of the parent of the JSON object | a JSON value with type discarded + parse_event_t::key | the parser read a key of a value in an object | depth of the currently parsed JSON object | a JSON string containing the key + parse_event_t::object_end | the parser read `}` and finished processing a JSON object | depth of the parent of the JSON object | the parsed JSON object + parse_event_t::array_start | the parser read `[` and started to process a JSON array | depth of the parent of the JSON array | a JSON value with type discarded + parse_event_t::array_end | the parser read `]` and finished processing a JSON array | depth of the parent of the JSON array | the parsed JSON array + parse_event_t::value | the parser finished reading a JSON value | depth of the value | the parsed JSON value + + @image html callback_events.png "Example when certain parse events are triggered" + + Discarding a value (i.e., returning `false`) has different effects + depending on the context in which function was called: + + - Discarded values in structured types are skipped. That is, the parser + will behave as if the discarded value was never read. 
+ - In case a value outside a structured type is skipped, it is replaced + with `null`. This case happens if the top-level element is skipped. + + @param[in] depth the depth of the recursion during parsing + + @param[in] event an event of type parse_event_t indicating the context in + the callback function has been called + + @param[in,out] parsed the current intermediate parse result; note that + writing to this value has no effect for parse_event_t::key events + + @return Whether the JSON value which called the function during parsing + should be kept (`true`) or not (`false`). In the latter case, it is either + skipped completely or replaced by an empty discarded object. + + @sa @ref parse for examples + + @since version 1.0.0 + */ + using parser_callback_t = typename parser::parser_callback_t; + + + ////////////////// + // constructors // + ////////////////// + + /// @name constructors and destructors + /// Constructors of class @ref basic_json, copy/move constructor, copy + /// assignment, static functions creating objects, and the destructor. + /// @{ + + /*! + @brief create an empty value with a given type + + Create an empty JSON value with a given type. The value will be default + initialized with an empty value which depends on the type: + + Value type | initial value + ----------- | ------------- + null | `null` + boolean | `false` + string | `""` + number | `0` + object | `{}` + array | `[]` + + @param[in] v the type of the value to create + + @complexity Constant. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows the constructor for different @ref + value_t values,basic_json__value_t} + + @sa @ref clear() -- restores the postcondition of this constructor + + @since version 1.0.0 + */ + basic_json(const value_t v) + : m_type(v), m_value(v) + { + assert_invariant(); + } + + /*! + @brief create a null object + + Create a `null` JSON value. It either takes a null pointer as parameter + (explicitly creating `null`) or no parameter (implicitly creating `null`). + The passed null pointer itself is not read -- it is only used to choose + the right constructor. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this constructor never throws + exceptions. + + @liveexample{The following code shows the constructor with and without a + null pointer parameter.,basic_json__nullptr_t} + + @since version 1.0.0 + */ + basic_json(std::nullptr_t = nullptr) noexcept + : basic_json(value_t::null) + { + assert_invariant(); + } + + /*! + @brief create a JSON value + + This is a "catch all" constructor for all compatible JSON types; that is, + types for which a `to_json()` method exists. The constructor forwards the + parameter @a val to that method (to `json_serializer<U>::to_json` method + with `U = uncvref_t<CompatibleType>`, to be exact). + + Template type @a CompatibleType includes, but is not limited to, the + following types: + - **arrays**: @ref array_t and all kinds of compatible containers such as + `std::vector`, `std::deque`, `std::list`, `std::forward_list`, + `std::array`, `std::valarray`, `std::set`, `std::unordered_set`, + `std::multiset`, and `std::unordered_multiset` with a `value_type` from + which a @ref basic_json value can be constructed. 
+ - **objects**: @ref object_t and all kinds of compatible associative + containers such as `std::map`, `std::unordered_map`, `std::multimap`, + and `std::unordered_multimap` with a `key_type` compatible to + @ref string_t and a `value_type` from which a @ref basic_json value can + be constructed. + - **strings**: @ref string_t, string literals, and all compatible string + containers can be used. + - **numbers**: @ref number_integer_t, @ref number_unsigned_t, + @ref number_float_t, and all convertible number types such as `int`, + `size_t`, `int64_t`, `float` or `double` can be used. + - **boolean**: @ref boolean_t / `bool` can be used. + + See the examples below. + + @tparam CompatibleType a type such that: + - @a CompatibleType is not derived from `std::istream`, + - @a CompatibleType is not @ref basic_json (to avoid hijacking copy/move + constructors), + - @a CompatibleType is not a @ref basic_json nested type (e.g., + @ref json_pointer, @ref iterator, etc ...) + - @ref @ref json_serializer<U> has a + `to_json(basic_json_t&, CompatibleType&&)` method + + @tparam U = `uncvref_t<CompatibleType>` + + @param[in] val the value to be forwarded to the respective constructor + + @complexity Usually linear in the size of the passed @a val, also + depending on the implementation of the called `to_json()` + method. + + @exceptionsafety Depends on the called constructor. For types directly + supported by the library (i.e., all types for which no `to_json()` function + was provided), strong guarantee holds: if an exception is thrown, there are + no changes to any JSON value. + + @liveexample{The following code shows the constructor with several + compatible types.,basic_json__CompatibleType} + + @since version 2.1.0 + */ + template<typename CompatibleType, typename U = detail::uncvref_t<CompatibleType>, + detail::enable_if_t<not std::is_base_of<std::istream, U>::value and + not std::is_same<U, basic_json_t>::value and + not detail::is_basic_json_nested_type< + basic_json_t, U>::value and + detail::has_to_json<basic_json, U>::value, + int> = 0> + basic_json(CompatibleType && val) noexcept(noexcept(JSONSerializer<U>::to_json( + std::declval<basic_json_t&>(), std::forward<CompatibleType>(val)))) + { + JSONSerializer<U>::to_json(*this, std::forward<CompatibleType>(val)); + assert_invariant(); + } + + /*! + @brief create a container (array or object) from an initializer list + + Creates a JSON value of type array or object from the passed initializer + list @a init. In case @a type_deduction is `true` (default), the type of + the JSON value to be created is deducted from the initializer list @a init + according to the following rules: + + 1. If the list is empty, an empty JSON object value `{}` is created. + 2. If the list consists of pairs whose first element is a string, a JSON + object value is created where the first elements of the pairs are + treated as keys and the second elements are as values. + 3. In all other cases, an array is created. + + The rules aim to create the best fit between a C++ initializer list and + JSON values. The rationale is as follows: + + 1. The empty initializer list is written as `{}` which is exactly an empty + JSON object. + 2. C++ has no way of describing mapped types other than to list a list of + pairs. As JSON requires that keys must be of type string, rule 2 is the + weakest constraint one can pose on initializer lists to interpret them + as an object. + 3. 
In all other cases, the initializer list could not be interpreted as + JSON object type, so interpreting it as JSON array type is safe. + + With the rules described above, the following JSON values cannot be + expressed by an initializer list: + + - the empty array (`[]`): use @ref array(initializer_list_t) + with an empty initializer list in this case + - arrays whose elements satisfy rule 2: use @ref + array(initializer_list_t) with the same initializer list + in this case + + @note When used without parentheses around an empty initializer list, @ref + basic_json() is called instead of this function, yielding the JSON null + value. + + @param[in] init initializer list with JSON values + + @param[in] type_deduction internal parameter; when set to `true`, the type + of the JSON value is deduced from the initializer list @a init; when set + to `false`, the type provided via @a manual_type is forced. This mode is + used by the functions @ref array(initializer_list_t) and + @ref object(initializer_list_t). + + @param[in] manual_type internal parameter; when @a type_deduction is set + to `false`, the created JSON value will use the provided type (only @ref + value_t::array and @ref value_t::object are valid); when @a type_deduction + is set to `true`, this parameter has no effect + + @throw type_error.301 if @a type_deduction is `false`, @a manual_type is + `value_t::object`, but @a init contains an element which is not a pair + whose first element is a string. In this case, the constructor could not + create an object. Had @a type_deduction been `true`, an array + would have been created. See @ref object(initializer_list_t) + for an example. + + @complexity Linear in the size of the initializer list @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. 
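+
+    A brief usage sketch (assuming the usual `nlohmann::json` alias for
+    @ref basic_json); the deduction rules above pick between object and array:
+    @code {.cpp}
+    json j_empty  = json({});                          // rule 1: empty object {}
+    json j_object = {{"pi", 3.141}, {"happy", true}};  // rule 2: object
+    json j_array  = {1, 2, "three"};                   // rule 3: array
+    json j_nested = {{"list", {1, 2, 3}}};             // {"list": [1, 2, 3]}
+    @endcode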
+ + @liveexample{The example below shows how JSON values are created from + initializer lists.,basic_json__list_init_t} + + @sa @ref array(initializer_list_t) -- create a JSON array + value from an initializer list + @sa @ref object(initializer_list_t) -- create a JSON object + value from an initializer list + + @since version 1.0.0 + */ + basic_json(initializer_list_t init, + bool type_deduction = true, + value_t manual_type = value_t::array) + { + // check if each element is an array with two elements whose first + // element is a string + bool is_an_object = std::all_of(init.begin(), init.end(), + [](const detail::json_ref<basic_json>& element_ref) + { + return (element_ref->is_array() and element_ref->size() == 2 and (*element_ref)[0].is_string()); + }); + + // adjust type if type deduction is not wanted + if (not type_deduction) + { + // if array is wanted, do not create an object though possible + if (manual_type == value_t::array) + { + is_an_object = false; + } + + // if object is wanted but impossible, throw an exception + if (JSON_UNLIKELY(manual_type == value_t::object and not is_an_object)) + { + JSON_THROW(type_error::create(301, "cannot create object from initializer list")); + } + } + + if (is_an_object) + { + // the initializer list is a list of pairs -> create object + m_type = value_t::object; + m_value = value_t::object; + + std::for_each(init.begin(), init.end(), [this](const detail::json_ref<basic_json>& element_ref) + { + auto element = element_ref.moved_or_copied(); + m_value.object->emplace( + std::move(*((*element.m_value.array)[0].m_value.string)), + std::move((*element.m_value.array)[1])); + }); + } + else + { + // the initializer list describes an array -> create array + m_type = value_t::array; + m_value.array = create<array_t>(init.begin(), init.end()); + } + + assert_invariant(); + } + + /*! + @brief explicitly create an array from an initializer list + + Creates a JSON array value from a given initializer list. That is, given a + list of values `a, b, c`, creates the JSON value `[a, b, c]`. If the + initializer list is empty, the empty array `[]` is created. + + @note This function is only needed to express two edge cases that cannot + be realized with the initializer list constructor (@ref + basic_json(initializer_list_t, bool, value_t)). These cases + are: + 1. creating an array whose elements are all pairs whose first element is a + string -- in this case, the initializer list constructor would create an + object, taking the first elements as keys + 2. creating an empty array -- passing the empty initializer list to the + initializer list constructor yields an empty object + + @param[in] init initializer list with JSON values to create an array from + (optional) + + @return JSON array value + + @complexity Linear in the size of @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows an example for the `array` + function.,array} + + @sa @ref basic_json(initializer_list_t, bool, value_t) -- + create a JSON value from an initializer list + @sa @ref object(initializer_list_t) -- create a JSON object + value from an initializer list + + @since version 1.0.0 + */ + static basic_json array(initializer_list_t init = {}) + { + return basic_json(init, false, value_t::array); + } + + /*! + @brief explicitly create an object from an initializer list + + Creates a JSON object value from a given initializer list. 
The initializer + lists elements must be pairs, and their first elements must be strings. If + the initializer list is empty, the empty object `{}` is created. + + @note This function is only added for symmetry reasons. In contrast to the + related function @ref array(initializer_list_t), there are + no cases which can only be expressed by this function. That is, any + initializer list @a init can also be passed to the initializer list + constructor @ref basic_json(initializer_list_t, bool, value_t). + + @param[in] init initializer list to create an object from (optional) + + @return JSON object value + + @throw type_error.301 if @a init is not a list of pairs whose first + elements are strings. In this case, no object can be created. When such a + value is passed to @ref basic_json(initializer_list_t, bool, value_t), + an array would have been created from the passed initializer list @a init. + See example below. + + @complexity Linear in the size of @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows an example for the `object` + function.,object} + + @sa @ref basic_json(initializer_list_t, bool, value_t) -- + create a JSON value from an initializer list + @sa @ref array(initializer_list_t) -- create a JSON array + value from an initializer list + + @since version 1.0.0 + */ + static basic_json object(initializer_list_t init = {}) + { + return basic_json(init, false, value_t::object); + } + + /*! + @brief construct an array with count copies of given value + + Constructs a JSON array value by creating @a cnt copies of a passed value. + In case @a cnt is `0`, an empty array is created. + + @param[in] cnt the number of JSON copies of @a val to create + @param[in] val the JSON value to copy + + @post `std::distance(begin(),end()) == cnt` holds. + + @complexity Linear in @a cnt. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows examples for the @ref + basic_json(size_type\, const basic_json&) + constructor.,basic_json__size_type_basic_json} + + @since version 1.0.0 + */ + basic_json(size_type cnt, const basic_json& val) + : m_type(value_t::array) + { + m_value.array = create<array_t>(cnt, val); + assert_invariant(); + } + + /*! + @brief construct a JSON container given an iterator range + + Constructs the JSON value with the contents of the range `[first, last)`. + The semantics depends on the different types a JSON value can have: + - In case of a null type, invalid_iterator.206 is thrown. + - In case of other primitive types (number, boolean, or string), @a first + must be `begin()` and @a last must be `end()`. In this case, the value is + copied. Otherwise, invalid_iterator.204 is thrown. + - In case of structured types (array, object), the constructor behaves as + similar versions for `std::vector` or `std::map`; that is, a JSON array + or object is constructed from the values in the range. + + @tparam InputIT an input iterator type (@ref iterator or @ref + const_iterator) + + @param[in] first begin of the range to copy from (included) + @param[in] last end of the range to copy from (excluded) + + @pre Iterators @a first and @a last must be initialized. **This + precondition is enforced with an assertion (see warning).** If + assertions are switched off, a violation of this precondition yields + undefined behavior. + + @pre Range `[first, last)` is valid. 
Usually, this precondition cannot be + checked efficiently. Only certain edge cases are detected; see the + description of the exceptions below. A violation of this precondition + yields undefined behavior. + + @warning A precondition is enforced with a runtime assertion that will + result in calling `std::abort` if this precondition is not met. + Assertions can be disabled by defining `NDEBUG` at compile time. + See http://en.cppreference.com/w/cpp/error/assert for more + information. + + @throw invalid_iterator.201 if iterators @a first and @a last are not + compatible (i.e., do not belong to the same JSON value). In this case, + the range `[first, last)` is undefined. + @throw invalid_iterator.204 if iterators @a first and @a last belong to a + primitive type (number, boolean, or string), but @a first does not point + to the first element any more. In this case, the range `[first, last)` is + undefined. See example code below. + @throw invalid_iterator.206 if iterators @a first and @a last belong to a + null value. In this case, the range `[first, last)` is undefined. + + @complexity Linear in distance between @a first and @a last. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The example below shows several ways to create JSON values by + specifying a subrange with iterators.,basic_json__InputIt_InputIt} + + @since version 1.0.0 + */ + template<class InputIT, typename std::enable_if< + std::is_same<InputIT, typename basic_json_t::iterator>::value or + std::is_same<InputIT, typename basic_json_t::const_iterator>::value, int>::type = 0> + basic_json(InputIT first, InputIT last) + { + assert(first.m_object != nullptr); + assert(last.m_object != nullptr); + + // make sure iterator fits the current value + if (JSON_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(201, "iterators are not compatible")); + } + + // copy type from first iterator + m_type = first.m_object->m_type; + + // check if iterator range is complete for primitive values + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + { + if (JSON_UNLIKELY(not first.m_it.primitive_iterator.is_begin() + or not last.m_it.primitive_iterator.is_end())) + { + JSON_THROW(invalid_iterator::create(204, "iterators out of range")); + } + break; + } + + default: + break; + } + + switch (m_type) + { + case value_t::number_integer: + { + m_value.number_integer = first.m_object->m_value.number_integer; + break; + } + + case value_t::number_unsigned: + { + m_value.number_unsigned = first.m_object->m_value.number_unsigned; + break; + } + + case value_t::number_float: + { + m_value.number_float = first.m_object->m_value.number_float; + break; + } + + case value_t::boolean: + { + m_value.boolean = first.m_object->m_value.boolean; + break; + } + + case value_t::string: + { + m_value = *first.m_object->m_value.string; + break; + } + + case value_t::object: + { + m_value.object = create<object_t>(first.m_it.object_iterator, + last.m_it.object_iterator); + break; + } + + case value_t::array: + { + m_value.array = create<array_t>(first.m_it.array_iterator, + last.m_it.array_iterator); + break; + } + + default: + JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " + + std::string(first.m_object->type_name()))); + } + + assert_invariant(); + } + + + /////////////////////////////////////// + // other 
constructors and destructor // + /////////////////////////////////////// + + /// @private + basic_json(const detail::json_ref<basic_json>& ref) + : basic_json(ref.moved_or_copied()) + {} + + /*! + @brief copy constructor + + Creates a copy of a given JSON value. + + @param[in] other the JSON value to copy + + @post `*this == other` + + @complexity Linear in the size of @a other. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is linear. + - As postcondition, it holds: `other == basic_json(other)`. + + @liveexample{The following code shows an example for the copy + constructor.,basic_json__basic_json} + + @since version 1.0.0 + */ + basic_json(const basic_json& other) + : m_type(other.m_type) + { + // check of passed value is valid + other.assert_invariant(); + + switch (m_type) + { + case value_t::object: + { + m_value = *other.m_value.object; + break; + } + + case value_t::array: + { + m_value = *other.m_value.array; + break; + } + + case value_t::string: + { + m_value = *other.m_value.string; + break; + } + + case value_t::boolean: + { + m_value = other.m_value.boolean; + break; + } + + case value_t::number_integer: + { + m_value = other.m_value.number_integer; + break; + } + + case value_t::number_unsigned: + { + m_value = other.m_value.number_unsigned; + break; + } + + case value_t::number_float: + { + m_value = other.m_value.number_float; + break; + } + + default: + break; + } + + assert_invariant(); + } + + /*! + @brief move constructor + + Move constructor. Constructs a JSON value with the contents of the given + value @a other using move semantics. It "steals" the resources from @a + other and leaves it as JSON null value. + + @param[in,out] other value to move to this object + + @post `*this` has the same value as @a other before the call. + @post @a other is a JSON null value. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this constructor never throws + exceptions. + + @requirement This function helps `basic_json` satisfying the + [MoveConstructible](http://en.cppreference.com/w/cpp/concept/MoveConstructible) + requirements. + + @liveexample{The code below shows the move constructor explicitly called + via std::move.,basic_json__moveconstructor} + + @since version 1.0.0 + */ + basic_json(basic_json&& other) noexcept + : m_type(std::move(other.m_type)), + m_value(std::move(other.m_value)) + { + // check that passed value is valid + other.assert_invariant(); + + // invalidate payload + other.m_type = value_t::null; + other.m_value = {}; + + assert_invariant(); + } + + /*! + @brief copy assignment + + Copy assignment operator. Copies a JSON value via the "copy and swap" + strategy: It is expressed in terms of the copy constructor, destructor, + and the `swap()` member function. + + @param[in] other value to copy from + + @complexity Linear. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is linear. + + @liveexample{The code below shows and example for the copy assignment. It + creates a copy of value `a` which is then swapped with `b`. 
Finally\, the + copy of `a` (which is the null value after the swap) is + destroyed.,basic_json__copyassignment} + + @since version 1.0.0 + */ + reference& operator=(basic_json other) noexcept ( + std::is_nothrow_move_constructible<value_t>::value and + std::is_nothrow_move_assignable<value_t>::value and + std::is_nothrow_move_constructible<json_value>::value and + std::is_nothrow_move_assignable<json_value>::value + ) + { + // check that passed value is valid + other.assert_invariant(); + + using std::swap; + swap(m_type, other.m_type); + swap(m_value, other.m_value); + + assert_invariant(); + return *this; + } + + /*! + @brief destructor + + Destroys the JSON value and frees all allocated memory. + + @complexity Linear. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is linear. + - All stored elements are destroyed and all memory is freed. + + @since version 1.0.0 + */ + ~basic_json() + { + assert_invariant(); + m_value.destroy(m_type); + } + + /// @} + + public: + /////////////////////// + // object inspection // + /////////////////////// + + /// @name object inspection + /// Functions to inspect the type of a JSON value. + /// @{ + + /*! + @brief serialization + + Serialization function for JSON values. The function tries to mimic + Python's `json.dumps()` function, and currently supports its @a indent + and @a ensure_ascii parameters. + + @param[in] indent If indent is nonnegative, then array elements and object + members will be pretty-printed with that indent level. An indent level of + `0` will only insert newlines. `-1` (the default) selects the most compact + representation. + @param[in] indent_char The character to use for indentation if @a indent is + greater than `0`. The default is ` ` (space). + @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters + in the output are escaped with `\uXXXX` sequences, and the result consists + of ASCII characters only. + + @return string containing the serialization of the JSON value + + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded + + @complexity Linear. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @liveexample{The following example shows the effect of different @a indent\, + @a indent_char\, and @a ensure_ascii parameters to the result of the + serialization.,dump} + + @see https://docs.python.org/2/library/json.html#json.dump + + @since version 1.0.0; indentation character @a indent_char, option + @a ensure_ascii and exceptions added in version 3.0.0 + */ + string_t dump(const int indent = -1, const char indent_char = ' ', + const bool ensure_ascii = false) const + { + string_t result; + serializer s(detail::output_adapter<char>(result), indent_char); + + if (indent >= 0) + { + s.dump(*this, true, ensure_ascii, static_cast<unsigned int>(indent)); + } + else + { + s.dump(*this, false, ensure_ascii, 0); + } + + return result; + } + + /*! + @brief return the type of the JSON value (explicit) + + Return the type of the JSON value as a value from the @ref value_t + enumeration. 
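+
+    A small illustrative sketch (assuming the `nlohmann::json` alias for
+    @ref basic_json); the returned enumerator can be compared directly:
+    @code {.cpp}
+    json j = {{"answer", 42}};
+    assert(j.type() == json::value_t::object);
+    assert(j["answer"].type() == json::value_t::number_integer);
+    assert(j.dump() == "{\"answer\":42}");   // compact serialization, see dump()
+    @endcode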
+ + @return the type of the JSON value + Value type | return value + ------------------------- | ------------------------- + null | value_t::null + boolean | value_t::boolean + string | value_t::string + number (integer) | value_t::number_integer + number (unsigned integer) | value_t::number_unsigned + number (floating-point) | value_t::number_float + object | value_t::object + array | value_t::array + discarded | value_t::discarded + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `type()` for all JSON + types.,type} + + @sa @ref operator value_t() -- return the type of the JSON value (implicit) + @sa @ref type_name() -- return the type as string + + @since version 1.0.0 + */ + constexpr value_t type() const noexcept + { + return m_type; + } + + /*! + @brief return whether type is primitive + + This function returns true if and only if the JSON type is primitive + (string, number, boolean, or null). + + @return `true` if type is primitive (string, number, boolean, or null), + `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_primitive()` for all JSON + types.,is_primitive} + + @sa @ref is_structured() -- returns whether JSON value is structured + @sa @ref is_null() -- returns whether JSON value is `null` + @sa @ref is_string() -- returns whether JSON value is a string + @sa @ref is_boolean() -- returns whether JSON value is a boolean + @sa @ref is_number() -- returns whether JSON value is a number + + @since version 1.0.0 + */ + constexpr bool is_primitive() const noexcept + { + return is_null() or is_string() or is_boolean() or is_number(); + } + + /*! + @brief return whether type is structured + + This function returns true if and only if the JSON type is structured + (array or object). + + @return `true` if type is structured (array or object), `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_structured()` for all JSON + types.,is_structured} + + @sa @ref is_primitive() -- returns whether value is primitive + @sa @ref is_array() -- returns whether value is an array + @sa @ref is_object() -- returns whether value is an object + + @since version 1.0.0 + */ + constexpr bool is_structured() const noexcept + { + return is_array() or is_object(); + } + + /*! + @brief return whether value is null + + This function returns true if and only if the JSON value is null. + + @return `true` if type is null, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_null()` for all JSON + types.,is_null} + + @since version 1.0.0 + */ + constexpr bool is_null() const noexcept + { + return (m_type == value_t::null); + } + + /*! + @brief return whether value is a boolean + + This function returns true if and only if the JSON value is a boolean. + + @return `true` if type is boolean, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
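+
+    For illustration (assuming the `nlohmann::json` alias):
+    @code {.cpp}
+    json j = true;
+    assert(j.is_boolean() and j.is_primitive() and not j.is_structured());
+    @endcode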
+ + @liveexample{The following code exemplifies `is_boolean()` for all JSON + types.,is_boolean} + + @since version 1.0.0 + */ + constexpr bool is_boolean() const noexcept + { + return (m_type == value_t::boolean); + } + + /*! + @brief return whether value is a number + + This function returns true if and only if the JSON value is a number. This + includes both integer (signed and unsigned) and floating-point values. + + @return `true` if type is number (regardless whether integer, unsigned + integer or floating-type), `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_number()` for all JSON + types.,is_number} + + @sa @ref is_number_integer() -- check if value is an integer or unsigned + integer number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 1.0.0 + */ + constexpr bool is_number() const noexcept + { + return is_number_integer() or is_number_float(); + } + + /*! + @brief return whether value is an integer number + + This function returns true if and only if the JSON value is a signed or + unsigned integer number. This excludes floating-point values. + + @return `true` if type is an integer or unsigned integer number, `false` + otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_number_integer()` for all + JSON types.,is_number_integer} + + @sa @ref is_number() -- check if value is a number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 1.0.0 + */ + constexpr bool is_number_integer() const noexcept + { + return (m_type == value_t::number_integer or m_type == value_t::number_unsigned); + } + + /*! + @brief return whether value is an unsigned integer number + + This function returns true if and only if the JSON value is an unsigned + integer number. This excludes floating-point and signed integer values. + + @return `true` if type is an unsigned integer number, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_number_unsigned()` for all + JSON types.,is_number_unsigned} + + @sa @ref is_number() -- check if value is a number + @sa @ref is_number_integer() -- check if value is an integer or unsigned + integer number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 2.0.0 + */ + constexpr bool is_number_unsigned() const noexcept + { + return (m_type == value_t::number_unsigned); + } + + /*! + @brief return whether value is a floating-point number + + This function returns true if and only if the JSON value is a + floating-point number. This excludes signed and unsigned integer values. + + @return `true` if type is a floating-point number, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
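+
+    A non-normative sketch of how the number predicates relate to each other
+    (assuming the `nlohmann::json` alias):
+    @code {.cpp}
+    json i = -17;      // number_integer
+    json u = 17u;      // number_unsigned
+    json f = 23.42;    // number_float
+    assert(i.is_number_integer() and not i.is_number_unsigned() and not i.is_number_float());
+    assert(u.is_number_integer() and u.is_number_unsigned());
+    assert(f.is_number_float() and not f.is_number_integer());
+    assert(i.is_number() and u.is_number() and f.is_number());
+    @endcode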
+ + @liveexample{The following code exemplifies `is_number_float()` for all + JSON types.,is_number_float} + + @sa @ref is_number() -- check if value is number + @sa @ref is_number_integer() -- check if value is an integer number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + + @since version 1.0.0 + */ + constexpr bool is_number_float() const noexcept + { + return (m_type == value_t::number_float); + } + + /*! + @brief return whether value is an object + + This function returns true if and only if the JSON value is an object. + + @return `true` if type is object, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_object()` for all JSON + types.,is_object} + + @since version 1.0.0 + */ + constexpr bool is_object() const noexcept + { + return (m_type == value_t::object); + } + + /*! + @brief return whether value is an array + + This function returns true if and only if the JSON value is an array. + + @return `true` if type is array, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_array()` for all JSON + types.,is_array} + + @since version 1.0.0 + */ + constexpr bool is_array() const noexcept + { + return (m_type == value_t::array); + } + + /*! + @brief return whether value is a string + + This function returns true if and only if the JSON value is a string. + + @return `true` if type is string, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_string()` for all JSON + types.,is_string} + + @since version 1.0.0 + */ + constexpr bool is_string() const noexcept + { + return (m_type == value_t::string); + } + + /*! + @brief return whether value is discarded + + This function returns true if and only if the JSON value was discarded + during parsing with a callback function (see @ref parser_callback_t). + + @note This function will always be `false` for JSON values after parsing. + That is, discarded values can only occur during parsing, but will be + removed when inside a structured value or replaced by null in other cases. + + @return `true` if type is discarded, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_discarded()` for all JSON + types.,is_discarded} + + @since version 1.0.0 + */ + constexpr bool is_discarded() const noexcept + { + return (m_type == value_t::discarded); + } + + /*! + @brief return the type of the JSON value (implicit) + + Implicitly return the type of the JSON value as a value from the @ref + value_t enumeration. + + @return the type of the JSON value + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
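+
+    A minimal sketch of the implicit conversion (assuming the `nlohmann::json`
+    alias):
+    @code {.cpp}
+    json j = {1, 2, 3};
+    json::value_t t = j;   // implicit conversion via this operator
+    assert(t == json::value_t::array);
+    @endcode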
+ + @liveexample{The following code exemplifies the @ref value_t operator for + all JSON types.,operator__value_t} + + @sa @ref type() -- return the type of the JSON value (explicit) + @sa @ref type_name() -- return the type as string + + @since version 1.0.0 + */ + constexpr operator value_t() const noexcept + { + return m_type; + } + + /// @} + + private: + ////////////////// + // value access // + ////////////////// + + /// get a boolean (explicit) + boolean_t get_impl(boolean_t* /*unused*/) const + { + if (JSON_LIKELY(is_boolean())) + { + return m_value.boolean; + } + + JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(type_name()))); + } + + /// get a pointer to the value (object) + object_t* get_impl_ptr(object_t* /*unused*/) noexcept + { + return is_object() ? m_value.object : nullptr; + } + + /// get a pointer to the value (object) + constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const noexcept + { + return is_object() ? m_value.object : nullptr; + } + + /// get a pointer to the value (array) + array_t* get_impl_ptr(array_t* /*unused*/) noexcept + { + return is_array() ? m_value.array : nullptr; + } + + /// get a pointer to the value (array) + constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const noexcept + { + return is_array() ? m_value.array : nullptr; + } + + /// get a pointer to the value (string) + string_t* get_impl_ptr(string_t* /*unused*/) noexcept + { + return is_string() ? m_value.string : nullptr; + } + + /// get a pointer to the value (string) + constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const noexcept + { + return is_string() ? m_value.string : nullptr; + } + + /// get a pointer to the value (boolean) + boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept + { + return is_boolean() ? &m_value.boolean : nullptr; + } + + /// get a pointer to the value (boolean) + constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) const noexcept + { + return is_boolean() ? &m_value.boolean : nullptr; + } + + /// get a pointer to the value (integer number) + number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept + { + return is_number_integer() ? &m_value.number_integer : nullptr; + } + + /// get a pointer to the value (integer number) + constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept + { + return is_number_integer() ? &m_value.number_integer : nullptr; + } + + /// get a pointer to the value (unsigned number) + number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept + { + return is_number_unsigned() ? &m_value.number_unsigned : nullptr; + } + + /// get a pointer to the value (unsigned number) + constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t* /*unused*/) const noexcept + { + return is_number_unsigned() ? &m_value.number_unsigned : nullptr; + } + + /// get a pointer to the value (floating-point number) + number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept + { + return is_number_float() ? &m_value.number_float : nullptr; + } + + /// get a pointer to the value (floating-point number) + constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unused*/) const noexcept + { + return is_number_float() ? &m_value.number_float : nullptr; + } + + /*! 
+ @brief helper function to implement get_ref() + + This function helps to implement get_ref() without code duplication for + const and non-const overloads + + @tparam ThisType will be deduced as `basic_json` or `const basic_json` + + @throw type_error.303 if ReferenceType does not match underlying value + type of the current JSON + */ + template<typename ReferenceType, typename ThisType> + static ReferenceType get_ref_impl(ThisType& obj) + { + // delegate the call to get_ptr<>() + auto ptr = obj.template get_ptr<typename std::add_pointer<ReferenceType>::type>(); + + if (JSON_LIKELY(ptr != nullptr)) + { + return *ptr; + } + + JSON_THROW(type_error::create(303, "incompatible ReferenceType for get_ref, actual type is " + std::string(obj.type_name()))); + } + + public: + /// @name value access + /// Direct access to the stored value of a JSON value. + /// @{ + + /*! + @brief get special-case overload + + This overloads avoids a lot of template boilerplate, it can be seen as the + identity method + + @tparam BasicJsonType == @ref basic_json + + @return a copy of *this + + @complexity Constant. + + @since version 2.1.0 + */ + template<typename BasicJsonType, detail::enable_if_t< + std::is_same<typename std::remove_const<BasicJsonType>::type, basic_json_t>::value, + int> = 0> + basic_json get() const + { + return *this; + } + + /*! + @brief get a value (explicit) + + Explicit type conversion between the JSON value and a compatible value + which is [CopyConstructible](http://en.cppreference.com/w/cpp/concept/CopyConstructible) + and [DefaultConstructible](http://en.cppreference.com/w/cpp/concept/DefaultConstructible). + The value is converted by calling the @ref json_serializer<ValueType> + `from_json()` method. + + The function is equivalent to executing + @code {.cpp} + ValueType ret; + JSONSerializer<ValueType>::from_json(*this, ret); + return ret; + @endcode + + This overloads is chosen if: + - @a ValueType is not @ref basic_json, + - @ref json_serializer<ValueType> has a `from_json()` method of the form + `void from_json(const basic_json&, ValueType&)`, and + - @ref json_serializer<ValueType> does not have a `from_json()` method of + the form `ValueType from_json(const basic_json&)` + + @tparam ValueTypeCV the provided value type + @tparam ValueType the returned value type + + @return copy of the JSON value, converted to @a ValueType + + @throw what @ref json_serializer<ValueType> `from_json()` method throws + + @liveexample{The example below shows several conversions from JSON values + to other types. 
There are a few things to note: (1) Floating-point numbers can + be converted to integers\, (2) A JSON array can be converted to a standard + `std::vector<short>`\, (3) A JSON object can be converted to C++ + associative containers such as `std::unordered_map<std::string\, + json>`.,get__ValueType_const} + + @since version 2.1.0 + */ + template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>, + detail::enable_if_t < + not std::is_same<basic_json_t, ValueType>::value and + detail::has_from_json<basic_json_t, ValueType>::value and + not detail::has_non_default_from_json<basic_json_t, ValueType>::value, + int> = 0> + ValueType get() const noexcept(noexcept( + JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), std::declval<ValueType&>()))) + { + // we cannot static_assert on ValueTypeCV being non-const, because + // there is support for get<const basic_json_t>(), which is why we + // still need the uncvref + static_assert(not std::is_reference<ValueTypeCV>::value, + "get() cannot be used with reference types, you might want to use get_ref()"); + static_assert(std::is_default_constructible<ValueType>::value, + "types must be DefaultConstructible when used with get()"); + + ValueType ret; + JSONSerializer<ValueType>::from_json(*this, ret); + return ret; + } + + /*! + @brief get a value (explicit); special case + + Explicit type conversion between the JSON value and a compatible value + which is **not** [CopyConstructible](http://en.cppreference.com/w/cpp/concept/CopyConstructible) + and **not** [DefaultConstructible](http://en.cppreference.com/w/cpp/concept/DefaultConstructible). + The value is converted by calling the @ref json_serializer<ValueType> + `from_json()` method. + + The function is equivalent to executing + @code {.cpp} + return JSONSerializer<ValueTypeCV>::from_json(*this); + @endcode + + This overload is chosen if: + - @a ValueType is not @ref basic_json and + - @ref json_serializer<ValueType> has a `from_json()` method of the form + `ValueType from_json(const basic_json&)` + + @note If @ref json_serializer<ValueType> has both overloads of + `from_json()`, this one is chosen. + + @tparam ValueTypeCV the provided value type + @tparam ValueType the returned value type + + @return copy of the JSON value, converted to @a ValueType + + @throw what the @ref json_serializer<ValueType> `from_json()` method throws + + @since version 2.1.0 + */ + template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>, + detail::enable_if_t<not std::is_same<basic_json_t, ValueType>::value and + detail::has_non_default_from_json<basic_json_t, ValueType>::value, + int> = 0> + ValueType get() const noexcept(noexcept( + JSONSerializer<ValueTypeCV>::from_json(std::declval<const basic_json_t&>()))) + { + static_assert(not std::is_reference<ValueTypeCV>::value, + "get() cannot be used with reference types, you might want to use get_ref()"); + return JSONSerializer<ValueTypeCV>::from_json(*this); + } + + /*! + @brief get a pointer value (explicit) + + Explicit pointer access to the internally stored JSON value. No copies are + made. + + @warning The pointer becomes invalid if the underlying JSON object + changes. + + @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref + object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, + @ref number_unsigned_t, or @ref number_float_t. 
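+
+    A short sketch (assuming the `nlohmann::json` alias); the pointer is only
+    non-null when the requested type matches the stored type:
+    @code {.cpp}
+    json j = "hello";
+    auto* s = j.get<json::string_t*>();   // points at the stored string
+    auto* b = j.get<json::boolean_t*>();  // nullptr: j is not a boolean
+    assert(s != nullptr and b == nullptr);
+    @endcode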
+ + @return pointer to the internally stored JSON value if the requested + pointer type @a PointerType fits to the JSON value; `nullptr` otherwise + + @complexity Constant. + + @liveexample{The example below shows how pointers to internal values of a + JSON value can be requested. Note that no type conversions are made and a + `nullptr` is returned if the value and the requested pointer type does not + match.,get__PointerType} + + @sa @ref get_ptr() for explicit pointer-member access + + @since version 1.0.0 + */ + template<typename PointerType, typename std::enable_if< + std::is_pointer<PointerType>::value, int>::type = 0> + PointerType get() noexcept + { + // delegate the call to get_ptr + return get_ptr<PointerType>(); + } + + /*! + @brief get a pointer value (explicit) + @copydoc get() + */ + template<typename PointerType, typename std::enable_if< + std::is_pointer<PointerType>::value, int>::type = 0> + constexpr const PointerType get() const noexcept + { + // delegate the call to get_ptr + return get_ptr<PointerType>(); + } + + /*! + @brief get a pointer value (implicit) + + Implicit pointer access to the internally stored JSON value. No copies are + made. + + @warning Writing data to the pointee of the result yields an undefined + state. + + @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref + object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, + @ref number_unsigned_t, or @ref number_float_t. Enforced by a static + assertion. + + @return pointer to the internally stored JSON value if the requested + pointer type @a PointerType fits to the JSON value; `nullptr` otherwise + + @complexity Constant. + + @liveexample{The example below shows how pointers to internal values of a + JSON value can be requested. Note that no type conversions are made and a + `nullptr` is returned if the value and the requested pointer type does not + match.,get_ptr} + + @since version 1.0.0 + */ + template<typename PointerType, typename std::enable_if< + std::is_pointer<PointerType>::value, int>::type = 0> + PointerType get_ptr() noexcept + { + // get the type of the PointerType (remove pointer and const) + using pointee_t = typename std::remove_const<typename + std::remove_pointer<typename + std::remove_const<PointerType>::type>::type>::type; + // make sure the type matches the allowed types + static_assert( + std::is_same<object_t, pointee_t>::value + or std::is_same<array_t, pointee_t>::value + or std::is_same<string_t, pointee_t>::value + or std::is_same<boolean_t, pointee_t>::value + or std::is_same<number_integer_t, pointee_t>::value + or std::is_same<number_unsigned_t, pointee_t>::value + or std::is_same<number_float_t, pointee_t>::value + , "incompatible pointer type"); + + // delegate the call to get_impl_ptr<>() + return get_impl_ptr(static_cast<PointerType>(nullptr)); + } + + /*! 
+ @brief get a pointer value (implicit) + @copydoc get_ptr() + */ + template<typename PointerType, typename std::enable_if< + std::is_pointer<PointerType>::value and + std::is_const<typename std::remove_pointer<PointerType>::type>::value, int>::type = 0> + constexpr const PointerType get_ptr() const noexcept + { + // get the type of the PointerType (remove pointer and const) + using pointee_t = typename std::remove_const<typename + std::remove_pointer<typename + std::remove_const<PointerType>::type>::type>::type; + // make sure the type matches the allowed types + static_assert( + std::is_same<object_t, pointee_t>::value + or std::is_same<array_t, pointee_t>::value + or std::is_same<string_t, pointee_t>::value + or std::is_same<boolean_t, pointee_t>::value + or std::is_same<number_integer_t, pointee_t>::value + or std::is_same<number_unsigned_t, pointee_t>::value + or std::is_same<number_float_t, pointee_t>::value + , "incompatible pointer type"); + + // delegate the call to get_impl_ptr<>() const + return get_impl_ptr(static_cast<PointerType>(nullptr)); + } + + /*! + @brief get a reference value (implicit) + + Implicit reference access to the internally stored JSON value. No copies + are made. + + @warning Writing data to the referee of the result yields an undefined + state. + + @tparam ReferenceType reference type; must be a reference to @ref array_t, + @ref object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, or + @ref number_float_t. Enforced by static assertion. + + @return reference to the internally stored JSON value if the requested + reference type @a ReferenceType fits to the JSON value; throws + type_error.303 otherwise + + @throw type_error.303 in case passed type @a ReferenceType is incompatible + with the stored JSON value; see example below + + @complexity Constant. + + @liveexample{The example shows several calls to `get_ref()`.,get_ref} + + @since version 1.1.0 + */ + template<typename ReferenceType, typename std::enable_if< + std::is_reference<ReferenceType>::value, int>::type = 0> + ReferenceType get_ref() + { + // delegate call to get_ref_impl + return get_ref_impl<ReferenceType>(*this); + } + + /*! + @brief get a reference value (implicit) + @copydoc get_ref() + */ + template<typename ReferenceType, typename std::enable_if< + std::is_reference<ReferenceType>::value and + std::is_const<typename std::remove_reference<ReferenceType>::type>::value, int>::type = 0> + ReferenceType get_ref() const + { + // delegate call to get_ref_impl + return get_ref_impl<ReferenceType>(*this); + } + + /*! + @brief get a value (implicit) + + Implicit type conversion between the JSON value and a compatible value. + The call is realized by calling @ref get() const. + + @tparam ValueType non-pointer type compatible to the JSON value, for + instance `int` for JSON integer numbers, `bool` for JSON booleans, or + `std::vector` types for JSON arrays. The character type of @ref string_t + as well as an initializer list of this type is excluded to avoid + ambiguities as these types implicitly convert to `std::string`. + + @return copy of the JSON value, converted to type @a ValueType + + @throw type_error.302 in case passed type @a ValueType is incompatible + to the JSON value type (e.g., the JSON value is of type boolean, but a + string is requested); see example below + + @complexity Linear in the size of the JSON value. + + @liveexample{The example below shows several conversions from JSON values + to other types. 
There a few things to note: (1) Floating-point numbers can + be converted to integers\, (2) A JSON array can be converted to a standard + `std::vector<short>`\, (3) A JSON object can be converted to C++ + associative containers such as `std::unordered_map<std::string\, + json>`.,operator__ValueType} + + @since version 1.0.0 + */ + template < typename ValueType, typename std::enable_if < + not std::is_pointer<ValueType>::value and + not std::is_same<ValueType, detail::json_ref<basic_json>>::value and + not std::is_same<ValueType, typename string_t::value_type>::value +#ifndef _MSC_VER // fix for issue #167 operator<< ambiguity under VS2015 + and not std::is_same<ValueType, std::initializer_list<typename string_t::value_type>>::value +#endif +#if defined(JSON_HAS_CPP_17) + and not std::is_same<ValueType, typename std::string_view>::value +#endif + , int >::type = 0 > + operator ValueType() const + { + // delegate the call to get<>() const + return get<ValueType>(); + } + + /// @} + + + //////////////////// + // element access // + //////////////////// + + /// @name element access + /// Access to the JSON value. + /// @{ + + /*! + @brief access specified array element with bounds checking + + Returns a reference to the element at specified location @a idx, with + bounds checking. + + @param[in] idx index of the element to access + + @return reference to the element at index @a idx + + @throw type_error.304 if the JSON value is not an array; in this case, + calling `at` with an index makes no sense. See example below. + @throw out_of_range.401 if the index @a idx is out of range of the array; + that is, `idx >= size()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 1.0.0 + + @liveexample{The example below shows how array elements can be read and + written using `at()`. It also demonstrates the different exceptions that + can be thrown.,at__size_type} + */ + reference at(size_type idx) + { + // at only works for arrays + if (JSON_LIKELY(is_array())) + { + JSON_TRY + { + return m_value.array->at(idx); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()))); + } + } + + /*! + @brief access specified array element with bounds checking + + Returns a const reference to the element at specified location @a idx, + with bounds checking. + + @param[in] idx index of the element to access + + @return const reference to the element at index @a idx + + @throw type_error.304 if the JSON value is not an array; in this case, + calling `at` with an index makes no sense. See example below. + @throw out_of_range.401 if the index @a idx is out of range of the array; + that is, `idx >= size()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 1.0.0 + + @liveexample{The example below shows how array elements can be read using + `at()`. 
It also demonstrates the different exceptions that can be thrown., + at__size_type_const} + */ + const_reference at(size_type idx) const + { + // at only works for arrays + if (JSON_LIKELY(is_array())) + { + JSON_TRY + { + return m_value.array->at(idx); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()))); + } + } + + /*! + @brief access specified object element with bounds checking + + Returns a reference to the element at with specified key @a key, with + bounds checking. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.304 if the JSON value is not an object; in this case, + calling `at` with a key makes no sense. See example below. + @throw out_of_range.403 if the key @a key is is not stored in the object; + that is, `find(key) == end()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Logarithmic in the size of the container. + + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + + @liveexample{The example below shows how object elements can be read and + written using `at()`. It also demonstrates the different exceptions that + can be thrown.,at__object_t_key_type} + */ + reference at(const typename object_t::key_type& key) + { + // at only works for objects + if (JSON_LIKELY(is_object())) + { + JSON_TRY + { + return m_value.object->at(key); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(403, "key '" + key + "' not found")); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()))); + } + } + + /*! + @brief access specified object element with bounds checking + + Returns a const reference to the element at with specified key @a key, + with bounds checking. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @throw type_error.304 if the JSON value is not an object; in this case, + calling `at` with a key makes no sense. See example below. + @throw out_of_range.403 if the key @a key is is not stored in the object; + that is, `find(key) == end()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Logarithmic in the size of the container. + + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + + @liveexample{The example below shows how object elements can be read using + `at()`. 
It also demonstrates the different exceptions that can be thrown., + at__object_t_key_type_const} + */ + const_reference at(const typename object_t::key_type& key) const + { + // at only works for objects + if (JSON_LIKELY(is_object())) + { + JSON_TRY + { + return m_value.object->at(key); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(403, "key '" + key + "' not found")); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()))); + } + } + + /*! + @brief access specified array element + + Returns a reference to the element at specified location @a idx. + + @note If @a idx is beyond the range of the array (i.e., `idx >= size()`), + then the array is silently filled up with `null` values to make `idx` a + valid reference to the last stored element. + + @param[in] idx index of the element to access + + @return reference to the element at index @a idx + + @throw type_error.305 if the JSON value is not an array or null; in that + cases, using the [] operator with an index makes no sense. + + @complexity Constant if @a idx is in the range of the array. Otherwise + linear in `idx - size()`. + + @liveexample{The example below shows how array elements can be read and + written using `[]` operator. Note the addition of `null` + values.,operatorarray__size_type} + + @since version 1.0.0 + */ + reference operator[](size_type idx) + { + // implicitly convert null value to an empty array + if (is_null()) + { + m_type = value_t::array; + m_value.array = create<array_t>(); + assert_invariant(); + } + + // operator[] only works for arrays + if (JSON_LIKELY(is_array())) + { + // fill up array with null values if given idx is outside range + if (idx >= m_value.array->size()) + { + m_value.array->insert(m_value.array->end(), + idx - m_value.array->size() + 1, + basic_json()); + } + + return m_value.array->operator[](idx); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! + @brief access specified array element + + Returns a const reference to the element at specified location @a idx. + + @param[in] idx index of the element to access + + @return const reference to the element at index @a idx + + @throw type_error.305 if the JSON value is not an array; in that case, + using the [] operator with an index makes no sense. + + @complexity Constant. + + @liveexample{The example below shows how array elements can be read using + the `[]` operator.,operatorarray__size_type_const} + + @since version 1.0.0 + */ + const_reference operator[](size_type idx) const + { + // const operator[] only works for arrays + if (JSON_LIKELY(is_array())) + { + return m_value.array->operator[](idx); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! + @brief access specified object element + + Returns a reference to the element at with specified key @a key. + + @note If @a key is not found in the object, then it is silently added to + the object and filled with a `null` value to make `key` a valid reference. + In case the value was `null` before, it is converted to an object. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.305 if the JSON value is not an object or null; in that + cases, using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. 
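+
+    An illustrative sketch (assuming the `nlohmann::json` alias); note how a
+    null value is silently converted to an object and missing keys are
+    inserted:
+    @code {.cpp}
+    json j;                      // null
+    j["name"] = "example";       // j is now {"name": "example"}
+    j["size"] = 42;              // {"name": "example", "size": 42}
+    json v = j["missing"];       // inserts null at key "missing"
+    @endcode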
+ + @liveexample{The example below shows how object elements can be read and + written using the `[]` operator.,operatorarray__key_type} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + */ + reference operator[](const typename object_t::key_type& key) + { + // implicitly convert null value to an empty object + if (is_null()) + { + m_type = value_t::object; + m_value.object = create<object_t>(); + assert_invariant(); + } + + // operator[] only works for objects + if (JSON_LIKELY(is_object())) + { + return m_value.object->operator[](key); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! + @brief read-only access specified object element + + Returns a const reference to the element at with specified key @a key. No + bounds checking is performed. + + @warning If the element with key @a key does not exist, the behavior is + undefined. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @pre The element with key @a key must exist. **This precondition is + enforced with an assertion.** + + @throw type_error.305 if the JSON value is not an object; in that case, + using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be read using + the `[]` operator.,operatorarray__key_type_const} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + */ + const_reference operator[](const typename object_t::key_type& key) const + { + // const operator[] only works for objects + if (JSON_LIKELY(is_object())) + { + assert(m_value.object->find(key) != m_value.object->end()); + return m_value.object->find(key)->second; + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! + @brief access specified object element + + Returns a reference to the element at with specified key @a key. + + @note If @a key is not found in the object, then it is silently added to + the object and filled with a `null` value to make `key` a valid reference. + In case the value was `null` before, it is converted to an object. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.305 if the JSON value is not an object or null; in that + cases, using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be read and + written using the `[]` operator.,operatorarray__key_type} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.1.0 + */ + template<typename T> + reference operator[](T* key) + { + // implicitly convert null to object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // at only works for objects + if (JSON_LIKELY(is_object())) + { + return m_value.object->operator[](key); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! 
+ @brief read-only access specified object element + + Returns a const reference to the element at with specified key @a key. No + bounds checking is performed. + + @warning If the element with key @a key does not exist, the behavior is + undefined. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @pre The element with key @a key must exist. **This precondition is + enforced with an assertion.** + + @throw type_error.305 if the JSON value is not an object; in that case, + using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be read using + the `[]` operator.,operatorarray__key_type_const} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.1.0 + */ + template<typename T> + const_reference operator[](T* key) const + { + // at only works for objects + if (JSON_LIKELY(is_object())) + { + assert(m_value.object->find(key) != m_value.object->end()); + return m_value.object->find(key)->second; + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with " + std::string(type_name()))); + } + + /*! + @brief access specified object element with default value + + Returns either a copy of an object's element at the specified key @a key + or a given default value if no element with key @a key exists. + + The function is basically equivalent to executing + @code {.cpp} + try { + return at(key); + } catch(out_of_range) { + return default_value; + } + @endcode + + @note Unlike @ref at(const typename object_t::key_type&), this function + does not throw if the given key @a key was not found. + + @note Unlike @ref operator[](const typename object_t::key_type& key), this + function does not implicitly add an element to the position defined by @a + key. This function is furthermore also applicable to const objects. + + @param[in] key key of the element to access + @param[in] default_value the value to return if @a key is not found + + @tparam ValueType type compatible to JSON values, for instance `int` for + JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for + JSON arrays. Note the type of the expected value at @a key and the default + value @a default_value must be compatible. + + @return copy of the element at key @a key or @a default_value if @a key + is not found + + @throw type_error.306 if the JSON value is not an object; in that case, + using `value()` with a key makes no sense. + + @complexity Logarithmic in the size of the container. 
+ + @liveexample{The example below shows how object elements can be queried + with a default value.,basic_json__value} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + + @since version 1.0.0 + */ + template<class ValueType, typename std::enable_if< + std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0> + ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const + { + // at only works for objects + if (JSON_LIKELY(is_object())) + { + // if key is found, return value and given default value otherwise + const auto it = find(key); + if (it != end()) + { + return *it; + } + + return default_value; + } + + JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()))); + } + + /*! + @brief overload for a default value of type const char* + @copydoc basic_json::value(const typename object_t::key_type&, ValueType) const + */ + string_t value(const typename object_t::key_type& key, const char* default_value) const + { + return value(key, string_t(default_value)); + } + + /*! + @brief access specified object element via JSON Pointer with default value + + Returns either a copy of an object's element at the specified key @a key + or a given default value if no element with key @a key exists. + + The function is basically equivalent to executing + @code {.cpp} + try { + return at(ptr); + } catch(out_of_range) { + return default_value; + } + @endcode + + @note Unlike @ref at(const json_pointer&), this function does not throw + if the given key @a key was not found. + + @param[in] ptr a JSON pointer to the element to access + @param[in] default_value the value to return if @a ptr found no value + + @tparam ValueType type compatible to JSON values, for instance `int` for + JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for + JSON arrays. Note the type of the expected value at @a key and the default + value @a default_value must be compatible. + + @return copy of the element at key @a key or @a default_value if @a key + is not found + + @throw type_error.306 if the JSON value is not an objec; in that case, + using `value()` with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be queried + with a default value.,basic_json__value_ptr} + + @sa @ref operator[](const json_pointer&) for unchecked access by reference + + @since version 2.0.2 + */ + template<class ValueType, typename std::enable_if< + std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0> + ValueType value(const json_pointer& ptr, const ValueType& default_value) const + { + // at only works for objects + if (JSON_LIKELY(is_object())) + { + // if pointer resolves a value, return it or use default value + JSON_TRY + { + return ptr.get_checked(this); + } + JSON_CATCH (out_of_range&) + { + return default_value; + } + } + + JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()))); + } + + /*! + @brief overload for a default value of type const char* + @copydoc basic_json::value(const json_pointer&, ValueType) const + */ + string_t value(const json_pointer& ptr, const char* default_value) const + { + return value(ptr, string_t(default_value)); + } + + /*! + @brief access the first element + + Returns a reference to the first element in the container. 
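A sketch of `value()` with a default, including the JSON-pointer overload (illustrative keys; same assumptions as the earlier sketches):

@code {.cpp}
json j = {{"logLevel", "info"}, {"port", 8080}};

std::string lvl = j.value("logLevel", "debug");            // "info"
int retries     = j.value("retries", 3);                   // key missing -> 3
int port        = j.value(json::json_pointer("/port"), 0); // 8080

// unlike operator[], value() never inserts the missing key
@endcode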
For a JSON + container `c`, the expression `c.front()` is equivalent to `*c.begin()`. + + @return In case of a structured type (array or object), a reference to the + first element is returned. In case of number, string, or boolean values, a + reference to the value is returned. + + @complexity Constant. + + @pre The JSON value must not be `null` (would throw `std::out_of_range`) + or an empty array or object (undefined behavior, **guarded by + assertions**). + @post The JSON value remains unchanged. + + @throw invalid_iterator.214 when called on `null` value + + @liveexample{The following code shows an example for `front()`.,front} + + @sa @ref back() -- access the last element + + @since version 1.0.0 + */ + reference front() + { + return *begin(); + } + + /*! + @copydoc basic_json::front() + */ + const_reference front() const + { + return *cbegin(); + } + + /*! + @brief access the last element + + Returns a reference to the last element in the container. For a JSON + container `c`, the expression `c.back()` is equivalent to + @code {.cpp} + auto tmp = c.end(); + --tmp; + return *tmp; + @endcode + + @return In case of a structured type (array or object), a reference to the + last element is returned. In case of number, string, or boolean values, a + reference to the value is returned. + + @complexity Constant. + + @pre The JSON value must not be `null` (would throw `std::out_of_range`) + or an empty array or object (undefined behavior, **guarded by + assertions**). + @post The JSON value remains unchanged. + + @throw invalid_iterator.214 when called on a `null` value. See example + below. + + @liveexample{The following code shows an example for `back()`.,back} + + @sa @ref front() -- access the first element + + @since version 1.0.0 + */ + reference back() + { + auto tmp = end(); + --tmp; + return *tmp; + } + + /*! + @copydoc basic_json::back() + */ + const_reference back() const + { + auto tmp = cend(); + --tmp; + return *tmp; + } + + /*! + @brief remove element given an iterator + + Removes the element specified by iterator @a pos. The iterator @a pos must + be valid and dereferenceable. Thus the `end()` iterator (which is valid, + but is not dereferenceable) cannot be used as a value for @a pos. + + If called on a primitive type other than `null`, the resulting JSON value + will be `null`. + + @param[in] pos iterator to the element to remove + @return Iterator following the last removed element. If the iterator @a + pos refers to the last element, the `end()` iterator is returned. + + @tparam IteratorType an @ref iterator or @ref const_iterator + + @post Invalidates iterators and references at or after the point of the + erase, including the `end()` iterator. 
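The behavior of `front()` and `back()` on non-array values is the non-obvious part of the description above; a quick illustrative sketch:

@code {.cpp}
json arr = {1, 2, 3};
arr.front();                 // 1
arr.back();                  // 3

json obj = {{"a", 1}, {"b", 2}};
obj.front();                 // 1 -- value of the first key in object order

json num = 17;
num.front();                 // 17 -- for primitives, front() and back() return the value itself

// json().front() would throw invalid_iterator.214 (null has no first element)
@endcode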
+ + @throw type_error.307 if called on a `null` value; example: `"cannot use + erase() with null"` + @throw invalid_iterator.202 if called on an iterator which does not belong + to the current JSON value; example: `"iterator does not fit current + value"` + @throw invalid_iterator.205 if called on a primitive type with invalid + iterator (i.e., any iterator which is not `begin()`); example: `"iterator + out of range"` + + @complexity The complexity depends on the type: + - objects: amortized constant + - arrays: linear in distance between @a pos and the end of the container + - strings: linear in the length of the string + - other types: constant + + @liveexample{The example shows the result of `erase()` for different JSON + types.,erase__IteratorType} + + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + template<class IteratorType, typename std::enable_if< + std::is_same<IteratorType, typename basic_json_t::iterator>::value or + std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type + = 0> + IteratorType erase(IteratorType pos) + { + // make sure iterator fits the current value + if (JSON_UNLIKELY(this != pos.m_object)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value")); + } + + IteratorType result = end(); + + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + { + if (JSON_UNLIKELY(not pos.m_it.primitive_iterator.is_begin())) + { + JSON_THROW(invalid_iterator::create(205, "iterator out of range")); + } + + if (is_string()) + { + AllocatorType<string_t> alloc; + std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1); + m_value.string = nullptr; + } + + m_type = value_t::null; + assert_invariant(); + break; + } + + case value_t::object: + { + result.m_it.object_iterator = m_value.object->erase(pos.m_it.object_iterator); + break; + } + + case value_t::array: + { + result.m_it.array_iterator = m_value.array->erase(pos.m_it.array_iterator); + break; + } + + default: + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()))); + } + + return result; + } + + /*! + @brief remove elements given an iterator range + + Removes the element specified by the range `[first; last)`. The iterator + @a first does not need to be dereferenceable if `first == last`: erasing + an empty range is a no-op. + + If called on a primitive type other than `null`, the resulting JSON value + will be `null`. + + @param[in] first iterator to the beginning of the range to remove + @param[in] last iterator past the end of the range to remove + @return Iterator following the last removed element. If the iterator @a + second refers to the last element, the `end()` iterator is returned. + + @tparam IteratorType an @ref iterator or @ref const_iterator + + @post Invalidates iterators and references at or after the point of the + erase, including the `end()` iterator. 
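A sketch of iterator-based `erase()` as implemented above (illustrative values; `<iterator>` assumed for `std::next`):

@code {.cpp}
json arr = {"a", "b", "c", "d"};
auto it = arr.erase(std::next(arr.begin()));   // removes "b"
// *it == "c", arr == ["a", "c", "d"]

json obj = {{"x", 1}, {"y", 2}};
obj.erase(obj.find("x"));                      // erase via an iterator obtained from find()

json str = "hello";
str.erase(str.begin());                        // primitives other than null become null
// str.is_null() == true
@endcode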
+ + @throw type_error.307 if called on a `null` value; example: `"cannot use + erase() with null"` + @throw invalid_iterator.203 if called on iterators which does not belong + to the current JSON value; example: `"iterators do not fit current value"` + @throw invalid_iterator.204 if called on a primitive type with invalid + iterators (i.e., if `first != begin()` and `last != end()`); example: + `"iterators out of range"` + + @complexity The complexity depends on the type: + - objects: `log(size()) + std::distance(first, last)` + - arrays: linear in the distance between @a first and @a last, plus linear + in the distance between @a last and end of the container + - strings: linear in the length of the string + - other types: constant + + @liveexample{The example shows the result of `erase()` for different JSON + types.,erase__IteratorType_IteratorType} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + template<class IteratorType, typename std::enable_if< + std::is_same<IteratorType, typename basic_json_t::iterator>::value or + std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type + = 0> + IteratorType erase(IteratorType first, IteratorType last) + { + // make sure iterator fits the current value + if (JSON_UNLIKELY(this != first.m_object or this != last.m_object)) + { + JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value")); + } + + IteratorType result = end(); + + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + { + if (JSON_LIKELY(not first.m_it.primitive_iterator.is_begin() + or not last.m_it.primitive_iterator.is_end())) + { + JSON_THROW(invalid_iterator::create(204, "iterators out of range")); + } + + if (is_string()) + { + AllocatorType<string_t> alloc; + std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string); + std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1); + m_value.string = nullptr; + } + + m_type = value_t::null; + assert_invariant(); + break; + } + + case value_t::object: + { + result.m_it.object_iterator = m_value.object->erase(first.m_it.object_iterator, + last.m_it.object_iterator); + break; + } + + case value_t::array: + { + result.m_it.array_iterator = m_value.array->erase(first.m_it.array_iterator, + last.m_it.array_iterator); + break; + } + + default: + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()))); + } + + return result; + } + + /*! + @brief remove element from a JSON object given a key + + Removes elements from a JSON object with the key value @a key. + + @param[in] key value of the elements to remove + + @return Number of elements removed. If @a ObjectType is the default + `std::map` type, the return value will always be `0` (@a key was not + found) or `1` (@a key was found). + + @post References and iterators to the erased elements are invalidated. + Other references and iterators are not affected. 
+ + @throw type_error.307 when called on a type other than JSON object; + example: `"cannot use erase() with null"` + + @complexity `log(size()) + count(key)` + + @liveexample{The example shows the effect of `erase()`.,erase__key_type} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + size_type erase(const typename object_t::key_type& key) + { + // this erase only works for objects + if (JSON_LIKELY(is_object())) + { + return m_value.object->erase(key); + } + + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()))); + } + + /*! + @brief remove element from a JSON array given an index + + Removes element from a JSON array at the index @a idx. + + @param[in] idx index of the element to remove + + @throw type_error.307 when called on a type other than JSON object; + example: `"cannot use erase() with null"` + @throw out_of_range.401 when `idx >= size()`; example: `"array index 17 + is out of range"` + + @complexity Linear in distance between @a idx and the end of the container. + + @liveexample{The example shows the effect of `erase()`.,erase__size_type} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + + @since version 1.0.0 + */ + void erase(const size_type idx) + { + // this erase only works for arrays + if (JSON_LIKELY(is_array())) + { + if (JSON_UNLIKELY(idx >= size())) + { + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); + } + + m_value.array->erase(m_value.array->begin() + static_cast<difference_type>(idx)); + } + else + { + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()))); + } + } + + /// @} + + + //////////// + // lookup // + //////////// + + /// @name lookup + /// @{ + + /*! + @brief find an element in a JSON object + + Finds an element in a JSON object with key equivalent to @a key. If the + element is not found or the JSON value is not an object, end() is + returned. + + @note This method always returns @ref end() when executed on a JSON type + that is not an object. + + @param[in] key key value of the element to search for. + + @return Iterator to an element with key equivalent to @a key. If no such + element is found or the JSON value is not an object, past-the-end (see + @ref end()) iterator is returned. + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The example shows how `find()` is used.,find__key_type} + + @since version 1.0.0 + */ + template<typename KeyT> + iterator find(KeyT&& key) + { + auto result = end(); + + if (is_object()) + { + result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key)); + } + + return result; + } + + /*! + @brief find an element in a JSON object + @copydoc find(KeyT&&) + */ + template<typename KeyT> + const_iterator find(KeyT&& key) const + { + auto result = cend(); + + if (is_object()) + { + result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key)); + } + + return result; + } + + /*! 
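A sketch of the key- and index-based `erase()` overloads together with `find()` (same assumptions as above):

@code {.cpp}
json obj = {{"host", "localhost"}, {"port", 80}};
obj.erase("host");               // returns 1 (number of elements removed)

json arr = {"a", "b", "c"};
arr.erase(1);                    // removes "b"; arr.erase(3) would throw out_of_range.401

auto it = obj.find("port");
if (it != obj.end())
{
    int p = *it;                 // 80
}
bool missing = (obj.find("gone") == obj.end());   // true; find() never inserts
@endcode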
+ @brief returns the number of occurrences of a key in a JSON object + + Returns the number of elements with key @a key. If ObjectType is the + default `std::map` type, the return value will always be `0` (@a key was + not found) or `1` (@a key was found). + + @note This method always returns `0` when executed on a JSON type that is + not an object. + + @param[in] key key value of the element to count + + @return Number of elements with key @a key. If the JSON value is not an + object, the return value will be `0`. + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The example shows how `count()` is used.,count} + + @since version 1.0.0 + */ + template<typename KeyT> + size_type count(KeyT&& key) const + { + // return 0 for all nonobject types + return is_object() ? m_value.object->count(std::forward<KeyT>(key)) : 0; + } + + /// @} + + + /////////////// + // iterators // + /////////////// + + /// @name iterators + /// @{ + + /*! + @brief returns an iterator to the first element + + Returns an iterator to the first element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return iterator to the first element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + + @liveexample{The following code shows an example for `begin()`.,begin} + + @sa @ref cbegin() -- returns a const iterator to the beginning + @sa @ref end() -- returns an iterator to the end + @sa @ref cend() -- returns a const iterator to the end + + @since version 1.0.0 + */ + iterator begin() noexcept + { + iterator result(this); + result.set_begin(); + return result; + } + + /*! + @copydoc basic_json::cbegin() + */ + const_iterator begin() const noexcept + { + return cbegin(); + } + + /*! + @brief returns a const iterator to the first element + + Returns a const iterator to the first element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return const iterator to the first element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast<const basic_json&>(*this).begin()`. + + @liveexample{The following code shows an example for `cbegin()`.,cbegin} + + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref end() -- returns an iterator to the end + @sa @ref cend() -- returns a const iterator to the end + + @since version 1.0.0 + */ + const_iterator cbegin() const noexcept + { + const_iterator result(this); + result.set_begin(); + return result; + } + + /*! + @brief returns an iterator to one past the last element + + Returns an iterator to one past the last element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return iterator one past the last element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. 
+ + @liveexample{The following code shows an example for `end()`.,end} + + @sa @ref cend() -- returns a const iterator to the end + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref cbegin() -- returns a const iterator to the beginning + + @since version 1.0.0 + */ + iterator end() noexcept + { + iterator result(this); + result.set_end(); + return result; + } + + /*! + @copydoc basic_json::cend() + */ + const_iterator end() const noexcept + { + return cend(); + } + + /*! + @brief returns a const iterator to one past the last element + + Returns a const iterator to one past the last element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return const iterator one past the last element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast<const basic_json&>(*this).end()`. + + @liveexample{The following code shows an example for `cend()`.,cend} + + @sa @ref end() -- returns an iterator to the end + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref cbegin() -- returns a const iterator to the beginning + + @since version 1.0.0 + */ + const_iterator cend() const noexcept + { + const_iterator result(this); + result.set_end(); + return result; + } + + /*! + @brief returns an iterator to the reverse-beginning + + Returns an iterator to the reverse-beginning; that is, the last element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](http://en.cppreference.com/w/cpp/concept/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `reverse_iterator(end())`. + + @liveexample{The following code shows an example for `rbegin()`.,rbegin} + + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref crend() -- returns a const reverse iterator to the end + + @since version 1.0.0 + */ + reverse_iterator rbegin() noexcept + { + return reverse_iterator(end()); + } + + /*! + @copydoc basic_json::crbegin() + */ + const_reverse_iterator rbegin() const noexcept + { + return crbegin(); + } + + /*! + @brief returns an iterator to the reverse-end + + Returns an iterator to the reverse-end; that is, one before the first + element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](http://en.cppreference.com/w/cpp/concept/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `reverse_iterator(begin())`. + + @liveexample{The following code shows an example for `rend()`.,rend} + + @sa @ref crend() -- returns a const reverse iterator to the end + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + + @since version 1.0.0 + */ + reverse_iterator rend() noexcept + { + return reverse_iterator(begin()); + } + + /*! + @copydoc basic_json::crend() + */ + const_reverse_iterator rend() const noexcept + { + return crend(); + } + + /*! 
+ @brief returns a const reverse iterator to the last element + + Returns a const iterator to the reverse-beginning; that is, the last + element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](http://en.cppreference.com/w/cpp/concept/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast<const basic_json&>(*this).rbegin()`. + + @liveexample{The following code shows an example for `crbegin()`.,crbegin} + + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref crend() -- returns a const reverse iterator to the end + + @since version 1.0.0 + */ + const_reverse_iterator crbegin() const noexcept + { + return const_reverse_iterator(cend()); + } + + /*! + @brief returns a const reverse iterator to one before the first + + Returns a const reverse iterator to the reverse-end; that is, one before + the first element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](http://en.cppreference.com/w/cpp/concept/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast<const basic_json&>(*this).rend()`. + + @liveexample{The following code shows an example for `crend()`.,crend} + + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + + @since version 1.0.0 + */ + const_reverse_iterator crend() const noexcept + { + return const_reverse_iterator(cbegin()); + } + + public: + /*! + @brief wrapper to access iterator member functions in range-based for + + This function allows to access @ref iterator::key() and @ref + iterator::value() during range-based for loops. In these loops, a + reference to the JSON values is returned, so there is no access to the + underlying iterator. + + For loop without iterator_wrapper: + + @code{cpp} + for (auto it = j_object.begin(); it != j_object.end(); ++it) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + Range-based for loop without iterator proxy: + + @code{cpp} + for (auto it : j_object) + { + // "it" is of type json::reference and has no key() member + std::cout << "value: " << it << '\n'; + } + @endcode + + Range-based for loop with iterator proxy: + + @code{cpp} + for (auto it : json::iterator_wrapper(j_object)) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + @note When iterating over an array, `key()` will return the index of the + element as string (see example). + + @param[in] ref reference to a JSON value + @return iteration proxy object wrapping @a ref with an interface to use in + range-based for loops + + @liveexample{The following code shows how the wrapper is used,iterator_wrapper} + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @note The name of this function is not yet final and may change in the + future. + */ + static iteration_proxy<iterator> iterator_wrapper(reference ref) + { + return iteration_proxy<iterator>(ref); + } + + /*! 
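A sketch of iterating a JSON object; the `iterator_wrapper` helper documented above is what gives access to `key()`/`value()` inside a range-based for (`<iostream>` assumed for the output statements):

@code {.cpp}
json obj = {{"one", 1}, {"two", 2}};

// plain range-based for: only the values are visible
for (const auto& v : obj)
{
    std::cout << v << '\n';          // 1, then 2
}

// wrapped: key() and value() are available
for (auto it : json::iterator_wrapper(obj))
{
    std::cout << it.key() << " = " << it.value() << '\n';
}
@endcode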
+ @copydoc iterator_wrapper(reference) + */ + static iteration_proxy<const_iterator> iterator_wrapper(const_reference ref) + { + return iteration_proxy<const_iterator>(ref); + } + + /// @} + + + ////////////// + // capacity // + ////////////// + + /// @name capacity + /// @{ + + /*! + @brief checks whether the container is empty. + + Checks if a JSON value has no elements (i.e. whether its @ref size is `0`). + + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `true` + boolean | `false` + string | `false` + number | `false` + object | result of function `object_t::empty()` + array | result of function `array_t::empty()` + + @liveexample{The following code uses `empty()` to check if a JSON + object contains any elements.,empty} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their `empty()` functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @note This function does not return whether a string stored as JSON value + is empty - it returns whether the JSON container itself is empty which is + false in the case of a string. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + - Has the semantics of `begin() == end()`. + + @sa @ref size() -- returns the number of elements + + @since version 1.0.0 + */ + bool empty() const noexcept + { + switch (m_type) + { + case value_t::null: + { + // null values are empty + return true; + } + + case value_t::array: + { + // delegate call to array_t::empty() + return m_value.array->empty(); + } + + case value_t::object: + { + // delegate call to object_t::empty() + return m_value.object->empty(); + } + + default: + { + // all other types are nonempty + return false; + } + } + } + + /*! + @brief returns the number of elements + + Returns the number of elements in a JSON value. + + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `0` + boolean | `1` + string | `1` + number | `1` + object | result of function object_t::size() + array | result of function array_t::size() + + @liveexample{The following code calls `size()` on the different value + types.,size} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their size() functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @note This function does not return the length of a string stored as JSON + value - it returns the number of elements in the JSON value which is 1 in + the case of a string. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + - Has the semantics of `std::distance(begin(), end())`. 
+ + @sa @ref empty() -- checks whether the container is empty + @sa @ref max_size() -- returns the maximal number of elements + + @since version 1.0.0 + */ + size_type size() const noexcept + { + switch (m_type) + { + case value_t::null: + { + // null values are empty + return 0; + } + + case value_t::array: + { + // delegate call to array_t::size() + return m_value.array->size(); + } + + case value_t::object: + { + // delegate call to object_t::size() + return m_value.object->size(); + } + + default: + { + // all other types have size 1 + return 1; + } + } + } + + /*! + @brief returns the maximum possible number of elements + + Returns the maximum number of elements a JSON value is able to hold due to + system or library implementation limitations, i.e. `std::distance(begin(), + end())` for the JSON value. + + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `0` (same as `size()`) + boolean | `1` (same as `size()`) + string | `1` (same as `size()`) + number | `1` (same as `size()`) + object | result of function `object_t::max_size()` + array | result of function `array_t::max_size()` + + @liveexample{The following code calls `max_size()` on the different value + types. Note the output is implementation specific.,max_size} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their `max_size()` functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @requirement This function helps `basic_json` satisfying the + [Container](http://en.cppreference.com/w/cpp/concept/Container) + requirements: + - The complexity is constant. + - Has the semantics of returning `b.size()` where `b` is the largest + possible JSON value. + + @sa @ref size() -- returns the number of elements + + @since version 1.0.0 + */ + size_type max_size() const noexcept + { + switch (m_type) + { + case value_t::array: + { + // delegate call to array_t::max_size() + return m_value.array->max_size(); + } + + case value_t::object: + { + // delegate call to object_t::max_size() + return m_value.object->max_size(); + } + + default: + { + // all other types have max_size() == size() + return size(); + } + } + } + + /// @} + + + /////////////// + // modifiers // + /////////////// + + /// @name modifiers + /// @{ + + /*! + @brief clears the contents + + Clears the content of a JSON value and resets it to the default value as + if @ref basic_json(value_t) would have been called with the current value + type from @ref type(): + + Value type | initial value + ----------- | ------------- + null | `null` + boolean | `false` + string | `""` + number | `0` + object | `{}` + array | `[]` + + @post Has the same effect as calling + @code {.cpp} + *this = basic_json(type()); + @endcode + + @liveexample{The example below shows the effect of `clear()` to different + JSON types.,clear} + + @complexity Linear in the size of the JSON value. + + @iterators All iterators, pointers and references related to this container + are invalidated. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. 
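The container-style size queries above treat primitive values in a way that is easy to get wrong; a quick sketch of the documented return values:

@code {.cpp}
json null_v;                   // null
json num = 23;
json str = "hello";
json arr = {1, 2, 3};
json obj = json::object();

null_v.empty();                // true; null_v.size() == 0
num.size();                    // 1 -- primitives always count as one element
str.size();                    // 1 -- NOT the string length
arr.size();                    // 3
obj.empty();                   // true; max_size() mirrors the underlying container
@endcode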
+ + @sa @ref basic_json(value_t) -- constructor that creates an object with the + same value than calling `clear()` + + @since version 1.0.0 + */ + void clear() noexcept + { + switch (m_type) + { + case value_t::number_integer: + { + m_value.number_integer = 0; + break; + } + + case value_t::number_unsigned: + { + m_value.number_unsigned = 0; + break; + } + + case value_t::number_float: + { + m_value.number_float = 0.0; + break; + } + + case value_t::boolean: + { + m_value.boolean = false; + break; + } + + case value_t::string: + { + m_value.string->clear(); + break; + } + + case value_t::array: + { + m_value.array->clear(); + break; + } + + case value_t::object: + { + m_value.object->clear(); + break; + } + + default: + break; + } + } + + /*! + @brief add an object to an array + + Appends the given element @a val to the end of the JSON value. If the + function is called on a JSON null value, an empty array is created before + appending @a val. + + @param[in] val the value to add to the JSON array + + @throw type_error.308 when called on a type other than JSON array or + null; example: `"cannot use push_back() with number"` + + @complexity Amortized constant. + + @liveexample{The example shows how `push_back()` and `+=` can be used to + add elements to a JSON array. Note how the `null` value was silently + converted to a JSON array.,push_back} + + @since version 1.0.0 + */ + void push_back(basic_json&& val) + { + // push_back only works for null objects or arrays + if (JSON_UNLIKELY(not(is_null() or is_array()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()))); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array (move semantics) + m_value.array->push_back(std::move(val)); + // invalidate object + val.m_type = value_t::null; + } + + /*! + @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + reference operator+=(basic_json&& val) + { + push_back(std::move(val)); + return *this; + } + + /*! + @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + void push_back(const basic_json& val) + { + // push_back only works for null objects or arrays + if (JSON_UNLIKELY(not(is_null() or is_array()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()))); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array + m_value.array->push_back(val); + } + + /*! + @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + reference operator+=(const basic_json& val) + { + push_back(val); + return *this; + } + + /*! + @brief add an object to an object + + Inserts the given element @a val to the JSON object. If the function is + called on a JSON null value, an empty object is created before inserting + @a val. + + @param[in] val the value to add to the JSON object + + @throw type_error.308 when called on a type other than JSON object or + null; example: `"cannot use push_back() with number"` + + @complexity Logarithmic in the size of the container, O(log(`size()`)). + + @liveexample{The example shows how `push_back()` and `+=` can be used to + add elements to a JSON object. 
Note how the `null` value was silently + converted to a JSON object.,push_back__object_t__value} + + @since version 1.0.0 + */ + void push_back(const typename object_t::value_type& val) + { + // push_back only works for null objects or objects + if (JSON_UNLIKELY(not(is_null() or is_object()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()))); + } + + // transform null object into an object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // add element to array + m_value.object->insert(val); + } + + /*! + @brief add an object to an object + @copydoc push_back(const typename object_t::value_type&) + */ + reference operator+=(const typename object_t::value_type& val) + { + push_back(val); + return *this; + } + + /*! + @brief add an object to an object + + This function allows to use `push_back` with an initializer list. In case + + 1. the current value is an object, + 2. the initializer list @a init contains only two elements, and + 3. the first element of @a init is a string, + + @a init is converted into an object element and added using + @ref push_back(const typename object_t::value_type&). Otherwise, @a init + is converted to a JSON value and added using @ref push_back(basic_json&&). + + @param[in] init an initializer list + + @complexity Linear in the size of the initializer list @a init. + + @note This function is required to resolve an ambiguous overload error, + because pairs like `{"key", "value"}` can be both interpreted as + `object_t::value_type` or `std::initializer_list<basic_json>`, see + https://github.com/nlohmann/json/issues/235 for more information. + + @liveexample{The example shows how initializer lists are treated as + objects when possible.,push_back__initializer_list} + */ + void push_back(initializer_list_t init) + { + if (is_object() and init.size() == 2 and (*init.begin())->is_string()) + { + basic_json&& key = init.begin()->moved_or_copied(); + push_back(typename object_t::value_type( + std::move(key.get_ref<string_t&>()), (init.begin() + 1)->moved_or_copied())); + } + else + { + push_back(basic_json(init)); + } + } + + /*! + @brief add an object to an object + @copydoc push_back(initializer_list_t) + */ + reference operator+=(initializer_list_t init) + { + push_back(init); + return *this; + } + + /*! + @brief add an object to an array + + Creates a JSON value from the passed parameters @a args to the end of the + JSON value. If the function is called on a JSON null value, an empty array + is created before appending the value created from @a args. + + @param[in] args arguments to forward to a constructor of @ref basic_json + @tparam Args compatible types to create a @ref basic_json object + + @throw type_error.311 when called on a type other than JSON array or + null; example: `"cannot use emplace_back() with number"` + + @complexity Amortized constant. + + @liveexample{The example shows how `push_back()` can be used to add + elements to a JSON array. Note how the `null` value was silently converted + to a JSON array.,emplace_back} + + @since version 2.0.8 + */ + template<class... Args> + void emplace_back(Args&& ... 
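A sketch of `push_back()`/`operator+=` on arrays and objects, including the initializer-list overload that resolves the `{"key", value}` ambiguity described above (same assumptions as earlier sketches):

@code {.cpp}
json arr;                        // null
arr.push_back("first");          // null is converted to an array
arr += 2;                        // arr == ["first", 2]

json obj = {{"one", 1}};
obj.push_back({"two", 2});       // current value is an object and the list is
                                 // {string, value}, so a member is added
obj += {"three", 3};             // obj == {"one":1, "two":2, "three":3}
@endcode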
args) + { + // emplace_back only works for null objects or arrays + if (JSON_UNLIKELY(not(is_null() or is_array()))) + { + JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name()))); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array (perfect forwarding) + m_value.array->emplace_back(std::forward<Args>(args)...); + } + + /*! + @brief add an object to an object if key does not exist + + Inserts a new element into a JSON object constructed in-place with the + given @a args if there is no element with the key in the container. If the + function is called on a JSON null value, an empty object is created before + appending the value created from @a args. + + @param[in] args arguments to forward to a constructor of @ref basic_json + @tparam Args compatible types to create a @ref basic_json object + + @return a pair consisting of an iterator to the inserted element, or the + already-existing element if no insertion happened, and a bool + denoting whether the insertion took place. + + @throw type_error.311 when called on a type other than JSON object or + null; example: `"cannot use emplace() with number"` + + @complexity Logarithmic in the size of the container, O(log(`size()`)). + + @liveexample{The example shows how `emplace()` can be used to add elements + to a JSON object. Note how the `null` value was silently converted to a + JSON object. Further note how no value is added if there was already one + value stored with the same key.,emplace} + + @since version 2.0.8 + */ + template<class... Args> + std::pair<iterator, bool> emplace(Args&& ... args) + { + // emplace only works for null objects or arrays + if (JSON_UNLIKELY(not(is_null() or is_object()))) + { + JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name()))); + } + + // transform null object into an object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // add element to array (perfect forwarding) + auto res = m_value.object->emplace(std::forward<Args>(args)...); + // create result iterator and set iterator to the result of emplace + auto it = begin(); + it.m_it.object_iterator = res.first; + + // return pair of iterator and boolean + return {it, res.second}; + } + + /*! + @brief inserts element + + Inserts element @a val before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] val element to insert + @return iterator pointing to the inserted @a val. + + @throw type_error.309 if called on JSON values other than arrays; + example: `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @complexity Constant plus linear in the distance between @a pos and end of + the container. 
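A sketch of `emplace_back()` and `emplace()` as implemented above; `emplace()` only inserts when the key is absent:

@code {.cpp}
json arr = {1, 2};
arr.emplace_back(3);                     // arr == [1, 2, 3]

json obj;                                // null
auto res1 = obj.emplace("key", "value"); // null -> object; res1.second == true
auto res2 = obj.emplace("key", "other"); // key exists; res2.second == false,
                                         // obj["key"] is still "value"
@endcode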
+ + @liveexample{The example shows how `insert()` is used.,insert} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, const basic_json& val) + { + // insert only works for arrays + if (JSON_LIKELY(is_array())) + { + // check if iterator pos fits to this JSON value + if (JSON_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value")); + } + + // insert to array and return iterator + iterator result(this); + result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, val); + return result; + } + + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()))); + } + + /*! + @brief inserts element + @copydoc insert(const_iterator, const basic_json&) + */ + iterator insert(const_iterator pos, basic_json&& val) + { + return insert(pos, val); + } + + /*! + @brief inserts elements + + Inserts @a cnt copies of @a val before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] cnt number of copies of @a val to insert + @param[in] val element to insert + @return iterator pointing to the first element inserted, or @a pos if + `cnt==0` + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @complexity Linear in @a cnt plus linear in the distance between @a pos + and end of the container. + + @liveexample{The example shows how `insert()` is used.,insert__count} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, size_type cnt, const basic_json& val) + { + // insert only works for arrays + if (JSON_LIKELY(is_array())) + { + // check if iterator pos fits to this JSON value + if (JSON_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value")); + } + + // insert to array and return iterator + iterator result(this); + result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, cnt, val); + return result; + } + + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()))); + } + + /*! + @brief inserts elements + + Inserts elements from range `[first, last)` before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + @throw invalid_iterator.211 if @a first or @a last are iterators into + container for which insert is called; example: `"passed iterators may not + belong to container"` + + @return iterator pointing to the first element inserted, or @a pos if + `first==last` + + @complexity Linear in `std::distance(first, last)` plus linear in the + distance between @a pos and end of the container. 
+ + @liveexample{The example shows how `insert()` is used.,insert__range} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, const_iterator first, const_iterator last) + { + // insert only works for arrays + if (JSON_UNLIKELY(not is_array())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()))); + } + + // check if iterator pos fits to this JSON value + if (JSON_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value")); + } + + // check if range iterators belong to the same JSON object + if (JSON_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(210, "iterators do not fit")); + } + + if (JSON_UNLIKELY(first.m_object == this)) + { + JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container")); + } + + // insert to array and return iterator + iterator result(this); + result.m_it.array_iterator = m_value.array->insert( + pos.m_it.array_iterator, + first.m_it.array_iterator, + last.m_it.array_iterator); + return result; + } + + /*! + @brief inserts elements + + Inserts elements from initializer list @a ilist before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] ilist initializer list to insert the values from + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @return iterator pointing to the first element inserted, or @a pos if + `ilist` is empty + + @complexity Linear in `ilist.size()` plus linear in the distance between + @a pos and end of the container. + + @liveexample{The example shows how `insert()` is used.,insert__ilist} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, initializer_list_t ilist) + { + // insert only works for arrays + if (JSON_UNLIKELY(not is_array())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()))); + } + + // check if iterator pos fits to this JSON value + if (JSON_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value")); + } + + // insert to array and return iterator + iterator result(this); + result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, ilist.begin(), ilist.end()); + return result; + } + + /*! + @brief inserts elements + + Inserts elements from range `[first, last)`. + + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.309 if called on JSON values other than objects; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if iterator @a first or @a last does does not + point to an object; example: `"iterators first and last must point to + objects"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + + @complexity Logarithmic: `O(N*log(size() + N))`, where `N` is the number + of elements to insert. 
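An illustrative sketch of the array `insert()` overloads above (single value, count, range, initializer list); note that range iterators must come from a different container:

@code {.cpp}
json arr = {1, 4};
arr.insert(arr.begin() + 1, 2);                    // [1, 2, 4]
arr.insert(arr.begin() + 2, 2, 3);                 // two copies of 3: [1, 2, 3, 3, 4]

json more = {7, 8};
arr.insert(arr.end(), more.begin(), more.end());   // append another array's range
arr.insert(arr.end(), {9, 10});                    // initializer-list overload

// arr.insert(arr.end(), arr.begin(), arr.end()) would throw invalid_iterator.211
@endcode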
+ + @liveexample{The example shows how `insert()` is used.,insert__range_object} + + @since version 3.0.0 + */ + void insert(const_iterator first, const_iterator last) + { + // insert only works for objects + if (JSON_UNLIKELY(not is_object())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()))); + } + + // check if range iterators belong to the same JSON object + if (JSON_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(210, "iterators do not fit")); + } + + // passed iterators must belong to objects + if (JSON_UNLIKELY(not first.m_object->is_object())) + { + JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects")); + } + + m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); + } + + /*! + @brief updates a JSON object from another object, overwriting existing keys + + Inserts all values from JSON object @a j and overwrites existing keys. + + @param[in] j JSON object to read values from + + @throw type_error.312 if called on JSON values other than objects; example: + `"cannot use update() with string"` + + @complexity O(N*log(size() + N)), where N is the number of elements to + insert. + + @liveexample{The example shows how `update()` is used.,update} + + @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update + + @since version 3.0.0 + */ + void update(const_reference j) + { + // implicitly convert null value to an empty object + if (is_null()) + { + m_type = value_t::object; + m_value.object = create<object_t>(); + assert_invariant(); + } + + if (JSON_UNLIKELY(not is_object())) + { + JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name()))); + } + if (JSON_UNLIKELY(not j.is_object())) + { + JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name()))); + } + + for (auto it = j.cbegin(); it != j.cend(); ++it) + { + m_value.object->operator[](it.key()) = it.value(); + } + } + + /*! + @brief updates a JSON object from another object, overwriting existing keys + + Inserts all values from from range `[first, last)` and overwrites existing + keys. + + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.312 if called on JSON values other than objects; example: + `"cannot use update() with string"` + @throw invalid_iterator.202 if iterator @a first or @a last does does not + point to an object; example: `"iterators first and last must point to + objects"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + + @complexity O(N*log(size() + N)), where N is the number of elements to + insert. 
+ + @liveexample{The example shows how `update()` is used__range.,update} + + @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update + + @since version 3.0.0 + */ + void update(const_iterator first, const_iterator last) + { + // implicitly convert null value to an empty object + if (is_null()) + { + m_type = value_t::object; + m_value.object = create<object_t>(); + assert_invariant(); + } + + if (JSON_UNLIKELY(not is_object())) + { + JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name()))); + } + + // check if range iterators belong to the same JSON object + if (JSON_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(210, "iterators do not fit")); + } + + // passed iterators must belong to objects + if (JSON_UNLIKELY(not first.m_object->is_object() + or not first.m_object->is_object())) + { + JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects")); + } + + for (auto it = first; it != last; ++it) + { + m_value.object->operator[](it.key()) = it.value(); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of the JSON value with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other JSON value to exchange the contents with + + @complexity Constant. + + @liveexample{The example below shows how JSON values can be swapped with + `swap()`.,swap__reference} + + @since version 1.0.0 + */ + void swap(reference other) noexcept ( + std::is_nothrow_move_constructible<value_t>::value and + std::is_nothrow_move_assignable<value_t>::value and + std::is_nothrow_move_constructible<json_value>::value and + std::is_nothrow_move_assignable<json_value>::value + ) + { + std::swap(m_type, other.m_type); + std::swap(m_value, other.m_value); + assert_invariant(); + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON array with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other array to exchange the contents with + + @throw type_error.310 when JSON value is not an array; example: `"cannot + use swap() with string"` + + @complexity Constant. + + @liveexample{The example below shows how arrays can be swapped with + `swap()`.,swap__array_t} + + @since version 1.0.0 + */ + void swap(array_t& other) + { + // swap only works for arrays + if (JSON_LIKELY(is_array())) + { + std::swap(*(m_value.array), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()))); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON object with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other object to exchange the contents with + + @throw type_error.310 when JSON value is not an object; example: + `"cannot use swap() with string"` + + @complexity Constant. 
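A sketch of `update()` as implemented above, which mirrors Python's `dict.update` (illustrative keys):

@code {.cpp}
json settings  = {{"host", "localhost"}, {"port", 80}};
json overrides = {{"port", 8080}, {"timeout", 30}};

settings.update(overrides);
// settings == {"host":"localhost", "port":8080, "timeout":30}

// the range form works the same way:
settings.update(overrides.begin(), overrides.end());
@endcode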
+ + @liveexample{The example below shows how objects can be swapped with + `swap()`.,swap__object_t} + + @since version 1.0.0 + */ + void swap(object_t& other) + { + // swap only works for objects + if (JSON_LIKELY(is_object())) + { + std::swap(*(m_value.object), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()))); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON string with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other string to exchange the contents with + + @throw type_error.310 when JSON value is not a string; example: `"cannot + use swap() with boolean"` + + @complexity Constant. + + @liveexample{The example below shows how strings can be swapped with + `swap()`.,swap__string_t} + + @since version 1.0.0 + */ + void swap(string_t& other) + { + // swap only works for strings + if (JSON_LIKELY(is_string())) + { + std::swap(*(m_value.string), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()))); + } + } + + /// @} + + public: + ////////////////////////////////////////// + // lexicographical comparison operators // + ////////////////////////////////////////// + + /// @name lexicographical comparison operators + /// @{ + + /*! + @brief comparison: equal + + Compares two JSON values for equality according to the following rules: + - Two JSON values are equal if (1) they are from the same type and (2) + their stored values are the same according to their respective + `operator==`. + - Integer and floating-point numbers are automatically converted before + comparison. Note than two NaN values are always treated as unequal. + - Two JSON null values are equal. + + @note Floating-point inside JSON values numbers are compared with + `json::number_float_t::operator==` which is `double::operator==` by + default. To compare floating-point while respecting an epsilon, an alternative + [comparison function](https://github.com/mariokonrad/marnav/blob/master/src/marnav/math/floatingpoint.hpp#L34-#L39) + could be used, for instance + @code {.cpp} + template<typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type> + inline bool is_same(T a, T b, T epsilon = std::numeric_limits<T>::epsilon()) noexcept + { + return std::abs(a - b) <= epsilon; + } + @endcode + + @note NaN values never compare equal to themselves or to other NaN values. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether the values @a lhs and @a rhs are equal + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @complexity Linear. 
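+
+    A small sketch of the number rules above (values are illustrative only;
+    adjust the include path as needed):
+
+    @code {.cpp}
+    #include <cmath>
+    #include "json.hpp"
+
+    int main()
+    {
+        nlohmann::json i = 17;
+        nlohmann::json f = 17.0;
+        bool eq = (i == f);      // true: integer and floating-point are converted before comparison
+
+        nlohmann::json n = std::nan("");
+        bool nan_eq = (n == n);  // false: NaN never compares equal
+
+        (void) eq;
+        (void) nan_eq;
+    }
+    @endcode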
+ + @liveexample{The example demonstrates comparing several JSON + types.,operator__equal} + + @since version 1.0.0 + */ + friend bool operator==(const_reference lhs, const_reference rhs) noexcept + { + const auto lhs_type = lhs.type(); + const auto rhs_type = rhs.type(); + + if (lhs_type == rhs_type) + { + switch (lhs_type) + { + case value_t::array: + return (*lhs.m_value.array == *rhs.m_value.array); + + case value_t::object: + return (*lhs.m_value.object == *rhs.m_value.object); + + case value_t::null: + return true; + + case value_t::string: + return (*lhs.m_value.string == *rhs.m_value.string); + + case value_t::boolean: + return (lhs.m_value.boolean == rhs.m_value.boolean); + + case value_t::number_integer: + return (lhs.m_value.number_integer == rhs.m_value.number_integer); + + case value_t::number_unsigned: + return (lhs.m_value.number_unsigned == rhs.m_value.number_unsigned); + + case value_t::number_float: + return (lhs.m_value.number_float == rhs.m_value.number_float); + + default: + return false; + } + } + else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float) + { + return (static_cast<number_float_t>(lhs.m_value.number_integer) == rhs.m_value.number_float); + } + else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer) + { + return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_integer)); + } + else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float) + { + return (static_cast<number_float_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_float); + } + else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned) + { + return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_unsigned)); + } + else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer) + { + return (static_cast<number_integer_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_integer); + } + else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned) + { + return (lhs.m_value.number_integer == static_cast<number_integer_t>(rhs.m_value.number_unsigned)); + } + + return false; + } + + /*! + @brief comparison: equal + @copydoc operator==(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs == basic_json(rhs)); + } + + /*! + @brief comparison: equal + @copydoc operator==(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) == rhs); + } + + /*! + @brief comparison: not equal + + Compares two JSON values for inequality by calculating `not (lhs == rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether the values @a lhs and @a rhs are not equal + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__notequal} + + @since version 1.0.0 + */ + friend bool operator!=(const_reference lhs, const_reference rhs) noexcept + { + return not (lhs == rhs); + } + + /*! 
+ @brief comparison: not equal + @copydoc operator!=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs != basic_json(rhs)); + } + + /*! + @brief comparison: not equal + @copydoc operator!=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) != rhs); + } + + /*! + @brief comparison: less than + + Compares whether one JSON value @a lhs is less than another JSON value @a + rhs according to the following rules: + - If @a lhs and @a rhs have the same type, the values are compared using + the default `<` operator. + - Integer and floating-point numbers are automatically converted before + comparison + - In case @a lhs and @a rhs have different types, the values are ignored + and the order of the types is considered, see + @ref operator<(const value_t, const value_t). + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is less than @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__less} + + @since version 1.0.0 + */ + friend bool operator<(const_reference lhs, const_reference rhs) noexcept + { + const auto lhs_type = lhs.type(); + const auto rhs_type = rhs.type(); + + if (lhs_type == rhs_type) + { + switch (lhs_type) + { + case value_t::array: + return (*lhs.m_value.array) < (*rhs.m_value.array); + + case value_t::object: + return *lhs.m_value.object < *rhs.m_value.object; + + case value_t::null: + return false; + + case value_t::string: + return *lhs.m_value.string < *rhs.m_value.string; + + case value_t::boolean: + return lhs.m_value.boolean < rhs.m_value.boolean; + + case value_t::number_integer: + return lhs.m_value.number_integer < rhs.m_value.number_integer; + + case value_t::number_unsigned: + return lhs.m_value.number_unsigned < rhs.m_value.number_unsigned; + + case value_t::number_float: + return lhs.m_value.number_float < rhs.m_value.number_float; + + default: + return false; + } + } + else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float) + { + return static_cast<number_float_t>(lhs.m_value.number_integer) < rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer) + { + return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_integer); + } + else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float) + { + return static_cast<number_float_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_unsigned); + } + else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_integer < static_cast<number_integer_t>(rhs.m_value.number_unsigned); + } + else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer) + { + return static_cast<number_integer_t>(lhs.m_value.number_unsigned) < 
rhs.m_value.number_integer; + } + + // We only reach this line if we cannot compare values. In that case, + // we compare types. Note we have to call the operator explicitly, + // because MSVC has problems otherwise. + return operator<(lhs_type, rhs_type); + } + + /*! + @brief comparison: less than + @copydoc operator<(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs < basic_json(rhs)); + } + + /*! + @brief comparison: less than + @copydoc operator<(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) < rhs); + } + + /*! + @brief comparison: less than or equal + + Compares whether one JSON value @a lhs is less than or equal to another + JSON value by calculating `not (rhs < lhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is less than or equal to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__greater} + + @since version 1.0.0 + */ + friend bool operator<=(const_reference lhs, const_reference rhs) noexcept + { + return not (rhs < lhs); + } + + /*! + @brief comparison: less than or equal + @copydoc operator<=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs <= basic_json(rhs)); + } + + /*! + @brief comparison: less than or equal + @copydoc operator<=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) <= rhs); + } + + /*! + @brief comparison: greater than + + Compares whether one JSON value @a lhs is greater than another + JSON value by calculating `not (lhs <= rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is greater than to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__lessequal} + + @since version 1.0.0 + */ + friend bool operator>(const_reference lhs, const_reference rhs) noexcept + { + return not (lhs <= rhs); + } + + /*! + @brief comparison: greater than + @copydoc operator>(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs > basic_json(rhs)); + } + + /*! 
+ @brief comparison: greater than + @copydoc operator>(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) > rhs); + } + + /*! + @brief comparison: greater than or equal + + Compares whether one JSON value @a lhs is greater than or equal to another + JSON value by calculating `not (lhs < rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is greater than or equal to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__greaterequal} + + @since version 1.0.0 + */ + friend bool operator>=(const_reference lhs, const_reference rhs) noexcept + { + return not (lhs < rhs); + } + + /*! + @brief comparison: greater than or equal + @copydoc operator>=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept + { + return (lhs >= basic_json(rhs)); + } + + /*! + @brief comparison: greater than or equal + @copydoc operator>=(const_reference, const_reference) + */ + template<typename ScalarType, typename std::enable_if< + std::is_scalar<ScalarType>::value, int>::type = 0> + friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept + { + return (basic_json(lhs) >= rhs); + } + + /// @} + + /////////////////// + // serialization // + /////////////////// + + /// @name serialization + /// @{ + + /*! + @brief serialize to stream + + Serialize the given JSON value @a j to the output stream @a o. The JSON + value will be serialized using the @ref dump member function. + + - The indentation of the output can be controlled with the member variable + `width` of the output stream @a o. For instance, using the manipulator + `std::setw(4)` on @a o sets the indentation level to `4` and the + serialization result is the same as calling `dump(4)`. + + - The indentation character can be controlled with the member variable + `fill` of the output stream @a o. For instance, the manipulator + `std::setfill('\\t')` sets indentation to use a tab character rather than + the default space character. + + @param[in,out] o stream to serialize to + @param[in] j JSON value to serialize + + @return the stream @a o + + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded + + @complexity Linear. + + @liveexample{The example below shows the serialization with different + parameters to `width` to adjust the indentation level.,operator_serialize} + + @since version 1.0.0; indentation character added in version 3.0.0 + */ + friend std::ostream& operator<<(std::ostream& o, const basic_json& j) + { + // read width member and use it as indentation parameter if nonzero + const bool pretty_print = (o.width() > 0); + const auto indentation = (pretty_print ? o.width() : 0); + + // reset width to 0 for subsequent calls to this stream + o.width(0); + + // do the actual serialization + serializer s(detail::output_adapter<char>(o), o.fill()); + s.dump(j, pretty_print, false, static_cast<unsigned int>(indentation)); + return o; + } + + /*! 
+ @brief serialize to stream + @deprecated This stream operator is deprecated and will be removed in a + future version of the library. Please use + @ref operator<<(std::ostream&, const basic_json&) + instead; that is, replace calls like `j >> o;` with `o << j;`. + @since version 1.0.0; deprecated since version 3.0.0 + */ + JSON_DEPRECATED + friend std::ostream& operator>>(const basic_json& j, std::ostream& o) + { + return o << j; + } + + /// @} + + + ///////////////////// + // deserialization // + ///////////////////// + + /// @name deserialization + /// @{ + + /*! + @brief deserialize from a compatible input + + This function reads from a compatible input. Examples are: + - an array of 1-byte values + - strings with character/literal type with size of 1 byte + - input streams + - container with contiguous storage of 1-byte values. Compatible container + types include `std::vector`, `std::string`, `std::array`, + `std::valarray`, and `std::initializer_list`. Furthermore, C-style + arrays can be used with `std::begin()`/`std::end()`. User-defined + containers can be used as long as they implement random-access iterators + and a contiguous storage. + + @pre Each element of the container has a size of 1 byte. Violating this + precondition yields undefined behavior. **This precondition is enforced + with a static assertion.** + + @pre The container storage is contiguous. Violating this precondition + yields undefined behavior. **This precondition is enforced with an + assertion.** + @pre Each element of the container has a size of 1 byte. Violating this + precondition yields undefined behavior. **This precondition is enforced + with a static assertion.** + + @warning There is no way to enforce all preconditions at compile-time. If + the function is called with a noncompliant container and with + assertions switched off, the behavior is undefined and will most + likely yield segmentation violation. + + @param[in] i input to read from + @param[in] cb a parser callback function of type @ref parser_callback_t + which is used to control the deserialization by filtering unwanted values + (optional) + + @return result of the deserialization + + @throw parse_error.101 if a parse error occurs; example: `""unexpected end + of input; expected string literal""` + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. The complexity can be higher if the parser callback function + @a cb has a super-linear complexity. + + @note A UTF-8 byte order mark is silently ignored. + + @liveexample{The example below demonstrates the `parse()` function reading + from an array.,parse__array__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function with + and without callback function.,parse__string__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function with + and without callback function.,parse__istream__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function reading + from a contiguous container.,parse__contiguouscontainer__parser_callback_t} + + @since version 2.0.3 (contiguous containers) + */ + static basic_json parse(detail::input_adapter i, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true) + { + basic_json result; + parser(i, cb, allow_exceptions).parse(true, result); + return result; + } + + /*! 
+ @copydoc basic_json parse(detail::input_adapter, const parser_callback_t) + */ + static basic_json parse(detail::input_adapter& i, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true) + { + basic_json result; + parser(i, cb, allow_exceptions).parse(true, result); + return result; + } + + static bool accept(detail::input_adapter i) + { + return parser(i).accept(true); + } + + static bool accept(detail::input_adapter& i) + { + return parser(i).accept(true); + } + + /*! + @brief deserialize from an iterator range with contiguous storage + + This function reads from an iterator range of a container with contiguous + storage of 1-byte values. Compatible container types include + `std::vector`, `std::string`, `std::array`, `std::valarray`, and + `std::initializer_list`. Furthermore, C-style arrays can be used with + `std::begin()`/`std::end()`. User-defined containers can be used as long + as they implement random-access iterators and a contiguous storage. + + @pre The iterator range is contiguous. Violating this precondition yields + undefined behavior. **This precondition is enforced with an assertion.** + @pre Each element in the range has a size of 1 byte. Violating this + precondition yields undefined behavior. **This precondition is enforced + with a static assertion.** + + @warning There is no way to enforce all preconditions at compile-time. If + the function is called with noncompliant iterators and with + assertions switched off, the behavior is undefined and will most + likely yield segmentation violation. + + @tparam IteratorType iterator of container with contiguous storage + @param[in] first begin of the range to parse (included) + @param[in] last end of the range to parse (excluded) + @param[in] cb a parser callback function of type @ref parser_callback_t + which is used to control the deserialization by filtering unwanted values + (optional) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + + @return result of the deserialization + + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. The complexity can be higher if the parser callback function + @a cb has a super-linear complexity. + + @note A UTF-8 byte order mark is silently ignored. + + @liveexample{The example below demonstrates the `parse()` function reading + from an iterator range.,parse__iteratortype__parser_callback_t} + + @since version 2.0.3 + */ + template<class IteratorType, typename std::enable_if< + std::is_base_of< + std::random_access_iterator_tag, + typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0> + static basic_json parse(IteratorType first, IteratorType last, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true) + { + basic_json result; + parser(detail::input_adapter(first, last), cb, allow_exceptions).parse(true, result); + return result; + } + + template<class IteratorType, typename std::enable_if< + std::is_base_of< + std::random_access_iterator_tag, + typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0> + static bool accept(IteratorType first, IteratorType last) + { + return parser(detail::input_adapter(first, last)).accept(true); + } + + /*! 
+ @brief deserialize from stream + @deprecated This stream operator is deprecated and will be removed in a + future version of the library. Please use + @ref operator>>(std::istream&, basic_json&) + instead; that is, replace calls like `j << i;` with `i >> j;`. + @since version 1.0.0; deprecated since version 3.0.0 + */ + JSON_DEPRECATED + friend std::istream& operator<<(basic_json& j, std::istream& i) + { + return operator>>(i, j); + } + + /*! + @brief deserialize from stream + + Deserializes an input stream to a JSON value. + + @param[in,out] i input stream to read a serialized JSON value from + @param[in,out] j JSON value to write the deserialized input to + + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. + + @note A UTF-8 byte order mark is silently ignored. + + @liveexample{The example below shows how a JSON value is constructed by + reading a serialization from a stream.,operator_deserialize} + + @sa parse(std::istream&, const parser_callback_t) for a variant with a + parser callback function to filter values while parsing + + @since version 1.0.0 + */ + friend std::istream& operator>>(std::istream& i, basic_json& j) + { + parser(detail::input_adapter(i)).parse(false, j); + return i; + } + + /// @} + + /////////////////////////// + // convenience functions // + /////////////////////////// + + /*! + @brief return the type as string + + Returns the type name as string to be used in error messages - usually to + indicate that a function was called on a wrong JSON type. + + @return a string representation of a the @a m_type member: + Value type | return value + ----------- | ------------- + null | `"null"` + boolean | `"boolean"` + string | `"string"` + number | `"number"` (for all number types) + object | `"object"` + array | `"array"` + discarded | `"discarded"` + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @complexity Constant. + + @liveexample{The following code exemplifies `type_name()` for all JSON + types.,type_name} + + @sa @ref type() -- return the type of the JSON value + @sa @ref operator value_t() -- return the type of the JSON value (implicit) + + @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept` + since 3.0.0 + */ + const char* type_name() const noexcept + { + { + switch (m_type) + { + case value_t::null: + return "null"; + case value_t::object: + return "object"; + case value_t::array: + return "array"; + case value_t::string: + return "string"; + case value_t::boolean: + return "boolean"; + case value_t::discarded: + return "discarded"; + default: + return "number"; + } + } + } + + + private: + ////////////////////// + // member variables // + ////////////////////// + + /// the type of the current element + value_t m_type = value_t::null; + + /// the value of the current element + json_value m_value = {}; + + ////////////////////////////////////////// + // binary serialization/deserialization // + ////////////////////////////////////////// + + /// @name binary serialization/deserialization support + /// @{ + + public: + /*! + @brief create a CBOR serialization of a given JSON value + + Serializes a given JSON value @a j to a byte vector using the CBOR (Concise + Binary Object Representation) serialization format. 
CBOR is a binary + serialization format which aims to be more compact than JSON itself, yet + more efficient to parse. + + The library uses the following mapping from JSON values types to + CBOR types according to the CBOR specification (RFC 7049): + + JSON value type | value/range | CBOR type | first byte + --------------- | ------------------------------------------ | ---------------------------------- | --------------- + null | `null` | Null | 0xF6 + boolean | `true` | True | 0xF5 + boolean | `false` | False | 0xF4 + number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3B + number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3A + number_integer | -32768..-129 | Negative integer (2 bytes follow) | 0x39 + number_integer | -128..-25 | Negative integer (1 byte follow) | 0x38 + number_integer | -24..-1 | Negative integer | 0x20..0x37 + number_integer | 0..23 | Integer | 0x00..0x17 + number_integer | 24..255 | Unsigned integer (1 byte follow) | 0x18 + number_integer | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 + number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B + number_unsigned | 0..23 | Integer | 0x00..0x17 + number_unsigned | 24..255 | Unsigned integer (1 byte follow) | 0x18 + number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 + number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B + number_float | *any value* | Double-Precision Float | 0xFB + string | *length*: 0..23 | UTF-8 string | 0x60..0x77 + string | *length*: 23..255 | UTF-8 string (1 byte follow) | 0x78 + string | *length*: 256..65535 | UTF-8 string (2 bytes follow) | 0x79 + string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7A + string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7B + array | *size*: 0..23 | array | 0x80..0x97 + array | *size*: 23..255 | array (1 byte follow) | 0x98 + array | *size*: 256..65535 | array (2 bytes follow) | 0x99 + array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9A + array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9B + object | *size*: 0..23 | map | 0xA0..0xB7 + object | *size*: 23..255 | map (1 byte follow) | 0xB8 + object | *size*: 256..65535 | map (2 bytes follow) | 0xB9 + object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA + object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB + + @note The mapping is **complete** in the sense that any JSON value type + can be converted to a CBOR value. + + @note If NaN or Infinity are stored inside a JSON number, they are + serialized properly. This behavior differs from the @ref dump() + function which serializes NaN or Infinity to `null`. 
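+
+    A brief sketch of the call (the first byte follows from the table above;
+    the object contents and include path are illustrative only):
+
+    @code {.cpp}
+    #include <cstdint>
+    #include <vector>
+    #include "json.hpp"
+
+    int main()
+    {
+        nlohmann::json j = {{"compact", true}, {"schema", 0}};
+
+        // serialize to a byte vector; the first byte is 0xA2 (map with two pairs)
+        std::vector<std::uint8_t> v = nlohmann::json::to_cbor(j);
+    }
+    @endcode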
+ + @note The following CBOR types are not used in the conversion: + - byte strings (0x40..0x5F) + - UTF-8 strings terminated by "break" (0x7F) + - arrays terminated by "break" (0x9F) + - maps terminated by "break" (0xBF) + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - tagged items (0xC6..0xD4, 0xD8..0xDB) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) + - half and single-precision floats (0xF9-0xFA) + - break (0xFF) + + @param[in] j JSON value to serialize + @return MessagePack serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. + + @liveexample{The example shows the serialization of a JSON value to a byte + vector in CBOR format.,to_cbor} + + @sa http://cbor.io + @sa @ref from_cbor(const std::vector<uint8_t>&, const size_t) for the + analogous deserialization + @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + + @since version 2.0.9 + */ + static std::vector<uint8_t> to_cbor(const basic_json& j) + { + std::vector<uint8_t> result; + to_cbor(j, result); + return result; + } + + static void to_cbor(const basic_json& j, detail::output_adapter<uint8_t> o) + { + binary_writer<uint8_t>(o).write_cbor(j); + } + + static void to_cbor(const basic_json& j, detail::output_adapter<char> o) + { + binary_writer<char>(o).write_cbor(j); + } + + /*! + @brief create a MessagePack serialization of a given JSON value + + Serializes a given JSON value @a j to a byte vector using the MessagePack + serialization format. MessagePack is a binary serialization format which + aims to be more compact than JSON itself, yet more efficient to parse. + + The library uses the following mapping from JSON values types to + MessagePack types according to the MessagePack specification: + + JSON value type | value/range | MessagePack type | first byte + --------------- | --------------------------------- | ---------------- | ---------- + null | `null` | nil | 0xC0 + boolean | `true` | true | 0xC3 + boolean | `false` | false | 0xC2 + number_integer | -9223372036854775808..-2147483649 | int64 | 0xD3 + number_integer | -2147483648..-32769 | int32 | 0xD2 + number_integer | -32768..-129 | int16 | 0xD1 + number_integer | -128..-33 | int8 | 0xD0 + number_integer | -32..-1 | negative fixint | 0xE0..0xFF + number_integer | 0..127 | positive fixint | 0x00..0x7F + number_integer | 128..255 | uint 8 | 0xCC + number_integer | 256..65535 | uint 16 | 0xCD + number_integer | 65536..4294967295 | uint 32 | 0xCE + number_integer | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_unsigned | 0..127 | positive fixint | 0x00..0x7F + number_unsigned | 128..255 | uint 8 | 0xCC + number_unsigned | 256..65535 | uint 16 | 0xCD + number_unsigned | 65536..4294967295 | uint 32 | 0xCE + number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_float | *any value* | float 64 | 0xCB + string | *length*: 0..31 | fixstr | 0xA0..0xBF + string | *length*: 32..255 | str 8 | 0xD9 + string | *length*: 256..65535 | str 16 | 0xDA + string | *length*: 65536..4294967295 | str 32 | 0xDB + array | *size*: 0..15 | fixarray | 0x90..0x9F + array | *size*: 16..65535 | array 16 | 0xDC + array | *size*: 65536..4294967295 | array 32 | 0xDD + object | *size*: 0..15 | fix map | 0x80..0x8F + object | *size*: 16..65535 | map 16 | 0xDE + object | *size*: 65536..4294967295 | map 32 | 0xDF + + @note The mapping is **complete** in the sense that any JSON value type + can be converted to a 
MessagePack value. + + @note The following values can **not** be converted to a MessagePack value: + - strings with more than 4294967295 bytes + - arrays with more than 4294967295 elements + - objects with more than 4294967295 elements + + @note The following MessagePack types are not used in the conversion: + - bin 8 - bin 32 (0xC4..0xC6) + - ext 8 - ext 32 (0xC7..0xC9) + - float 32 (0xCA) + - fixext 1 - fixext 16 (0xD4..0xD8) + + @note Any MessagePack output created @ref to_msgpack can be successfully + parsed by @ref from_msgpack. + + @note If NaN or Infinity are stored inside a JSON number, they are + serialized properly. This behavior differs from the @ref dump() + function which serializes NaN or Infinity to `null`. + + @param[in] j JSON value to serialize + @return MessagePack serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. + + @liveexample{The example shows the serialization of a JSON value to a byte + vector in MessagePack format.,to_msgpack} + + @sa http://msgpack.org + @sa @ref from_msgpack(const std::vector<uint8_t>&, const size_t) for the + analogous deserialization + @sa @ref to_cbor(const basic_json& for the related CBOR format + + @since version 2.0.9 + */ + static std::vector<uint8_t> to_msgpack(const basic_json& j) + { + std::vector<uint8_t> result; + to_msgpack(j, result); + return result; + } + + static void to_msgpack(const basic_json& j, detail::output_adapter<uint8_t> o) + { + binary_writer<uint8_t>(o).write_msgpack(j); + } + + static void to_msgpack(const basic_json& j, detail::output_adapter<char> o) + { + binary_writer<char>(o).write_msgpack(j); + } + + /*! + @brief create a JSON value from an input in CBOR format + + Deserializes a given input @a i to a JSON value using the CBOR (Concise + Binary Object Representation) serialization format. + + The library maps CBOR types to JSON value types as follows: + + CBOR type | JSON value type | first byte + ---------------------- | --------------- | ---------- + Integer | number_unsigned | 0x00..0x17 + Unsigned integer | number_unsigned | 0x18 + Unsigned integer | number_unsigned | 0x19 + Unsigned integer | number_unsigned | 0x1A + Unsigned integer | number_unsigned | 0x1B + Negative integer | number_integer | 0x20..0x37 + Negative integer | number_integer | 0x38 + Negative integer | number_integer | 0x39 + Negative integer | number_integer | 0x3A + Negative integer | number_integer | 0x3B + Negative integer | number_integer | 0x40..0x57 + UTF-8 string | string | 0x60..0x77 + UTF-8 string | string | 0x78 + UTF-8 string | string | 0x79 + UTF-8 string | string | 0x7A + UTF-8 string | string | 0x7B + UTF-8 string | string | 0x7F + array | array | 0x80..0x97 + array | array | 0x98 + array | array | 0x99 + array | array | 0x9A + array | array | 0x9B + array | array | 0x9F + map | object | 0xA0..0xB7 + map | object | 0xB8 + map | object | 0xB9 + map | object | 0xBA + map | object | 0xBB + map | object | 0xBF + False | `false` | 0xF4 + True | `true` | 0xF5 + Nill | `null` | 0xF6 + Half-Precision Float | number_float | 0xF9 + Single-Precision Float | number_float | 0xFA + Double-Precision Float | number_float | 0xFB + + @warning The mapping is **incomplete** in the sense that not all CBOR + types can be converted to a JSON value. 
The following CBOR types + are not supported and will yield parse errors (parse_error.112): + - byte strings (0x40..0x5F) + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - tagged items (0xC6..0xD4, 0xD8..0xDB) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) + + @warning CBOR allows map keys of any type, whereas JSON only allows + strings as keys in object values. Therefore, CBOR maps with keys + other than UTF-8 strings are rejected (parse_error.113). + + @note Any CBOR output created @ref to_cbor can be successfully parsed by + @ref from_cbor. + + @param[in] i an input in CBOR format convertible to an input adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + @return deserialized JSON value + + @throw parse_error.110 if the given input ends prematurely or the end of + file was not reached when @a strict was set to true + @throw parse_error.112 if unsupported features from CBOR were + used in the given input @a v or if the input is not valid CBOR + @throw parse_error.113 if a string was expected as map key, but not found + + @complexity Linear in the size of the input @a i. + + @liveexample{The example shows the deserialization of a byte vector in CBOR + format to a JSON value.,from_cbor} + + @sa http://cbor.io + @sa @ref to_cbor(const basic_json&) for the analogous serialization + @sa @ref from_msgpack(detail::input_adapter, const bool) for the + related MessagePack format + + @since version 2.0.9; parameter @a start_index since 2.1.1; changed to + consume input adapters, removed start_index parameter, and added + @a strict parameter since 3.0.0 + */ + static basic_json from_cbor(detail::input_adapter i, + const bool strict = true) + { + return binary_reader(i).parse_cbor(strict); + } + + /*! + @copydoc from_cbor(detail::input_adapter, const bool) + */ + template<typename A1, typename A2, + detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0> + static basic_json from_cbor(A1 && a1, A2 && a2, const bool strict = true) + { + return binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).parse_cbor(strict); + } + + /*! + @brief create a JSON value from an input in MessagePack format + + Deserializes a given input @a i to a JSON value using the MessagePack + serialization format. + + The library maps MessagePack types to JSON value types as follows: + + MessagePack type | JSON value type | first byte + ---------------- | --------------- | ---------- + positive fixint | number_unsigned | 0x00..0x7F + fixmap | object | 0x80..0x8F + fixarray | array | 0x90..0x9F + fixstr | string | 0xA0..0xBF + nil | `null` | 0xC0 + false | `false` | 0xC2 + true | `true` | 0xC3 + float 32 | number_float | 0xCA + float 64 | number_float | 0xCB + uint 8 | number_unsigned | 0xCC + uint 16 | number_unsigned | 0xCD + uint 32 | number_unsigned | 0xCE + uint 64 | number_unsigned | 0xCF + int 8 | number_integer | 0xD0 + int 16 | number_integer | 0xD1 + int 32 | number_integer | 0xD2 + int 64 | number_integer | 0xD3 + str 8 | string | 0xD9 + str 16 | string | 0xDA + str 32 | string | 0xDB + array 16 | array | 0xDC + array 32 | array | 0xDD + map 16 | object | 0xDE + map 32 | object | 0xDF + negative fixint | number_integer | 0xE0-0xFF + + @warning The mapping is **incomplete** in the sense that not all + MessagePack types can be converted to a JSON value. 
The following + MessagePack types are not supported and will yield parse errors: + - bin 8 - bin 32 (0xC4..0xC6) + - ext 8 - ext 32 (0xC7..0xC9) + - fixext 1 - fixext 16 (0xD4..0xD8) + + @note Any MessagePack output created @ref to_msgpack can be successfully + parsed by @ref from_msgpack. + + @param[in] i an input in MessagePack format convertible to an input + adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + + @throw parse_error.110 if the given input ends prematurely or the end of + file was not reached when @a strict was set to true + @throw parse_error.112 if unsupported features from MessagePack were + used in the given input @a i or if the input is not valid MessagePack + @throw parse_error.113 if a string was expected as map key, but not found + + @complexity Linear in the size of the input @a i. + + @liveexample{The example shows the deserialization of a byte vector in + MessagePack format to a JSON value.,from_msgpack} + + @sa http://msgpack.org + @sa @ref to_msgpack(const basic_json&) for the analogous serialization + @sa @ref from_cbor(detail::input_adapter, const bool) for the related CBOR + format + + @since version 2.0.9; parameter @a start_index since 2.1.1; changed to + consume input adapters, removed start_index parameter, and added + @a strict parameter since 3.0.0 + */ + static basic_json from_msgpack(detail::input_adapter i, + const bool strict = true) + { + return binary_reader(i).parse_msgpack(strict); + } + + /*! + @copydoc from_msgpack(detail::input_adapter, const bool) + */ + template<typename A1, typename A2, + detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0> + static basic_json from_msgpack(A1 && a1, A2 && a2, const bool strict = true) + { + return binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).parse_msgpack(strict); + } + + /// @} + + ////////////////////////// + // JSON Pointer support // + ////////////////////////// + + /// @name JSON Pointer functions + /// @{ + + /*! + @brief access specified element via JSON Pointer + + Uses a JSON pointer to retrieve a reference to the respective JSON value. + No bound checking is performed. Similar to @ref operator[](const typename + object_t::key_type&), `null` values are created in arrays and objects if + necessary. + + In particular: + - If the JSON pointer points to an object key that does not exist, it + is created an filled with a `null` value before a reference to it + is returned. + - If the JSON pointer points to an array index that does not exist, it + is created an filled with a `null` value before a reference to it + is returned. All indices between the current maximum and the given + index are also filled with `null`. + - The special value `-` is treated as a synonym for the index past the + end. + + @param[in] ptr a JSON pointer + + @return reference to the element pointed to by @a ptr + + @complexity Constant. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.404 if the JSON pointer can not be resolved + + @liveexample{The behavior is shown in the example.,operatorjson_pointer} + + @since version 2.0.0 + */ + reference operator[](const json_pointer& ptr) + { + return ptr.get_unchecked(this); + } + + /*! + @brief access specified element via JSON Pointer + + Uses a JSON pointer to retrieve a reference to the respective JSON value. + No bound checking is performed. 
The function does not change the JSON
+    value; no `null` values are created. In particular, the special value
+    `-` yields an exception.
+
+    @param[in] ptr JSON pointer to the desired element
+
+    @return const reference to the element pointed to by @a ptr
+
+    @complexity Constant.
+
+    @throw parse_error.106 if an array index begins with '0'
+    @throw parse_error.109 if an array index was not a number
+    @throw out_of_range.402 if the array index '-' is used
+    @throw out_of_range.404 if the JSON pointer can not be resolved
+
+    @liveexample{The behavior is shown in the example.,operatorjson_pointer_const}
+
+    @since version 2.0.0
+    */
+    const_reference operator[](const json_pointer& ptr) const
+    {
+        return ptr.get_unchecked(this);
+    }
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Returns a reference to the element at the specified JSON pointer @a ptr,
+    with bounds checking.
+
+    @param[in] ptr JSON pointer to the desired element
+
+    @return reference to the element pointed to by @a ptr
+
+    @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+    begins with '0'. See example below.
+
+    @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+    is not a number. See example below.
+
+    @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+    is out of range. See example below.
+
+    @throw out_of_range.402 if the array index '-' is used in the passed JSON
+    pointer @a ptr. As `at` provides checked access (and no elements are
+    implicitly inserted), the index '-' is always invalid. See example below.
+
+    @throw out_of_range.403 if the JSON pointer describes a key of an object
+    which cannot be found. See example below.
+
+    @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+    See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 2.0.0
+
+    @liveexample{The behavior is shown in the example.,at_json_pointer}
+    */
+    reference at(const json_pointer& ptr)
+    {
+        return ptr.get_checked(this);
+    }
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Returns a const reference to the element at the specified JSON pointer
+    @a ptr, with bounds checking.
+
+    @param[in] ptr JSON pointer to the desired element
+
+    @return reference to the element pointed to by @a ptr
+
+    @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+    begins with '0'. See example below.
+
+    @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+    is not a number. See example below.
+
+    @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+    is out of range. See example below.
+
+    @throw out_of_range.402 if the array index '-' is used in the passed JSON
+    pointer @a ptr. As `at` provides checked access (and no elements are
+    implicitly inserted), the index '-' is always invalid. See example below.
+
+    @throw out_of_range.403 if the JSON pointer describes a key of an object
+    which cannot be found. See example below.
+
+    @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+    See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 2.0.0
+
+    @liveexample{The behavior is shown in the example.,at_json_pointer_const}
+    */
+    const_reference at(const json_pointer& ptr) const
+    {
+        return ptr.get_checked(this);
+    }
+
+    /*!
+    @brief return flattened JSON value
+
+    The function creates a JSON object whose keys are JSON pointers (see [RFC
+    6901](https://tools.ietf.org/html/rfc6901)) and whose values are all
+    primitive. The original JSON value can be restored using the @ref
+    unflatten() function.
+
+    @return an object that maps JSON pointers to primitive values
+
+    @note Empty objects and arrays are flattened to `null` and will not be
+    reconstructed correctly by the @ref unflatten() function.
+
+    @complexity Linear in the size of the JSON value.
+
+    @liveexample{The following code shows how a JSON object is flattened to an
+    object whose keys consist of JSON pointers.,flatten}
+
+    @sa @ref unflatten() for the reverse function
+
+    @since version 2.0.0
+    */
+    basic_json flatten() const
+    {
+        basic_json result(value_t::object);
+        json_pointer::flatten("", *this, result);
+        return result;
+    }
+
+    /*!
+    @brief unflatten a previously flattened JSON value
+
+    The function restores the arbitrary nesting of a JSON value that has been
+    flattened before using the @ref flatten() function. The JSON value must
+    meet certain constraints:
+    1. The value must be an object.
+    2. The keys must be JSON pointers (see
+       [RFC 6901](https://tools.ietf.org/html/rfc6901))
+    3. The mapped values must be primitive JSON types.
+
+    @return the original JSON from a flattened version
+
+    @note Empty objects and arrays are flattened by @ref flatten() to `null`
+    values and cannot be unflattened to their original type. Apart from
+    this limitation, for a JSON value `j`, the following is always true:
+    `j == j.flatten().unflatten()`.
+
+    @complexity Linear in the size of the JSON value.
+
+    @throw type_error.314 if value is not an object
+    @throw type_error.315 if object values are not primitive
+
+    @liveexample{The following code shows how a flattened JSON object is
+    unflattened into the original nested JSON object.,unflatten}
+
+    @sa @ref flatten() for the reverse function
+
+    @since version 2.0.0
+    */
+    basic_json unflatten() const
+    {
+        return json_pointer::unflatten(*this);
+    }
+
+    /// @}
+
+    //////////////////////////
+    // JSON Patch functions //
+    //////////////////////////
+
+    /// @name JSON Patch functions
+    /// @{
+
+    /*!
+    @brief applies a JSON patch
+
+    [JSON Patch](http://jsonpatch.com) defines a JSON document structure for
+    expressing a sequence of operations to apply to a JSON document. With
+    this function, a JSON Patch is applied to the current JSON value by
+    executing all operations from the patch.
+
+    @param[in] json_patch JSON patch document
+    @return patched document
+
+    @note The application of a patch is atomic: Either all operations succeed
+    and the patched document is returned or an exception is thrown. In
+    any case, the original value is not changed: the patch is applied
+    to a copy of the value.
+
+    @throw parse_error.104 if the JSON patch does not consist of an array of
+    objects
+
+    @throw parse_error.105 if the JSON patch is malformed (e.g., mandatory
+    attributes are missing); example: `"operation add must have member path"`
+
+    @throw out_of_range.401 if an array index is out of range.
+
+    @throw out_of_range.403 if a JSON pointer inside the patch could not be
+    resolved successfully in the current JSON value; example: `"key baz not
+    found"`
+
+    @throw out_of_range.405 if JSON pointer has no parent ("add", "remove",
+    "move")
+
+    @throw other_error.501 if "test" operation was unsuccessful
+
+    @complexity Linear in the size of the JSON value and the length of the
+    JSON patch.
As usually only a fraction of the JSON value is affected by + the patch, the complexity can usually be neglected. + + @liveexample{The following code shows how a JSON patch is applied to a + value.,patch} + + @sa @ref diff -- create a JSON patch by comparing two JSON values + + @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) + @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901) + + @since version 2.0.0 + */ + basic_json patch(const basic_json& json_patch) const + { + // make a working copy to apply the patch to + basic_json result = *this; + + // the valid JSON Patch operations + enum class patch_operations {add, remove, replace, move, copy, test, invalid}; + + const auto get_op = [](const std::string & op) + { + if (op == "add") + { + return patch_operations::add; + } + if (op == "remove") + { + return patch_operations::remove; + } + if (op == "replace") + { + return patch_operations::replace; + } + if (op == "move") + { + return patch_operations::move; + } + if (op == "copy") + { + return patch_operations::copy; + } + if (op == "test") + { + return patch_operations::test; + } + + return patch_operations::invalid; + }; + + // wrapper for "add" operation; add value at ptr + const auto operation_add = [&result](json_pointer & ptr, basic_json val) + { + // adding to the root of the target document means replacing it + if (ptr.is_root()) + { + result = val; + } + else + { + // make sure the top element of the pointer exists + json_pointer top_pointer = ptr.top(); + if (top_pointer != ptr) + { + result.at(top_pointer); + } + + // get reference to parent of JSON pointer ptr + const auto last_path = ptr.pop_back(); + basic_json& parent = result[ptr]; + + switch (parent.m_type) + { + case value_t::null: + case value_t::object: + { + // use operator[] to add value + parent[last_path] = val; + break; + } + + case value_t::array: + { + if (last_path == "-") + { + // special case: append to back + parent.push_back(val); + } + else + { + const auto idx = json_pointer::array_index(last_path); + if (JSON_UNLIKELY(static_cast<size_type>(idx) > parent.size())) + { + // avoid undefined behavior + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); + } + else + { + // default case: insert add offset + parent.insert(parent.begin() + static_cast<difference_type>(idx), val); + } + } + break; + } + + default: + { + // if there exists a parent it cannot be primitive + assert(false); // LCOV_EXCL_LINE + } + } + } + }; + + // wrapper for "remove" operation; remove value at ptr + const auto operation_remove = [&result](json_pointer & ptr) + { + // get reference to parent of JSON pointer ptr + const auto last_path = ptr.pop_back(); + basic_json& parent = result.at(ptr); + + // remove child + if (parent.is_object()) + { + // perform range check + auto it = parent.find(last_path); + if (JSON_LIKELY(it != parent.end())) + { + parent.erase(it); + } + else + { + JSON_THROW(out_of_range::create(403, "key '" + last_path + "' not found")); + } + } + else if (parent.is_array()) + { + // note erase performs range check + parent.erase(static_cast<size_type>(json_pointer::array_index(last_path))); + } + }; + + // type check: top level value must be an array + if (JSON_UNLIKELY(not json_patch.is_array())) + { + JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects")); + } + + // iterate and apply the operations + for (const auto& val : json_patch) + { + // wrapper to get a value for an operation + const auto get_value = 
[&val](const std::string & op, + const std::string & member, + bool string_type) -> basic_json& + { + // find value + auto it = val.m_value.object->find(member); + + // context-sensitive error message + const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'"; + + // check if desired value is present + if (JSON_UNLIKELY(it == val.m_value.object->end())) + { + JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'")); + } + + // check if result is of type string + if (JSON_UNLIKELY(string_type and not it->second.is_string())) + { + JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'")); + } + + // no error: return value + return it->second; + }; + + // type check: every element of the array must be an object + if (JSON_UNLIKELY(not val.is_object())) + { + JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects")); + } + + // collect mandatory members + const std::string op = get_value("op", "op", true); + const std::string path = get_value(op, "path", true); + json_pointer ptr(path); + + switch (get_op(op)) + { + case patch_operations::add: + { + operation_add(ptr, get_value("add", "value", false)); + break; + } + + case patch_operations::remove: + { + operation_remove(ptr); + break; + } + + case patch_operations::replace: + { + // the "path" location must exist - use at() + result.at(ptr) = get_value("replace", "value", false); + break; + } + + case patch_operations::move: + { + const std::string from_path = get_value("move", "from", true); + json_pointer from_ptr(from_path); + + // the "from" location must exist - use at() + basic_json v = result.at(from_ptr); + + // The move operation is functionally identical to a + // "remove" operation on the "from" location, followed + // immediately by an "add" operation at the target + // location with the value that was just removed. + operation_remove(from_ptr); + operation_add(ptr, v); + break; + } + + case patch_operations::copy: + { + const std::string from_path = get_value("copy", "from", true); + const json_pointer from_ptr(from_path); + + // the "from" location must exist - use at() + basic_json v = result.at(from_ptr); + + // The copy is functionally identical to an "add" + // operation at the target location using the value + // specified in the "from" member. + operation_add(ptr, v); + break; + } + + case patch_operations::test: + { + bool success = false; + JSON_TRY + { + // check if "value" matches the one at "path" + // the "path" location must exist - use at() + success = (result.at(ptr) == get_value("test", "value", false)); + } + JSON_CATCH (out_of_range&) + { + // ignore out of range errors: success remains false + } + + // throw an exception if test fails + if (JSON_UNLIKELY(not success)) + { + JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump())); + } + + break; + } + + case patch_operations::invalid: + { + // op must be "add", "remove", "replace", "move", "copy", or + // "test" + JSON_THROW(parse_error::create(105, 0, "operation value '" + op + "' is invalid")); + } + } + } + + return result; + } + + /*! + @brief creates a diff as a JSON patch + + Creates a [JSON Patch](http://jsonpatch.com) so that value @a source can + be changed into the value @a target by calling @ref patch function. 
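+
+    A small sketch of the kind of patch produced (the values are made up; the
+    exact set of generated operations follows the rules described below):
+
+    @code {.cpp}
+    #include <cassert>
+    #include "json.hpp"
+
+    int main()
+    {
+        nlohmann::json source = {{"a", 1}, {"b", 2}};
+        nlohmann::json target = {{"a", 1}, {"b", 3}};
+
+        // p contains a single "replace" operation for path "/b"
+        nlohmann::json p = nlohmann::json::diff(source, target);
+
+        // applying the patch to source yields target
+        assert(source.patch(p) == target);
+    }
+    @endcode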
+ + @invariant For two JSON values @a source and @a target, the following code + yields always `true`: + @code {.cpp} + source.patch(diff(source, target)) == target; + @endcode + + @note Currently, only `remove`, `add`, and `replace` operations are + generated. + + @param[in] source JSON value to compare from + @param[in] target JSON value to compare against + @param[in] path helper value to create JSON pointers + + @return a JSON patch to convert the @a source to @a target + + @complexity Linear in the lengths of @a source and @a target. + + @liveexample{The following code shows how a JSON patch is created as a + diff for two JSON values.,diff} + + @sa @ref patch -- apply a JSON patch + + @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) + + @since version 2.0.0 + */ + static basic_json diff(const basic_json& source, const basic_json& target, + const std::string& path = "") + { + // the patch + basic_json result(value_t::array); + + // if the values are the same, return empty patch + if (source == target) + { + return result; + } + + if (source.type() != target.type()) + { + // different types: replace value + result.push_back( + { + {"op", "replace"}, {"path", path}, {"value", target} + }); + } + else + { + switch (source.type()) + { + case value_t::array: + { + // first pass: traverse common elements + std::size_t i = 0; + while (i < source.size() and i < target.size()) + { + // recursive call to compare array values at index i + auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i)); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + ++i; + } + + // i now reached the end of at least one array + // in a second pass, traverse the remaining elements + + // remove my remaining elements + const auto end_index = static_cast<difference_type>(result.size()); + while (i < source.size()) + { + // add operations in reverse order to avoid invalid + // indices + result.insert(result.begin() + end_index, object( + { + {"op", "remove"}, + {"path", path + "/" + std::to_string(i)} + })); + ++i; + } + + // add other remaining elements + while (i < target.size()) + { + result.push_back( + { + {"op", "add"}, + {"path", path + "/" + std::to_string(i)}, + {"value", target[i]} + }); + ++i; + } + + break; + } + + case value_t::object: + { + // first pass: traverse this object's elements + for (auto it = source.cbegin(); it != source.cend(); ++it) + { + // escape the key name to be used in a JSON patch + const auto key = json_pointer::escape(it.key()); + + if (target.find(it.key()) != target.end()) + { + // recursive call to compare object values at key it + auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + } + else + { + // found a key that is not in o -> remove it + result.push_back(object( + { + {"op", "remove"}, {"path", path + "/" + key} + })); + } + } + + // second pass: traverse other object's elements + for (auto it = target.cbegin(); it != target.cend(); ++it) + { + if (source.find(it.key()) == source.end()) + { + // found a key that is not in this -> add it + const auto key = json_pointer::escape(it.key()); + result.push_back( + { + {"op", "add"}, {"path", path + "/" + key}, + {"value", it.value()} + }); + } + } + + break; + } + + default: + { + // both primitive type: replace value + result.push_back( + { + {"op", "replace"}, {"path", path}, {"value", target} + }); + break; + } + } + } + + return result; + } + + /// @} +}; + +///////////// +// presets // 
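The `diff` implementation above pairs with `patch` to satisfy the round-trip invariant stated in its documentation. A short sketch, under the same include assumptions as before:

```cpp
#include <cassert>
#include "json.hpp"  // assumed include path for the bundled single-header library

using nlohmann::json;

int main()
{
    const json source = R"({"a": 1, "b": [1, 2, 3]})"_json;
    const json target = R"({"a": 1, "b": [1, 2], "c": true})"_json;

    // diff() currently emits only "add", "remove", and "replace" operations
    const json p = json::diff(source, target);

    // the documented invariant: applying the diff to 'source' yields 'target'
    assert(source.patch(p) == target);

    // equal values produce an empty patch
    assert(json::diff(target, target).empty());
}
```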
+///////////// + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. + +@since version 1.0.0 +*/ +using json = basic_json<>; + +////////////////// +// json_pointer // +////////////////// + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +NLOHMANN_BASIC_JSON_TPL& +json_pointer::get_and_create(NLOHMANN_BASIC_JSON_TPL& j) const +{ + using size_type = typename NLOHMANN_BASIC_JSON_TPL::size_type; + auto result = &j; + + // in case no reference tokens exist, return a reference to the JSON value + // j which will be overwritten by a primitive value + for (const auto& reference_token : reference_tokens) + { + switch (result->m_type) + { + case detail::value_t::null: + { + if (reference_token == "0") + { + // start a new array if reference token is 0 + result = &result->operator[](0); + } + else + { + // start a new object otherwise + result = &result->operator[](reference_token); + } + break; + } + + case detail::value_t::object: + { + // create an entry in the object + result = &result->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + // create an entry in the array + JSON_TRY + { + result = &result->operator[](static_cast<size_type>(array_index(reference_token))); + } + JSON_CATCH(std::invalid_argument&) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number")); + } + break; + } + + /* + The following code is only reached if there exists a reference + token _and_ the current value is primitive. In this case, we have + an error situation, because primitive values may only occur as + single value; that is, with an empty list of reference tokens. + */ + default: + JSON_THROW(detail::type_error::create(313, "invalid value to unflatten")); + } + } + + return *result; +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +NLOHMANN_BASIC_JSON_TPL& +json_pointer::get_unchecked(NLOHMANN_BASIC_JSON_TPL* ptr) const +{ + using size_type = typename NLOHMANN_BASIC_JSON_TPL::size_type; + for (const auto& reference_token : reference_tokens) + { + // convert null values to arrays or objects before continuing + if (ptr->m_type == detail::value_t::null) + { + // check if reference token is a number + const bool nums = + std::all_of(reference_token.begin(), reference_token.end(), + [](const char x) + { + return (x >= '0' and x <= '9'); + }); + + // change value to array for numbers or "-" or to object otherwise + *ptr = (nums or reference_token == "-") + ? detail::value_t::array + : detail::value_t::object; + } + + switch (ptr->m_type) + { + case detail::value_t::object: + { + // use unchecked object access + ptr = &ptr->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + // error condition (cf. RFC 6901, Sect. 
4) + if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0')) + { + JSON_THROW(detail::parse_error::create(106, 0, + "array index '" + reference_token + + "' must not begin with '0'")); + } + + if (reference_token == "-") + { + // explicitly treat "-" as index beyond the end + ptr = &ptr->operator[](ptr->m_value.array->size()); + } + else + { + // convert array index to number; unchecked access + JSON_TRY + { + ptr = &ptr->operator[]( + static_cast<size_type>(array_index(reference_token))); + } + JSON_CATCH(std::invalid_argument&) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number")); + } + } + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'")); + } + } + + return *ptr; +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +NLOHMANN_BASIC_JSON_TPL& +json_pointer::get_checked(NLOHMANN_BASIC_JSON_TPL* ptr) const +{ + using size_type = typename NLOHMANN_BASIC_JSON_TPL::size_type; + for (const auto& reference_token : reference_tokens) + { + switch (ptr->m_type) + { + case detail::value_t::object: + { + // note: at performs range check + ptr = &ptr->at(reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_UNLIKELY(reference_token == "-")) + { + // "-" always fails the range check + JSON_THROW(detail::out_of_range::create(402, + "array index '-' (" + std::to_string(ptr->m_value.array->size()) + + ") is out of range")); + } + + // error condition (cf. RFC 6901, Sect. 4) + if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0')) + { + JSON_THROW(detail::parse_error::create(106, 0, + "array index '" + reference_token + + "' must not begin with '0'")); + } + + // note: at performs range check + JSON_TRY + { + ptr = &ptr->at(static_cast<size_type>(array_index(reference_token))); + } + JSON_CATCH(std::invalid_argument&) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number")); + } + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'")); + } + } + + return *ptr; +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +const NLOHMANN_BASIC_JSON_TPL& +json_pointer::get_unchecked(const NLOHMANN_BASIC_JSON_TPL* ptr) const +{ + using size_type = typename NLOHMANN_BASIC_JSON_TPL::size_type; + for (const auto& reference_token : reference_tokens) + { + switch (ptr->m_type) + { + case detail::value_t::object: + { + // use unchecked object access + ptr = &ptr->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_UNLIKELY(reference_token == "-")) + { + // "-" cannot be used for const access + JSON_THROW(detail::out_of_range::create(402, + "array index '-' (" + std::to_string(ptr->m_value.array->size()) + + ") is out of range")); + } + + // error condition (cf. RFC 6901, Sect. 
4) + if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0')) + { + JSON_THROW(detail::parse_error::create(106, 0, + "array index '" + reference_token + + "' must not begin with '0'")); + } + + // use unchecked array access + JSON_TRY + { + ptr = &ptr->operator[]( + static_cast<size_type>(array_index(reference_token))); + } + JSON_CATCH(std::invalid_argument&) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number")); + } + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'")); + } + } + + return *ptr; +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +const NLOHMANN_BASIC_JSON_TPL& +json_pointer::get_checked(const NLOHMANN_BASIC_JSON_TPL* ptr) const +{ + using size_type = typename NLOHMANN_BASIC_JSON_TPL::size_type; + for (const auto& reference_token : reference_tokens) + { + switch (ptr->m_type) + { + case detail::value_t::object: + { + // note: at performs range check + ptr = &ptr->at(reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_UNLIKELY(reference_token == "-")) + { + // "-" always fails the range check + JSON_THROW(detail::out_of_range::create(402, + "array index '-' (" + std::to_string(ptr->m_value.array->size()) + + ") is out of range")); + } + + // error condition (cf. RFC 6901, Sect. 4) + if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0')) + { + JSON_THROW(detail::parse_error::create(106, 0, + "array index '" + reference_token + + "' must not begin with '0'")); + } + + // note: at performs range check + JSON_TRY + { + ptr = &ptr->at(static_cast<size_type>(array_index(reference_token))); + } + JSON_CATCH(std::invalid_argument&) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number")); + } + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'")); + } + } + + return *ptr; +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +void json_pointer::flatten(const std::string& reference_string, + const NLOHMANN_BASIC_JSON_TPL& value, + NLOHMANN_BASIC_JSON_TPL& result) +{ + switch (value.m_type) + { + case detail::value_t::array: + { + if (value.m_value.array->empty()) + { + // flatten empty array as null + result[reference_string] = nullptr; + } + else + { + // iterate array and use index as reference string + for (std::size_t i = 0; i < value.m_value.array->size(); ++i) + { + flatten(reference_string + "/" + std::to_string(i), + value.m_value.array->operator[](i), result); + } + } + break; + } + + case detail::value_t::object: + { + if (value.m_value.object->empty()) + { + // flatten empty object as null + result[reference_string] = nullptr; + } + else + { + // iterate object and use keys as reference string + for (const auto& element : *value.m_value.object) + { + flatten(reference_string + "/" + escape(element.first), element.second, result); + } + } + break; + } + + default: + { + // add primitive value with its reference string + result[reference_string] = value; + break; + } + } +} + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +NLOHMANN_BASIC_JSON_TPL +json_pointer::unflatten(const NLOHMANN_BASIC_JSON_TPL& value) +{ + if (JSON_UNLIKELY(not value.is_object())) + { + JSON_THROW(detail::type_error::create(314, "only objects can be unflattened")); + } + + NLOHMANN_BASIC_JSON_TPL result; + + // iterate the JSON object values + for (const auto& element : *value.m_value.object) + { + if 
(JSON_UNLIKELY(not element.second.is_primitive())) + { + JSON_THROW(detail::type_error::create(315, "values in object must be primitive")); + } + + // assign value to reference pointed to by JSON pointer; Note that if + // the JSON pointer is "" (i.e., points to the whole value), function + // get_and_create returns a reference to result itself. An assignment + // will then create a primitive value. + json_pointer(element.first).get_and_create(result) = element.second; + } + + return result; +} + +inline bool operator==(json_pointer const& lhs, json_pointer const& rhs) noexcept +{ + return (lhs.reference_tokens == rhs.reference_tokens); +} + +inline bool operator!=(json_pointer const& lhs, json_pointer const& rhs) noexcept +{ + return not (lhs == rhs); +} +} // namespace nlohmann + + +/////////////////////// +// nonmember support // +/////////////////////// + +// specialization of std::swap, and std::hash +namespace std +{ +/*! +@brief exchanges the values of two JSON objects + +@since version 1.0.0 +*/ +template<> +inline void swap(nlohmann::json& j1, + nlohmann::json& j2) noexcept( + is_nothrow_move_constructible<nlohmann::json>::value and + is_nothrow_move_assignable<nlohmann::json>::value + ) +{ + j1.swap(j2); +} + +/// hash value for JSON objects +template<> +struct hash<nlohmann::json> +{ + /*! + @brief return a hash value for a JSON object + + @since version 1.0.0 + */ + std::size_t operator()(const nlohmann::json& j) const + { + // a naive hashing via the string representation + const auto& h = hash<nlohmann::json::string_t>(); + return h(j.dump()); + } +}; + +/// specialization for std::less<value_t> +/// @note: do not remove the space after '<', +/// see https://github.com/nlohmann/json/pull/679 +template<> +struct less< ::nlohmann::detail::value_t> +{ + /*! + @brief compare two value_t enum values + @since version 3.0.0 + */ + bool operator()(nlohmann::detail::value_t lhs, + nlohmann::detail::value_t rhs) const noexcept + { + return nlohmann::detail::operator<(lhs, rhs); + } +}; + +} // namespace std + +/*! +@brief user-defined string literal for JSON values + +This operator implements a user-defined string literal for JSON objects. It +can be used by adding `"_json"` to a string literal and returns a JSON object +if no parse error occurred. + +@param[in] s a string representation of a JSON object +@param[in] n the length of string @a s +@return a JSON object + +@since version 1.0.0 +*/ +inline nlohmann::json operator "" _json(const char* s, std::size_t n) +{ + return nlohmann::json::parse(s, s + n); +} + +/*! +@brief user-defined string literal for JSON pointer + +This operator implements a user-defined string literal for JSON Pointers. It +can be used by adding `"_json_pointer"` to a string literal and returns a JSON pointer +object if no parse error occurred. 
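The `json_pointer` resolution routines and the `flatten`/`unflatten` pair above back the public JSON Pointer interface of `basic_json`. A brief sketch of how they are typically used — again an illustrative example assuming the `"json.hpp"` include path:

```cpp
#include <cassert>
#include "json.hpp"  // assumed include path for the bundled single-header library

using nlohmann::json;

int main()
{
    json j = R"({"foo": ["bar", "baz"], "answer": 42})"_json;

    // RFC 6901 access: operator[] uses get_unchecked, at() uses get_checked
    assert(j[json::json_pointer("/foo/0")] == "bar");
    assert(j.at(json::json_pointer("/answer")) == 42);

    // "-" (past-the-end index) only works for unchecked, non-const access
    j[json::json_pointer("/foo/-")] = "qux";   // appends to the array
    assert(j.at("foo").size() == 3);

    // flatten() maps every primitive leaf to its JSON pointer string ...
    const json flat = j.flatten();
    assert(flat.at("/foo/2") == "qux");

    // ... and unflatten() restores the original document
    assert(flat.unflatten() == j);
}
```

Checked access with `at()` rejects `"-"` with an out-of-range error (code 402), as the `get_checked` overloads above show, while the unchecked path treats it as "one past the end" and creates the element.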
+ +@param[in] s a string representation of a JSON Pointer +@param[in] n the length of string @a s +@return a JSON pointer object + +@since version 2.0.0 +*/ +inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n) +{ + return nlohmann::json::json_pointer(std::string(s, n)); +} + +// restore GCC/clang diagnostic settings +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #pragma GCC diagnostic pop +#endif +#if defined(__clang__) + #pragma GCC diagnostic pop +#endif + +// clean up +#undef JSON_CATCH +#undef JSON_THROW +#undef JSON_TRY +#undef JSON_LIKELY +#undef JSON_UNLIKELY +#undef JSON_DEPRECATED +#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION +#undef NLOHMANN_BASIC_JSON_TPL + +#endif diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index 2a044d2edc56..7365a550e57f 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -16,9 +16,9 @@ basicTests() { clearStore clearCacheCache - nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---" + nix-env --substituters "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---" - nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath + nix-store --substituters "file://$cacheDir" --no-require-sigs -r $outPath [ -x $outPath/program ] @@ -28,13 +28,13 @@ basicTests() { clearCacheCache echo "WantMassQuery: 1" >> $cacheDir/nix-cache-info - nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S" - nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S" + nix-env --substituters "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S" + nix-env --substituters "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S" x=$(nix-env -f dependencies.nix -qas \* --prebuilt-only) [ -z "$x" ] - nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath + nix-store --substituters "file://$cacheDir" --no-require-sigs -r $outPath nix-store --check-validity $outPath nix-store -qR $outPath | grep input-2 @@ -63,7 +63,7 @@ mv $nar $nar.good mkdir -p $TEST_ROOT/empty nix-store --dump $TEST_ROOT/empty | xz > $nar -nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log +nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log grep -q "hash mismatch" $TEST_ROOT/log mv $nar.good $nar @@ -73,7 +73,7 @@ mv $nar.good $nar clearStore clearCacheCache -if nix-store --option binary-caches "file://$cacheDir" -r $outPath; then +if nix-store --substituters "file://$cacheDir" -r $outPath; then echo "unsigned binary cache incorrectly accepted" exit 1 fi @@ -83,12 +83,12 @@ fi # corresponding NAR has disappeared. 
clearStore -nix-build --option binary-caches "file://$cacheDir" dependencies.nix --dry-run # get info +nix-build --substituters "file://$cacheDir" dependencies.nix --dry-run # get info mkdir $cacheDir/tmp mv $cacheDir/*.nar* $cacheDir/tmp/ -NIX_DEBUG_SUBST=1 nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result --fallback +NIX_DEBUG_SUBST=1 nix-build --substituters "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result --fallback mv $cacheDir/tmp/* $cacheDir/ @@ -99,7 +99,7 @@ clearStore rm $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo) -nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log +nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log grep -q "copying path" $TEST_ROOT/log @@ -124,18 +124,18 @@ nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath clearStore clearCacheCache -(! nix-store -r $outPath --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' ) +(! nix-store -r $outPath --substituters "file://$cacheDir") # And it should fail if we provide an incorrect key. clearStore clearCacheCache -(! nix-store -r $outPath --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' --option binary-cache-public-keys "$badKey") +(! nix-store -r $outPath --substituters "file://$cacheDir" --trusted-public-keys "$badKey") # It should succeed if we provide the correct key. -nix-store -r $outPath --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' --option binary-cache-public-keys "$otherKey $publicKey" +nix-store -r $outPath --substituters "file://$cacheDir" --trusted-public-keys "$otherKey $publicKey" # It should fail if we corrupt the .narinfo. @@ -152,10 +152,10 @@ done clearCacheCache -(! nix-store -r $outPath --option binary-caches "file://$cacheDir2" --option signed-binary-caches '*' --option binary-cache-public-keys "$publicKey") +(! nix-store -r $outPath --substituters "file://$cacheDir2" --trusted-public-keys "$publicKey") # If we provide a bad and a good binary cache, it should succeed. 
-nix-store -r $outPath --option binary-caches "file://$cacheDir2 file://$cacheDir" --option signed-binary-caches '*' --option binary-cache-public-keys "$publicKey" +nix-store -r $outPath --substituters "file://$cacheDir2 file://$cacheDir" --trusted-public-keys "$publicKey" fi # HAVE_LIBSODIUM diff --git a/tests/brotli.sh b/tests/brotli.sh new file mode 100644 index 000000000000..645dd4214ec6 --- /dev/null +++ b/tests/brotli.sh @@ -0,0 +1,28 @@ +source common.sh + + +# Only test if we found brotli libraries +# (CLI tool is likely unavailable if libraries are missing) +if [ -n "$HAVE_BROTLI" ]; then + +clearStore +clearCache + +cacheURI="file://$cacheDir?compression=br" + +outPath=$(nix-build dependencies.nix --no-out-link) + +nix copy --to $cacheURI $outPath + +HASH=$(nix hash-path $outPath) + +clearStore +clearCacheCache + +nix copy --from $cacheURI $outPath --no-check-sigs + +HASH2=$(nix hash-path $outPath) + +[[ $HASH = $HASH2 ]] + +fi # HAVE_BROTLI diff --git a/tests/build-dry.sh b/tests/build-dry.sh new file mode 100644 index 000000000000..610e6070c5d7 --- /dev/null +++ b/tests/build-dry.sh @@ -0,0 +1,52 @@ +source common.sh + +################################################### +# Check that --dry-run isn't confused with read-only mode +# https://github.com/NixOS/nix/issues/1795 + +clearStore +clearCache + +# Ensure this builds successfully first +nix build -f dependencies.nix + +clearStore +clearCache + +# Try --dry-run using old command first +nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +# Now new command: +nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" + +# TODO: XXX: FIXME: #1793 +# Disable this part of the test until the problem is resolved: +if [ -n "$ISSUE_1795_IS_FIXED" ]; then +clearStore +clearCache + +# Try --dry-run using new command first +nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" +# Now old command: +nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +fi + +################################################### +# Check --dry-run doesn't create links with --dry-run +# https://github.com/NixOS/nix/issues/1849 +clearStore +clearCache + +RESULT=$TEST_ROOT/result-link +rm -f $RESULT + +nix-build dependencies.nix -o $RESULT --dry-run + +[[ ! -h $RESULT ]] || fail "nix-build --dry-run created output link" + +nix build -f dependencies.nix -o $RESULT --dry-run + +[[ ! -h $RESULT ]] || fail "nix build --dry-run created output link" + +nix build -f dependencies.nix -o $RESULT + +[[ -h $RESULT ]] diff --git a/tests/build-remote.sh b/tests/build-remote.sh index cf3bb4633183..9bca0f4a3856 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -2,7 +2,7 @@ source common.sh clearStore -if [[ $(uname) != Linux ]]; then exit; fi +if ! canUseSandbox; then exit; fi if [[ ! 
$SHELL =~ /nix/store ]]; then exit; fi chmod -R u+w $TEST_ROOT/store0 || true diff --git a/tests/check.nix b/tests/check.nix new file mode 100644 index 000000000000..08aac2fb0a77 --- /dev/null +++ b/tests/check.nix @@ -0,0 +1,17 @@ +with import ./config.nix; + +{ + nondeterministic = mkDerivation { + name = "nondeterministic"; + buildCommand = + '' + mkdir $out + date +%s.%N > $out/date + ''; + }; + + fetchurl = import <nix/fetchurl.nix> { + url = "file://" + toString ./lang/eval-okay-xml.exp.xml; + sha256 = "0kg4sla7ihm8ijr8cb3117fhl99zrc2bwy1jrngsfmkh8bav4m0v"; + }; +} diff --git a/tests/check.sh b/tests/check.sh new file mode 100644 index 000000000000..b05e40ffbeea --- /dev/null +++ b/tests/check.sh @@ -0,0 +1,32 @@ +source common.sh + +clearStore + +nix-build dependencies.nix --no-out-link +nix-build dependencies.nix --no-out-link --check + +nix-build check.nix -A nondeterministic --no-out-link +(! nix-build check.nix -A nondeterministic --no-out-link --check 2> $TEST_ROOT/log) +grep 'may not be deterministic' $TEST_ROOT/log + +clearStore + +nix-build dependencies.nix --no-out-link --repeat 3 + +(! nix-build check.nix -A nondeterministic --no-out-link --repeat 1 2> $TEST_ROOT/log) +grep 'differs from previous round' $TEST_ROOT/log + +path=$(nix-build check.nix -A fetchurl --no-out-link --hashed-mirrors '') + +chmod +w $path +echo foo > $path +chmod -w $path + +nix-build check.nix -A fetchurl --no-out-link --check --hashed-mirrors '' + +# Note: "check" doesn't repair anything, it just compares to the hash stored in the database. +[[ $(cat $path) = foo ]] + +nix-build check.nix -A fetchurl --no-out-link --repair --hashed-mirrors '' + +[[ $(cat $path) != foo ]] diff --git a/tests/common.sh.in b/tests/common.sh.in index 04d605e34438..195205988afb 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -11,7 +11,6 @@ export NIX_LOCALSTATE_DIR=$TEST_ROOT/var export NIX_LOG_DIR=$TEST_ROOT/var/log/nix export NIX_STATE_DIR=$TEST_ROOT/var/nix export NIX_CONF_DIR=$TEST_ROOT/etc -export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests export _NIX_TEST_SHARED=$TEST_ROOT/shared if [[ -n $NIX_STORE ]]; then export _NIX_TEST_NO_SANDBOX=1 @@ -21,6 +20,7 @@ export NIX_REMOTE=$NIX_REMOTE_ unset NIX_PATH export TEST_HOME=$TEST_ROOT/test-home export HOME=$TEST_HOME +unset XDG_CACHE_HOME mkdir -p $TEST_HOME export PATH=@bindir@:$PATH @@ -31,6 +31,7 @@ export xmllint="@xmllint@" export SHELL="@bash@" export PAGER=cat export HAVE_SODIUM="@HAVE_SODIUM@" +export HAVE_BROTLI="@HAVE_BROTLI@" export version=@PACKAGE_VERSION@ export system=@system@ @@ -85,9 +86,38 @@ killDaemon() { trap "" EXIT } +canUseSandbox() { + if [[ $(uname) != Linux ]]; then return 1; fi + + if [ ! -L /proc/self/ns/user ]; then + echo "Kernel doesn't support user namespaces, skipping this test..." + return 1 + fi + + if [ -e /proc/sys/kernel/unprivileged_userns_clone ]; then + if [ "$(cat /proc/sys/kernel/unprivileged_userns_clone)" != 1 ]; then + echo "Unprivileged user namespaces disabled by sysctl, skipping this test..." + return 1 + fi + fi + + return 0 +} + fail() { echo "$1" exit 1 } +expect() { + local expected res + expected="$1" + shift + set +e + "$@" + res="$?" 
+ set -e + [[ $res -eq $expected ]] +} + set -x diff --git a/tests/export.sh b/tests/export.sh index ec7560f19728..2238539bcca9 100644 --- a/tests/export.sh +++ b/tests/export.sh @@ -8,6 +8,11 @@ nix-store --export $outPath > $TEST_ROOT/exp nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all +if nix-store --export $outPath >/dev/full ; then + echo "exporting to a bad file descriptor should fail" + exit 1 +fi + clearStore diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index a967380cd5b2..530ac7bb813c 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -2,7 +2,7 @@ source common.sh if [[ -z $(type -p git) ]]; then echo "Git not installed; skipping Git tests" - exit 0 + exit 99 fi clearStore @@ -16,7 +16,8 @@ git -C $repo config user.email "foobar@example.com" git -C $repo config user.name "Foobar" echo utrecht > $repo/hello -git -C $repo add hello +touch $repo/.gitignore +git -C $repo add hello .gitignore git -C $repo commit -m 'Bla1' rev1=$(git -C $repo rev-parse HEAD) @@ -28,10 +29,17 @@ rev2=$(git -C $repo rev-parse HEAD) path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") [[ $(cat $path/hello) = world ]] +# In pure eval mode, fetchGit without a revision should fail. +[[ $(nix eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] +(! nix eval --pure-eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") + # Fetch using an explicit revision hash. path2=$(nix eval --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] +# In pure eval mode, fetchGit with a revision should succeed. +[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] + # Fetch again. This should be cached. mv $repo ${repo}-tmp path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") @@ -68,6 +76,7 @@ path2=$(nix eval --raw "(builtins.fetchGit $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] +[ ! 
-e $path2/.git ] [[ $(cat $path2/dir1/foo) = foo ]] [[ $(nix eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] @@ -84,3 +93,49 @@ git -C $repo commit -m 'Bla3' -a path4=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit file://$repo).outPath") [[ $path2 = $path4 ]] + +# tarball-ttl should be ignored if we specify a rev +echo delft > $repo/hello +git -C $repo add hello +git -C $repo commit -m 'Bla4' +rev3=$(git -C $repo rev-parse HEAD) +nix eval --tarball-ttl 3600 "(builtins.fetchGit { url = $repo; rev = \"$rev3\"; })" >/dev/null + +# Update 'path' to reflect latest master +path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") + +# Check behavior when non-master branch is used +git -C $repo checkout $rev2 -b dev +echo dev > $repo/hello + +# File URI uses 'master' unless specified otherwise +path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +[[ $path = $path2 ]] + +# Using local path with branch other than 'master' should work when clean or dirty +path3=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +# (check dirty-tree handling was used) +[[ $(nix eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] + +# Committing shouldn't change store path, or switch to using 'master' +git -C $repo commit -m 'Bla5' -a +path4=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +[[ $(cat $path4/hello) = dev ]] +[[ $path3 = $path4 ]] + +# Confirm same as 'dev' branch +path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +[[ $path3 = $path5 ]] + + +# Nuke the cache +rm -rf $TEST_HOME/.cache/nix/git + +# Try again, but without 'git' on PATH +NIX=$(command -v nix) +# This should fail +(! PATH= $NIX eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) + +# Try again, with 'git' available. This should work. +path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +[[ $path3 = $path5 ]] diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh index 0c9f4bdbbc87..4088dbd39796 100644 --- a/tests/fetchMercurial.sh +++ b/tests/fetchMercurial.sh @@ -2,7 +2,7 @@ source common.sh if [[ -z $(type -p hg) ]]; then echo "Mercurial not installed; skipping Mercurial tests" - exit 0 + exit 99 fi clearStore @@ -16,7 +16,8 @@ echo '[ui]' >> $repo/.hg/hgrc echo 'username = Foobar <foobar@example.org>' >> $repo/.hg/hgrc echo utrecht > $repo/hello -hg add --cwd $repo hello +touch $repo/.hgignore +hg add --cwd $repo hello .hgignore hg commit --cwd $repo -m 'Bla1' rev1=$(hg log --cwd $repo -r tip --template '{node}') @@ -28,10 +29,17 @@ rev2=$(hg log --cwd $repo -r tip --template '{node}') path=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $(cat $path/hello) = world ]] +# In pure eval mode, fetchGit without a revision should fail. +[[ $(nix eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] +(! nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") + # Fetch using an explicit revision hash. path2=$(nix eval --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] +# In pure eval mode, fetchGit with a revision should succeed. +[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] + # Fetch again. This should be cached. 
mv $repo ${repo}-tmp path2=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") @@ -69,6 +77,7 @@ path2=$(nix eval --raw "(builtins.fetchMercurial $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] +[ ! -e $path2/.hg ] [[ $(cat $path2/dir1/foo) = foo ]] [[ $(nix eval --raw "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh index 7f2de907049a..9bbf044f7329 100644 --- a/tests/fetchurl.sh +++ b/tests/fetchurl.sh @@ -5,7 +5,7 @@ clearStore # Test fetching a flat file. hash=$(nix-hash --flat --type sha256 ./fetchurl.sh) -outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha256 $hash --no-out-link --option hashed-mirrors '') +outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha256 $hash --no-out-link --hashed-mirrors '') cmp $outPath fetchurl.sh @@ -14,7 +14,7 @@ clearStore hash=$(nix hash-file --type sha512 --base64 ./fetchurl.sh) -outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha512 $hash --no-out-link --option hashed-mirrors '') +outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha512 $hash --no-out-link --hashed-mirrors '') cmp $outPath fetchurl.sh @@ -29,7 +29,7 @@ rm -rf $mirror mkdir -p $mirror/sha512 ln -s $(pwd)/fetchurl.sh $mirror/sha512/$hash32 -outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr sha512 $hash --no-out-link --option hashed-mirrors "file://$mirror") +outPath=$(nix-build '<nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr sha512 $hash --no-out-link --hashed-mirrors "file://$mirror") # Test unpacking a NAR. rm -rf $TEST_ROOT/archive diff --git a/tests/fixed.sh b/tests/fixed.sh index cac3f0be91b0..8f51403a7071 100644 --- a/tests/fixed.sh +++ b/tests/fixed.sh @@ -5,15 +5,22 @@ clearStore export IMPURE_VAR1=foo export IMPURE_VAR2=bar +path=$(nix-store -q $(nix-instantiate fixed.nix -A good.0)) + +echo 'testing bad...' +nix-build fixed.nix -A bad --no-out-link && fail "should fail" + +# Building with the bad hash should produce the "good" output path as +# a side-effect. +[[ -e $path ]] +nix path-info --json $path | grep fixed:md5:2qk15sxzzjlnpjk9brn7j8ppcd + echo 'testing good...' nix-build fixed.nix -A good --no-out-link echo 'testing good2...' nix-build fixed.nix -A good2 --no-out-link -echo 'testing bad...' -nix-build fixed.nix -A bad --no-out-link && fail "should fail" - echo 'testing reallyBad...' nix-instantiate fixed.nix -A reallyBad && fail "should fail" diff --git a/tests/init.sh b/tests/init.sh index 41cca047d8fb..e5353598bcc4 100644 --- a/tests/init.sh +++ b/tests/init.sh @@ -16,7 +16,12 @@ mkdir "$NIX_CONF_DIR" cat > "$NIX_CONF_DIR"/nix.conf <<EOF build-users-group = keep-derivations = false +include nix.conf.extra +EOF + +cat > "$NIX_CONF_DIR"/nix.conf.extra <<EOF fsync-metadata = false +!include nix.conf.extra.not-there EOF # Initialise the database. 
diff --git a/tests/lang/data b/tests/lang/data new file mode 100644 index 000000000000..257cc5642cb1 --- /dev/null +++ b/tests/lang/data @@ -0,0 +1 @@ +foo diff --git a/tests/lang/eval-okay-arithmetic.exp b/tests/lang/eval-okay-arithmetic.exp index b195055b7a09..5c54d10b7b47 100644 --- a/tests/lang/eval-okay-arithmetic.exp +++ b/tests/lang/eval-okay-arithmetic.exp @@ -1 +1 @@ -2188 +2216 diff --git a/tests/lang/eval-okay-arithmetic.nix b/tests/lang/eval-okay-arithmetic.nix index bbbbc4691d75..7e9e6a0b666e 100644 --- a/tests/lang/eval-okay-arithmetic.nix +++ b/tests/lang/eval-okay-arithmetic.nix @@ -26,6 +26,10 @@ let { (56088 / 123 / 2) (3 + 4 * const 5 0 - 6 / id 2) + (builtins.bitAnd 12 10) # 0b1100 & 0b1010 = 8 + (builtins.bitOr 12 10) # 0b1100 | 0b1010 = 14 + (builtins.bitXor 12 10) # 0b1100 ^ 0b1010 = 6 + (if 3 < 7 then 1 else err) (if 7 < 3 then err else 1) (if 3 < 3 then err else 1) diff --git a/tests/lang/eval-okay-backslash-newline-1.exp b/tests/lang/eval-okay-backslash-newline-1.exp new file mode 100644 index 000000000000..3e754364cc9c --- /dev/null +++ b/tests/lang/eval-okay-backslash-newline-1.exp @@ -0,0 +1 @@ +"a\nb" diff --git a/tests/lang/eval-okay-backslash-newline-1.nix b/tests/lang/eval-okay-backslash-newline-1.nix new file mode 100644 index 000000000000..7fef3dddd4dd --- /dev/null +++ b/tests/lang/eval-okay-backslash-newline-1.nix @@ -0,0 +1,2 @@ +"a\ +b" diff --git a/tests/lang/eval-okay-backslash-newline-2.exp b/tests/lang/eval-okay-backslash-newline-2.exp new file mode 100644 index 000000000000..3e754364cc9c --- /dev/null +++ b/tests/lang/eval-okay-backslash-newline-2.exp @@ -0,0 +1 @@ +"a\nb" diff --git a/tests/lang/eval-okay-backslash-newline-2.nix b/tests/lang/eval-okay-backslash-newline-2.nix new file mode 100644 index 000000000000..35ddf495c63b --- /dev/null +++ b/tests/lang/eval-okay-backslash-newline-2.nix @@ -0,0 +1,2 @@ +''a''\ +b'' diff --git a/tests/lang/eval-okay-builtins-add.exp b/tests/lang/eval-okay-builtins-add.exp new file mode 100644 index 000000000000..0350b518a7ec --- /dev/null +++ b/tests/lang/eval-okay-builtins-add.exp @@ -0,0 +1 @@ +[ 5 4 "int" "tt" "float" 4 ] diff --git a/tests/lang/eval-okay-builtins-add.nix b/tests/lang/eval-okay-builtins-add.nix new file mode 100644 index 000000000000..c841816222a5 --- /dev/null +++ b/tests/lang/eval-okay-builtins-add.nix @@ -0,0 +1,8 @@ +[ +(builtins.add 2 3) +(builtins.add 2 2) +(builtins.typeOf (builtins.add 2 2)) +("t" + "t") +(builtins.typeOf (builtins.add 2.0 2)) +(builtins.add 2.0 2) +] diff --git a/tests/lang/eval-okay-getattrpos-undefined.exp b/tests/lang/eval-okay-getattrpos-undefined.exp new file mode 100644 index 000000000000..19765bd501b6 --- /dev/null +++ b/tests/lang/eval-okay-getattrpos-undefined.exp @@ -0,0 +1 @@ +null diff --git a/tests/lang/eval-okay-getattrpos-undefined.nix b/tests/lang/eval-okay-getattrpos-undefined.nix new file mode 100644 index 000000000000..14dd38f7734c --- /dev/null +++ b/tests/lang/eval-okay-getattrpos-undefined.nix @@ -0,0 +1 @@ +builtins.unsafeGetAttrPos "abort" builtins diff --git a/tests/lang/eval-okay-nested-with.exp b/tests/lang/eval-okay-nested-with.exp new file mode 100644 index 000000000000..0cfbf08886fc --- /dev/null +++ b/tests/lang/eval-okay-nested-with.exp @@ -0,0 +1 @@ +2 diff --git a/tests/lang/eval-okay-nested-with.nix b/tests/lang/eval-okay-nested-with.nix new file mode 100644 index 000000000000..ba9d79aa79b1 --- /dev/null +++ b/tests/lang/eval-okay-nested-with.nix @@ -0,0 +1,3 @@ +with { x = 1; }; +with { x = 2; }; +x diff --git 
a/tests/lang/eval-okay-path.nix b/tests/lang/eval-okay-path.nix new file mode 100644 index 000000000000..e67168cf3edf --- /dev/null +++ b/tests/lang/eval-okay-path.nix @@ -0,0 +1,7 @@ +builtins.path + { path = ./.; + filter = path: _: baseNameOf path == "data"; + recursive = true; + sha256 = "1yhm3gwvg5a41yylymgblsclk95fs6jy72w0wv925mmidlhcq4sw"; + name = "output"; + } diff --git a/tests/lang/eval-okay-regex-split.exp b/tests/lang/eval-okay-regex-split.exp new file mode 100644 index 000000000000..27ba77ddaf61 --- /dev/null +++ b/tests/lang/eval-okay-regex-split.exp @@ -0,0 +1 @@ +true diff --git a/tests/lang/eval-okay-replacestrings.exp b/tests/lang/eval-okay-replacestrings.exp index a2add1b7b140..72e8274d8c58 100644 --- a/tests/lang/eval-okay-replacestrings.exp +++ b/tests/lang/eval-okay-replacestrings.exp @@ -1 +1 @@ -[ "faabar" "fbar" "fubar" "faboor" "fubar" ] +[ "faabar" "fbar" "fubar" "faboor" "fubar" "XaXbXcX" "X" "a_b" ] diff --git a/tests/lang/eval-okay-replacestrings.nix b/tests/lang/eval-okay-replacestrings.nix index 6284a0e660ae..bd8031fc004e 100644 --- a/tests/lang/eval-okay-replacestrings.nix +++ b/tests/lang/eval-okay-replacestrings.nix @@ -5,4 +5,7 @@ with builtins; (replaceStrings ["oo"] ["u"] "foobar") (replaceStrings ["oo" "a"] ["a" "oo"] "foobar") (replaceStrings ["oo" "oo"] ["u" "i"] "foobar") + (replaceStrings [""] ["X"] "abc") + (replaceStrings [""] ["X"] "") + (replaceStrings ["-"] ["_"] "a-b") ] diff --git a/tests/lang/eval-okay-splitversion.exp b/tests/lang/eval-okay-splitversion.exp new file mode 100644 index 000000000000..153ceb8186a0 --- /dev/null +++ b/tests/lang/eval-okay-splitversion.exp @@ -0,0 +1 @@ +[ "1" "2" "3" ] diff --git a/tests/lang/eval-okay-splitversion.nix b/tests/lang/eval-okay-splitversion.nix new file mode 100644 index 000000000000..9e5c99d2e7f6 --- /dev/null +++ b/tests/lang/eval-okay-splitversion.nix @@ -0,0 +1 @@ +builtins.splitVersion "1.2.3" diff --git a/tests/lang/lib.nix b/tests/lang/lib.nix index 1c63b2f31d67..028a538314b7 100644 --- a/tests/lang/lib.nix +++ b/tests/lang/lib.nix @@ -49,7 +49,7 @@ rec { if comp (head list2) (head list1) then [(head list2)] ++ mergeLists comp list1 (tail list2) else [(head list1)] ++ mergeLists comp (tail list1) list2; - id = x:x; # sic + id = x: x; const = x: y: x; diff --git a/tests/lang/parse-fail-uft8.nix b/tests/lang/parse-fail-uft8.nix new file mode 100644 index 000000000000..34948d48aed2 --- /dev/null +++ b/tests/lang/parse-fail-uft8.nix @@ -0,0 +1 @@ +123 é 4 diff --git a/tests/lang/parse-okay-url.nix b/tests/lang/parse-okay-url.nix index fb74d66f0923..fce3b13ee64b 100644 --- a/tests/lang/parse-okay-url.nix +++ b/tests/lang/parse-okay-url.nix @@ -1,8 +1,7 @@ -[ +[ x:x https://svn.cs.uu.nl:12443/repos/trace/trunk http://www2.mplayerhq.hu/MPlayer/releases/fonts/font-arial-iso-8859-1.tar.bz2 http://losser.st-lab.cs.uu.nl/~armijn/.nix/gcc-3.3.4-static-nix.tar.gz http://fpdownload.macromedia.com/get/shockwave/flash/english/linux/7.0r25/install_flash_player_7_linux.tar.gz ftp://ftp.gtk.org/pub/gtk/v1.2/gtk+-1.2.10.tar.gz - channel:nixos-17.09 ] diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh index 0691c30be1ce..acfd46c54170 100644 --- a/tests/linux-sandbox.sh +++ b/tests/linux-sandbox.sh @@ -2,7 +2,7 @@ source common.sh clearStore -if [[ $(uname) != Linux ]]; then exit; fi +if ! canUseSandbox; then exit; fi # Note: we need to bind-mount $SHELL into the chroot. 
Currently we # only support the case where $SHELL is in the Nix store, because @@ -16,7 +16,7 @@ rm -rf $TEST_ROOT/store0 export NIX_STORE_DIR=/my/store export NIX_REMOTE=$TEST_ROOT/store0 -outPath=$(nix-build dependencies.nix --no-out-link --option sandbox-paths /nix/store) +outPath=$(nix-build dependencies.nix --no-out-link --sandbox-paths /nix/store) [[ $outPath =~ /my/store/.*-dependencies ]] diff --git a/tests/local.mk b/tests/local.mk index 67b378839a22..9df0adf1bfd8 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -13,11 +13,19 @@ nix_tests = \ check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ placeholders.sh nix-shell.sh \ linux-sandbox.sh \ + build-dry.sh \ build-remote.sh \ - nar-index.sh \ + nar-access.sh \ structured-attrs.sh \ fetchGit.sh \ - fetchMercurial.sh + fetchMercurial.sh \ + signing.sh \ + run.sh \ + brotli.sh \ + pure-eval.sh \ + check.sh \ + plugins.sh \ + search.sh # parallel.sh install-tests += $(foreach x, $(nix_tests), tests/$(x)) @@ -26,4 +34,4 @@ tests-environment = NIX_REMOTE= $(bash) -e clean-files += $(d)/common.sh -installcheck: $(d)/common.sh +installcheck: $(d)/common.sh $(d)/plugins/libplugintest.$(SO_EXT) diff --git a/tests/logging.sh b/tests/logging.sh index d38136531ba0..c894ad3ff079 100644 --- a/tests/logging.sh +++ b/tests/logging.sh @@ -11,5 +11,5 @@ path=$(nix-build dependencies.nix --no-out-link) clearStore rm -rf $NIX_LOG_DIR (! nix-store -l $path) -nix-build dependencies.nix --no-out-link --option compress-build-log true +nix-build dependencies.nix --no-out-link --compress-build-log [ "$(nix-store -l $path)" = FOO ] diff --git a/tests/misc.sh b/tests/misc.sh index 6d0ab3adcec8..eda0164167f2 100644 --- a/tests/misc.sh +++ b/tests/misc.sh @@ -16,4 +16,4 @@ nix-env --foo 2>&1 | grep "no operation" nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. -nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 | grep "infinite recursion encountered, at (string):1:15$" +nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 | grep "infinite recursion encountered, at .*(string).*:1:15$" diff --git a/tests/multiple-outputs.sh b/tests/multiple-outputs.sh index 47698b10381d..bedbc39a4ebf 100644 --- a/tests/multiple-outputs.sh +++ b/tests/multiple-outputs.sh @@ -2,6 +2,8 @@ source common.sh clearStore +rm -f $TEST_ROOT/result* + # Test whether read-only evaluation works when referring to the # ‘drvPath’ attribute. echo "evaluating c..." @@ -28,7 +30,7 @@ echo "output path is $outPath" [ "$(cat "$outPath"/file)" = "success" ] # Test nix-build on a derivation with multiple outputs. -nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result +outPath1=$(nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result) [ -e $TEST_ROOT/result-first ] (! [ -e $TEST_ROOT/result-second ]) nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result @@ -37,6 +39,17 @@ nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result [ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] hash1=$(nix-store -q --hash $TEST_ROOT/result-second) +outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a) --no-out-link) +[[ $outPath1 = $outPath2 ]] + +outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.first) --no-out-link) +[[ $outPath1 = $outPath2 ]] + +outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-out-link) +[[ $(cat $outPath2/file) = second ]] + +[[ $(nix-build $(nix-instantiate multiple-outputs.nix -A a.all) --no-out-link | wc -l) -eq 2 ]] + # Delete one of the outputs and rebuild it. 
This will cause a hash # rewrite. nix-store --delete $TEST_ROOT/result-second --ignore-liveness @@ -59,5 +72,5 @@ fi echo "collecting garbage..." rm $TEST_ROOT/result* -nix-store --gc --option keep-derivations true --option keep-outputs true +nix-store --gc --keep-derivations --keep-outputs nix-store --gc --print-roots diff --git a/tests/nar-index.nix b/tests/nar-access.nix index 0e2a7f721135..0e2a7f721135 100644 --- a/tests/nar-index.nix +++ b/tests/nar-access.nix diff --git a/tests/nar-access.sh b/tests/nar-access.sh new file mode 100644 index 000000000000..553d6ca89d7d --- /dev/null +++ b/tests/nar-access.sh @@ -0,0 +1,44 @@ +source common.sh + +echo "building test path" +storePath="$(nix-build nar-access.nix -A a --no-out-link)" + +cd "$TEST_ROOT" + +# Dump path to nar. +narFile="$TEST_ROOT/path.nar" +nix-store --dump $storePath > $narFile + +# Check that find and ls-nar match. +( cd $storePath; find . | sort ) > files.find +nix ls-nar -R -d $narFile "" | sort > files.ls-nar +diff -u files.find files.ls-nar + +# Check that file contents of data match. +nix cat-nar $narFile /foo/data > data.cat-nar +diff -u data.cat-nar $storePath/foo/data + +# Check that file contents of baz match. +nix cat-nar $narFile /foo/baz > baz.cat-nar +diff -u baz.cat-nar $storePath/foo/baz + +nix cat-store $storePath/foo/baz > baz.cat-nar +diff -u baz.cat-nar $storePath/foo/baz + +# Test --json. +[[ $(nix ls-nar --json $narFile /) = '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' ]] +[[ $(nix ls-nar --json -R $narFile /foo) = '{"type":"directory","entries":{"bar":{"type":"regular","size":0,"narOffset":368},"baz":{"type":"regular","size":0,"narOffset":552},"data":{"type":"regular","size":58,"narOffset":736}}}' ]] +[[ $(nix ls-nar --json -R $narFile /foo/bar) = '{"type":"regular","size":0,"narOffset":368}' ]] +[[ $(nix ls-store --json $storePath) = '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' ]] +[[ $(nix ls-store --json -R $storePath/foo) = '{"type":"directory","entries":{"bar":{"type":"regular","size":0},"baz":{"type":"regular","size":0},"data":{"type":"regular","size":58}}}' ]] +[[ $(nix ls-store --json -R $storePath/foo/bar) = '{"type":"regular","size":0}' ]] + +# Test missing files. +nix ls-store --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR' +nix ls-store $storePath/xyzzy 2>&1 | grep 'does not exist' + +# Test failure to dump. +if nix-store --dump $storePath >/dev/full ; then + echo "dumping to /dev/full should fail" + exit -1 +fi diff --git a/tests/nar-index.sh b/tests/nar-index.sh deleted file mode 100644 index 51369346c88a..000000000000 --- a/tests/nar-index.sh +++ /dev/null @@ -1,23 +0,0 @@ -source common.sh - -echo "building test path" -storePath="$(nix-build nar-index.nix -A a --no-out-link)" - -cd "$TEST_ROOT" - -echo "dumping path to nar" -narFile="$TEST_ROOT/path.nar" -nix-store --dump $storePath > $narFile - -echo "check that find and ls-nar match" -( cd $storePath; find . 
| sort ) > files.find -nix ls-nar -R -d $narFile "" | sort > files.ls-nar -diff -u files.find files.ls-nar - -echo "check that file contents of data match" -nix cat-nar $narFile /foo/data > data.cat-nar -diff -u data.cat-nar $storePath/foo/data - -echo "check that file contents of baz match" -nix cat-nar $narFile /foo/baz > baz.cat-nar -diff -u baz.cat-nar $storePath/foo/baz \ No newline at end of file diff --git a/tests/nix-build.sh b/tests/nix-build.sh index dc0e99c73621..395264863196 100644 --- a/tests/nix-build.sh +++ b/tests/nix-build.sh @@ -2,7 +2,7 @@ source common.sh clearStore -nix-build dependencies.nix -o $TEST_ROOT/result +outPath=$(nix-build dependencies.nix -o $TEST_ROOT/result) test "$(cat $TEST_ROOT/result/foobar)" = FOOBAR # The result should be retained by a GC. @@ -17,3 +17,9 @@ test -e $target/foobar rm $TEST_ROOT/result nix-store --gc if test -e $target/foobar; then false; fi + +outPath2=$(nix-build $(nix-instantiate dependencies.nix) --no-out-link) +[[ $outPath = $outPath2 ]] + +outPath2=$(nix-build $(nix-instantiate dependencies.nix)!out --no-out-link) +[[ $outPath = $outPath2 ]] diff --git a/tests/nix-copy-closure.nix b/tests/nix-copy-closure.nix index 44126dd64e47..0dc147fb34e9 100644 --- a/tests/nix-copy-closure.nix +++ b/tests/nix-copy-closure.nix @@ -1,8 +1,8 @@ # Test ‘nix-copy-closure’. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { @@ -29,10 +29,10 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { startAll; # Create an SSH key on the client. - my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`; + my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; $client->succeed("mkdir -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_dsa"); - $client->succeed("chmod 600 /root/.ssh/id_dsa"); + $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); + $client->succeed("chmod 600 /root/.ssh/id_ed25519"); # Install the SSH key on the server. 
$server->succeed("mkdir -m 700 /root/.ssh"); diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh index f0f34a5f8705..d25c456cedfb 100644 --- a/tests/nix-shell.sh +++ b/tests/nix-shell.sh @@ -10,6 +10,25 @@ output=$(nix-shell --pure shell.nix -A shellDrv --run \ [ "$output" = " - foo - bar" ] +# Test nix-shell on a .drv +[[ $(nix-shell --pure $(nix-instantiate shell.nix -A shellDrv) --run \ + 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] + +[[ $(nix-shell --pure $(nix-instantiate shell.nix -A shellDrv) --run \ + 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] + +# Test nix-shell on a .drv symlink + +# Legacy: absolute path and .drv extension required +nix-instantiate shell.nix -A shellDrv --indirect --add-root shell.drv +[[ $(nix-shell --pure $PWD/shell.drv --run \ + 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] + +# New behaviour: just needs to resolve to a derivation in the store +nix-instantiate shell.nix -A shellDrv --indirect --add-root shell +[[ $(nix-shell --pure shell --run \ + 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] + # Test nix-shell -p output=$(NIX_PATH=nixpkgs=shell.nix nix-shell --pure -p foo bar --run 'echo "$(foo) $(bar)"') [ "$output" = "foo bar" ] @@ -20,3 +39,12 @@ chmod a+rx $TEST_ROOT/shell.shebang.sh output=$($TEST_ROOT/shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] + +# Test nix-shell shebang mode for ruby +# This uses a fake interpreter that returns the arguments passed +# This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected. +sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb +chmod a+rx $TEST_ROOT/shell.shebang.rb + +output=$($TEST_ROOT/shell.shebang.rb abc ruby) +[ "$output" = '-e load("'"$TEST_ROOT"'/shell.shebang.rb") -- abc ruby' ] diff --git a/tests/optimise-store.sh b/tests/optimise-store.sh index bd88662bc37f..61e3df2f9f7e 100644 --- a/tests/optimise-store.sh +++ b/tests/optimise-store.sh @@ -2,8 +2,8 @@ source common.sh clearStore -outPath1=$(echo 'with import ./config.nix; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true) -outPath2=$(echo 'with import ./config.nix; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true) +outPath1=$(echo 'with import ./config.nix; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) +outPath2=$(echo 'with import ./config.nix; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) inode1="$(stat --format=%i $outPath1/foo)" inode2="$(stat --format=%i $outPath2/foo)" diff --git a/tests/plugins.sh b/tests/plugins.sh new file mode 100644 index 000000000000..4b1baeddce32 --- /dev/null +++ b/tests/plugins.sh @@ -0,0 +1,7 @@ +source common.sh + +set -o pipefail + +res=$(nix eval '(builtins.anotherNull)' --option setting-set true --option plugin-files $PWD/plugins/libplugintest*) + +[ "$res"x = "nullx" ] diff --git a/tests/plugins/local.mk b/tests/plugins/local.mk new file mode 100644 index 000000000000..1d2bac052fd2 --- /dev/null +++ 
b/tests/plugins/local.mk @@ -0,0 +1,9 @@ +libraries += libplugintest + +libplugintest_DIR := $(d) + +libplugintest_SOURCES := $(d)/plugintest.cc + +libplugintest_ALLOW_UNDEFINED := 1 + +libplugintest_EXCLUDE_FROM_LIBRARY_LIST := 1 diff --git a/tests/plugins/plugintest.cc b/tests/plugins/plugintest.cc new file mode 100644 index 000000000000..8da15ebabd7d --- /dev/null +++ b/tests/plugins/plugintest.cc @@ -0,0 +1,19 @@ +#include "globals.hh" +#include "primops.hh" + +using namespace nix; + +static BaseSetting<bool> settingSet{false, "setting-set", + "Whether the plugin-defined setting was set"}; + +static RegisterSetting rs(&settingSet); + +static void prim_anotherNull (EvalState & state, const Pos & pos, Value ** args, Value & v) +{ + if (settingSet) + mkNull(v); + else + mkBool(v, false); +} + +static RegisterPrimOp rp("anotherNull", 0, prim_anotherNull); diff --git a/tests/pure-eval.nix b/tests/pure-eval.nix new file mode 100644 index 000000000000..ed25b3d45637 --- /dev/null +++ b/tests/pure-eval.nix @@ -0,0 +1,3 @@ +{ + x = 123; +} diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh new file mode 100644 index 000000000000..49c8564487c3 --- /dev/null +++ b/tests/pure-eval.sh @@ -0,0 +1,18 @@ +source common.sh + +clearStore + +nix eval --pure-eval '(assert 1 + 2 == 3; true)' + +[[ $(nix eval '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] + +(! nix eval --pure-eval '(builtins.readFile ./pure-eval.sh)') + +(! nix eval --pure-eval '(builtins.currentTime)') +(! nix eval --pure-eval '(builtins.currentSystem)') + +(! nix-instantiate --pure-eval ./simple.nix) + +[[ $(nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] +(! nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") +nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash-file pure-eval.nix --type sha256)\"; })).x)" diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix index 39bd090e43e7..d7a4b21989e5 100644 --- a/tests/remote-builds.nix +++ b/tests/remote-builds.nix @@ -1,8 +1,8 @@ # Test Nix's remote build feature. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest ( @@ -46,13 +46,13 @@ in nix.buildMachines = [ { hostName = "slave1"; sshUser = "root"; - sshKey = "/root/.ssh/id_dsa"; + sshKey = "/root/.ssh/id_ed25519"; system = "i686-linux"; maxJobs = 1; } { hostName = "slave2"; sshUser = "root"; - sshKey = "/root/.ssh/id_dsa"; + sshKey = "/root/.ssh/id_ed25519"; system = "i686-linux"; maxJobs = 1; } @@ -70,10 +70,10 @@ in startAll; # Create an SSH key on the client. - my $key = `${pkgs.openssh}/bin/ssh-keygen -t dsa -f key -N ""`; + my $key = `${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f key -N ""`; $client->succeed("mkdir -p -m 700 /root/.ssh"); - $client->copyFileFromHost("key", "/root/.ssh/id_dsa"); - $client->succeed("chmod 600 /root/.ssh/id_dsa"); + $client->copyFileFromHost("key", "/root/.ssh/id_ed25519"); + $client->succeed("chmod 600 /root/.ssh/id_ed25519"); # Install the SSH key on the slaves. $client->waitForUnit("network.target"); @@ -85,7 +85,10 @@ in } # Perform a build and check that it was performed on the slave. 
- my $out = $client->succeed("nix-build ${expr nodes.client.config 1}"); + my $out = $client->succeed( + "nix-build ${expr nodes.client.config 1} 2> build-output", + "grep -q Hello build-output" + ); $slave1->succeed("test -e $out"); # And a parallel build. diff --git a/tests/repair.sh b/tests/repair.sh index 7c928e3be73c..ec7ad5dcaff4 100644 --- a/tests/repair.sh +++ b/tests/repair.sh @@ -51,7 +51,7 @@ nix copy --to file://$cacheDir $path chmod u+w $path2 rm -rf $path2 -nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir" --option signed-binary-caches '' +nix-store --verify --check-contents --repair --substituters "file://$cacheDir" --no-require-sigs if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then echo "path not repaired properly" >&2 @@ -69,7 +69,7 @@ if nix-store --verify-path $path2; then exit 1 fi -nix-store --repair-path $path2 --option binary-caches "file://$cacheDir" --option signed-binary-caches '' +nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then echo "path not repaired properly" >&2 diff --git a/tests/restricted.nix b/tests/restricted.nix new file mode 100644 index 000000000000..e0ef5840209c --- /dev/null +++ b/tests/restricted.nix @@ -0,0 +1 @@ +1 + 2 diff --git a/tests/restricted.sh b/tests/restricted.sh index a297847cc8e3..a87d8ec2c940 100644 --- a/tests/restricted.sh +++ b/tests/restricted.sh @@ -2,19 +2,20 @@ source common.sh clearStore -nix-instantiate --option restrict-eval true --eval -E '1 + 2' -(! nix-instantiate --option restrict-eval true ./simple.nix) -nix-instantiate --option restrict-eval true ./simple.nix -I src=. -nix-instantiate --option restrict-eval true ./simple.nix -I src1=simple.nix -I src2=config.nix -I src3=./simple.builder.sh +nix-instantiate --restrict-eval --eval -E '1 + 2' +(! nix-instantiate --restrict-eval ./restricted.nix) +(! nix-instantiate --eval --restrict-eval <(echo '1 + 2')) +nix-instantiate --restrict-eval ./simple.nix -I src=. +nix-instantiate --restrict-eval ./simple.nix -I src1=simple.nix -I src2=config.nix -I src3=./simple.builder.sh -(! nix-instantiate --option restrict-eval true --eval -E 'builtins.readFile ./simple.nix') -nix-instantiate --option restrict-eval true --eval -E 'builtins.readFile ./simple.nix' -I src=.. +(! nix-instantiate --restrict-eval --eval -E 'builtins.readFile ./simple.nix') +nix-instantiate --restrict-eval --eval -E 'builtins.readFile ./simple.nix' -I src=.. -(! nix-instantiate --option restrict-eval true --eval -E 'builtins.readDir ../src/boost') -nix-instantiate --option restrict-eval true --eval -E 'builtins.readDir ../src/boost' -I src=../src +(! nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel') +nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel' -I src=../src -(! nix-instantiate --option restrict-eval true --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>') -nix-instantiate --option restrict-eval true --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>' -I src=. +(! nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>') +nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>' -I src=. 
p=$(nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval --allowed-uris "file://$(pwd)") cmp $p restricted.sh @@ -28,3 +29,12 @@ nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval (! nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) (! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) (! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --restrict-eval) + +ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix +[[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix) +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT) +(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) +nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT -I . + +[[ $(nix eval --raw --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] diff --git a/tests/run.nix b/tests/run.nix new file mode 100644 index 000000000000..77dcbd2a9df0 --- /dev/null +++ b/tests/run.nix @@ -0,0 +1,17 @@ +with import ./config.nix; + +{ + hello = mkDerivation { + name = "hello"; + buildCommand = + '' + mkdir -p $out/bin + cat > $out/bin/hello <<EOF + #! ${shell} + who=\$1 + echo "Hello \''${who:-World} from $out/bin/hello" + EOF + chmod +x $out/bin/hello + ''; + }; +} diff --git a/tests/run.sh b/tests/run.sh new file mode 100644 index 000000000000..d1dbfd6bd4a6 --- /dev/null +++ b/tests/run.sh @@ -0,0 +1,28 @@ +source common.sh + +clearStore +clearCache + +nix run -f run.nix hello -c hello | grep 'Hello World' +nix run -f run.nix hello -c hello NixOS | grep 'Hello NixOS' + +if ! canUseSandbox; then exit; fi + +chmod -R u+w $TEST_ROOT/store0 || true +rm -rf $TEST_ROOT/store0 + +clearStore + +path=$(nix eval --raw -f run.nix hello) + +# Note: we need the sandbox paths to ensure that the shell is +# visible in the sandbox. +nix run --sandbox-build-dir /build-tmp \ + --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' \ + --store $TEST_ROOT/store0 -f run.nix hello -c hello | grep 'Hello World' + +path2=$(nix run --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' 
--store $TEST_ROOT/store0 -f run.nix hello -c $SHELL -c 'type -p hello') + +[[ $path/bin/hello = $path2 ]] + +[[ -e $TEST_ROOT/store0/nix/store/$(basename $path)/bin/hello ]] diff --git a/tests/search.nix b/tests/search.nix new file mode 100644 index 000000000000..fea6e7a7a647 --- /dev/null +++ b/tests/search.nix @@ -0,0 +1,25 @@ +with import ./config.nix; + +{ + hello = mkDerivation rec { + name = "hello-${version}"; + version = "0.1"; + buildCommand = "touch $out"; + meta.description = "Empty file"; + }; + foo = mkDerivation rec { + name = "foo-5"; + buildCommand = '' + mkdir -p $out + echo ${name} > $out/${name} + ''; + }; + bar = mkDerivation rec { + name = "bar-3"; + buildCommand = '' + echo "Does not build successfully" + exit 1 + ''; + meta.description = "broken bar"; + }; +} diff --git a/tests/search.sh b/tests/search.sh new file mode 100644 index 000000000000..0b26a125120f --- /dev/null +++ b/tests/search.sh @@ -0,0 +1,43 @@ +source common.sh + +clearStore +clearCache + +# No packages +(( $(NIX_PATH= nix search -u|wc -l) == 0 )) + +# Haven't updated cache, still nothing +(( $(nix search -f search.nix hello|wc -l) == 0 )) +(( $(nix search -f search.nix |wc -l) == 0 )) + +# Update cache, search should work +(( $(nix search -f search.nix -u hello|wc -l) > 0 )) + +# Use cache +(( $(nix search -f search.nix foo|wc -l) > 0 )) +(( $(nix search foo|wc -l) > 0 )) + +# Test --no-cache works +# No results from cache +(( $(nix search --no-cache foo |wc -l) == 0 )) +# Does find results from file pointed at +(( $(nix search -f search.nix --no-cache foo |wc -l) > 0 )) + +# Check descriptions are searched +(( $(nix search broken | wc -l) > 0 )) + +# Check search that matches nothing +(( $(nix search nosuchpackageexists | wc -l) == 0 )) + +# Search for multiple arguments +(( $(nix search hello empty | wc -l) == 5 )) + +# Multiple arguments will not exist +(( $(nix search hello broken | wc -l) == 0 )) + +## Search expressions + +# Check that empty search string matches all +nix search|grep -q foo +nix search|grep -q bar +nix search|grep -q hello diff --git a/tests/setuid.nix b/tests/setuid.nix index 7a12b4fb316e..77e83c8d6c2c 100644 --- a/tests/setuid.nix +++ b/tests/setuid.nix @@ -1,8 +1,8 @@ # Verify that Linux builds cannot create setuid or setgid binaries. -{ system, nix }: +{ nixpkgs, system, nix }: -with import <nixpkgs/nixos/lib/testing.nix> { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; makeTest { @@ -20,7 +20,7 @@ makeTest { startAll; # Copying to /tmp should succeed. - $machine->succeed('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" {} " + $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id ")\' '); @@ -30,7 +30,7 @@ makeTest { $machine->succeed("rm /tmp/id"); # Creating a setuid binary should fail. - $machine->fail('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" {} " + $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id chmod 4755 /tmp/id @@ -41,7 +41,7 @@ makeTest { $machine->succeed("rm /tmp/id"); # Creating a setgid binary should fail. 
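# Illustrative sketch, not part of the patch: the `nix run` invocations from
# tests/run.sh above, with the diverted store location replaced by a placeholder.
nix run -f run.nix hello -c hello NixOS      # run the freshly built hello
nix run --sandbox-build-dir /build-tmp \
    --sandbox-paths '/nix? /bin? /lib? /lib64? /usr?' \
    --store /tmp/diverted-store -f run.nix hello -c hello
# A trailing '?' marks a host path that is bind-mounted into the sandbox only if
# it exists, which lets the same sandbox-paths value work on different systems.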
- $machine->fail('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" {} " + $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" {} " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id chmod 2755 /tmp/id @@ -52,7 +52,7 @@ makeTest { $machine->succeed("rm /tmp/id"); # The checks should also work on 32-bit binaries. - $machine->fail('nix-build --option sandbox false -E \'(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} " + $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> { system = "i686-linux"; }; runCommand "foo" {} " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id chmod 2755 /tmp/id @@ -63,7 +63,7 @@ makeTest { $machine->succeed("rm /tmp/id"); # The tests above use fchmodat(). Test chmod() as well. - $machine->succeed('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " + $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id perl -e \"chmod 0666, qw(/tmp/id) or die\" @@ -73,7 +73,7 @@ makeTest { $machine->succeed("rm /tmp/id"); - $machine->fail('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " + $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id perl -e \"chmod 04755, qw(/tmp/id) or die\" @@ -84,7 +84,7 @@ makeTest { $machine->succeed("rm /tmp/id"); # And test fchmod(). - $machine->succeed('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " + $machine->succeed('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 01750, \\\$x or die\" @@ -94,7 +94,7 @@ makeTest { $machine->succeed("rm /tmp/id"); - $machine->fail('nix-build --option sandbox false -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " + $machine->fail('nix-build --no-sandbox -E \'(with import <nixpkgs> {}; runCommand "foo" { buildInputs = [ perl ]; } " mkdir -p $out cp ${pkgs.coreutils}/bin/id /tmp/id perl -e \"my \\\$x; open \\\$x, qw(/tmp/id); chmod 04777, \\\$x or die\" diff --git a/tests/shell.nix b/tests/shell.nix index 5845d36fc161..eb39f9039a88 100644 --- a/tests/shell.nix +++ b/tests/shell.nix @@ -45,5 +45,12 @@ let pkgs = rec { bash = shell; + # ruby "interpreter" that outputs "$@" + ruby = runCommand "ruby" {} '' + mkdir -p $out/bin + echo 'printf -- "$*"' > $out/bin/ruby + chmod a+rx $out/bin/ruby + ''; + inherit pkgs; }; in pkgs diff --git a/tests/shell.shebang.rb b/tests/shell.shebang.rb new file mode 100644 index 000000000000..ea67eb09c1c6 --- /dev/null +++ b/tests/shell.shebang.rb @@ -0,0 +1,7 @@ +#! @SHELL_PROG@ +#! ruby +#! nix-shell -I nixpkgs=shell.nix --no-substitute +#! nix-shell --pure -p ruby -i ruby + +# Contents doesn't matter. +abort("This shouldn't be executed.") diff --git a/tests/shell.shebang.sh b/tests/shell.shebang.sh index a6c4bc945921..f7132043de44 100755 --- a/tests/shell.shebang.sh +++ b/tests/shell.shebang.sh @@ -1,4 +1,4 @@ #! @ENV_PROG@ nix-shell -#! nix-shell -I nixpkgs=shell.nix --option use-substitutes false +#! nix-shell -I nixpkgs=shell.nix --no-substitute #! 
nix-shell --pure -i bash -p foo bar echo "$(foo) $(bar) $@" diff --git a/tests/signing.sh b/tests/signing.sh new file mode 100644 index 000000000000..46929639199d --- /dev/null +++ b/tests/signing.sh @@ -0,0 +1,101 @@ +source common.sh + +clearStore +clearCache + +nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1 +pk1=$(cat $TEST_ROOT/pk1) +nix-store --generate-binary-cache-key cache2.example.org $TEST_ROOT/sk2 $TEST_ROOT/pk2 +pk2=$(cat $TEST_ROOT/pk2) + +# Build a path. +outPath=$(nix-build dependencies.nix --no-out-link --secret-key-files "$TEST_ROOT/sk1 $TEST_ROOT/sk2") + +# Verify that the path got signed. +info=$(nix path-info --json $outPath) +[[ $info =~ '"ultimate":true' ]] +[[ $info =~ 'cache1.example.org' ]] +[[ $info =~ 'cache2.example.org' ]] + +# Test "nix verify". +nix verify -r $outPath + +expect 2 nix verify -r $outPath --sigs-needed 1 + +nix verify -r $outPath --sigs-needed 1 --trusted-public-keys $pk1 + +expect 2 nix verify -r $outPath --sigs-needed 2 --trusted-public-keys $pk1 + +nix verify -r $outPath --sigs-needed 2 --trusted-public-keys "$pk1 $pk2" + +nix verify --all --sigs-needed 2 --trusted-public-keys "$pk1 $pk2" + +# Build something unsigned. +outPath2=$(nix-build simple.nix --no-out-link) + +nix verify -r $outPath + +# Verify that the path did not get signed but does have the ultimate bit. +info=$(nix path-info --json $outPath2) +[[ $info =~ '"ultimate":true' ]] +(! [[ $info =~ 'signatures' ]]) + +# Test "nix verify". +nix verify -r $outPath2 + +expect 2 nix verify -r $outPath2 --sigs-needed 1 + +expect 2 nix verify -r $outPath2 --sigs-needed 1 --trusted-public-keys $pk1 + +# Test "nix sign-paths". +nix sign-paths --key-file $TEST_ROOT/sk1 $outPath2 + +nix verify -r $outPath2 --sigs-needed 1 --trusted-public-keys $pk1 + +# Build something content-addressed. +outPathCA=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build ./fixed.nix -A good.0 --no-out-link) + +[[ $(nix path-info --json $outPathCA) =~ '"ca":"fixed:md5:' ]] + +# Content-addressed paths don't need signatures, so they verify +# regardless of --sigs-needed. +nix verify $outPathCA +nix verify $outPathCA --sigs-needed 1000 + +# Copy to a binary cache. +nix copy --to file://$cacheDir $outPath2 + +# Verify that signatures got copied. +info=$(nix path-info --store file://$cacheDir --json $outPath2) +(! [[ $info =~ '"ultimate":true' ]]) +[[ $info =~ 'cache1.example.org' ]] +(! [[ $info =~ 'cache2.example.org' ]]) + +# Verify that adding a signature to a path in a binary cache works. +nix sign-paths --store file://$cacheDir --key-file $TEST_ROOT/sk2 $outPath2 +info=$(nix path-info --store file://$cacheDir --json $outPath2) +[[ $info =~ 'cache1.example.org' ]] +[[ $info =~ 'cache2.example.org' ]] + +# Copying to a diverted store should fail due to a lack of valid signatures. +chmod -R u+w $TEST_ROOT/store0 || true +rm -rf $TEST_ROOT/store0 +(! nix copy --to $TEST_ROOT/store0 $outPath) + +# But succeed if we supply the public keys. +nix copy --to $TEST_ROOT/store0 $outPath --trusted-public-keys $pk1 + +expect 2 nix verify --store $TEST_ROOT/store0 -r $outPath + +nix verify --store $TEST_ROOT/store0 -r $outPath --trusted-public-keys $pk1 +nix verify --store $TEST_ROOT/store0 -r $outPath --sigs-needed 2 --trusted-public-keys "$pk1 $pk2" + +# It should also succeed if we disable signature checking. +(! nix copy --to $TEST_ROOT/store0 $outPath2) +nix copy --to $TEST_ROOT/store0?require-sigs=false $outPath2 + +# But signatures should still get copied. 
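# Condensed sketch, not part of the patch: the signing workflow that
# tests/signing.sh exercises, with placeholder key file names.
nix-store --generate-binary-cache-key cache1.example.org ./secret-key ./public-key
outPath=$(nix-build dependencies.nix --no-out-link --secret-key-files ./secret-key)
nix verify -r "$outPath" --sigs-needed 1 --trusted-public-keys "$(cat ./public-key)"
nix sign-paths --key-file ./secret-key "$outPath"    # add a signature to an already-built path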
+nix verify --store $TEST_ROOT/store0 -r $outPath2 --trusted-public-keys $pk1 + +# Content-addressed stuff can be copied without signatures. +nix copy --to $TEST_ROOT/store0 $outPathCA diff --git a/tests/timeout.sh b/tests/timeout.sh index d3d85200fa51..39ecf0a1a30c 100644 --- a/tests/timeout.sh +++ b/tests/timeout.sh @@ -15,7 +15,7 @@ if ! echo "$messages" | grep -q "timed out"; then exit 1 fi -if nix-build -Q timeout.nix -A infiniteLoop --option max-build-log-size 100; then +if nix-build -Q timeout.nix -A infiniteLoop --max-build-log-size 100; then echo "build should have failed" exit 1 fi diff --git a/tests/user-envs.sh b/tests/user-envs.sh index c4192fdc59b2..ba63923113d8 100644 --- a/tests/user-envs.sh +++ b/tests/user-envs.sh @@ -24,6 +24,9 @@ rm -f $HOME/.nix-defexpr ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr nix-env -qa '*' --description | grep -q silly +# Query the system. +nix-env -qa '*' --system | grep -q $system + # Install "foo-1.0". nix-env -i foo-1.0 diff --git a/version b/version index 35d51f33b34f..42f7d2336ea8 100644 --- a/version +++ b/version @@ -1 +1 @@ -1.12 \ No newline at end of file +2.1 \ No newline at end of file
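A pattern that recurs throughout these test updates is the switch from the generic "--option name value" spelling to dedicated flags such as --restrict-eval, --no-sandbox, --no-require-sigs and --max-build-log-size. A small before/after sketch (the generic --option form generally remains accepted):

# older spelling, via the generic --option switch
nix-build timeout.nix -A infiniteLoop --option max-build-log-size 100
# dedicated flag used by the updated test
nix-build timeout.nix -A infiniteLoop --max-build-log-size 100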