about summary refs log tree commit diff
path: root/tvix/boot/tests/default.nix
blob: 108803d7abcc18e812912faf73254b9b336baeeb (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
{ depot, pkgs, lib, ... }:

let
  # Seed a tvix-store with the tvix docs, then start a VM, ask it to list all
  # files in /nix/store, and ensure the store path is present, which acts as a
  # nice smoketest.
  mkBootTest =
    # Addresses for the three tvix-store services. The "memory://"
    # defaults keep blobs, directories and pathinfos in RAM for the
    # duration of the test; the callers below override these to
    # exercise the sled and objectstore backends.
    { blobServiceAddr ? "memory://"
    , directoryServiceAddr ? "memory://"
    , pathInfoServiceAddr ? "memory://"


      # The path to import.
    , path

      # Whether the path should be imported as a closure.
      # If false, importPathName must be specified.
    , isClosure ? false
      # Name to import `path` under when isClosure is false
      # (must be null when isClosure is true — see the asserts below).
    , importPathName ? null

      # The cmdline to pass to the VM.
      # Defaults to tvix.find, which lists all files in the store.
    , vmCmdline ? "tvix.find"
      # The string we expect to find in the VM output.
      # Defaults to the value of `path` (the store path we upload).
    , assertVMOutput ? path
    }:

      # importPathName is only meaningful (and then mandatory) for
      # non-closure imports; closures keep their original store paths.
      assert isClosure -> importPathName == null;
      assert (!isClosure) -> importPathName != null;

      pkgs.stdenv.mkDerivation {
        name = "run-vm";

        # nar-bridge, curl, parallel and xz are only needed on the
        # closure-upload path (the isClosure branch of buildCommand).
        nativeBuildInputs = [
          depot.tvix.store
          depot.tvix.boot.runVM
        ] ++ lib.optionals isClosure [
          depot.tvix.nar-bridge
          pkgs.curl
          pkgs.parallel
          pkgs.xz.bin
        ];
        buildCommand = ''
          touch $out

          # Start the tvix daemon, listening on a unix socket.
          BLOB_SERVICE_ADDR=${blobServiceAddr} \
            DIRECTORY_SERVICE_ADDR=${directoryServiceAddr} \
            PATH_INFO_SERVICE_ADDR=${pathInfoServiceAddr} \
            tvix-store \
              --otlp=false \
              daemon -l $PWD/tvix-store.sock &

          # Wait for the socket to be created.
          while [ ! -e $PWD/tvix-store.sock ]; do sleep 1; done

          # Export env vars so that subsequent tvix-store commands will talk to
          # our tvix-store daemon over the unix socket.
          export BLOB_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
          export DIRECTORY_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
          export PATH_INFO_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
        '' + lib.optionalString (!isClosure) ''
          echo "Importing ${path} into tvix-store with name ${importPathName}…"
          cp -R ${path} ${importPathName}
          outpath=$(tvix-store import ${importPathName})

          echo "imported to $outpath"
        '' + lib.optionalString (isClosure) ''
          echo "Starting nar-bridge…"
          nar-bridge-http \
            --otlp=false \
            --store-addr=unix://$PWD/tvix-store.sock \
            --listen-addr=$PWD/nar-bridge.sock &

          # Wait for the socket to be created.
          while [ ! -e $PWD/nar-bridge.sock ]; do sleep 1; done

          # Upload. We can't use nix copy --to http://…, as it wants access to the nix db.
          # However, we can use mkBinaryCache to assemble .narinfo and .nar.xz to upload,
          # and then drive a HTTP client ourselves.
          to_upload=${pkgs.mkBinaryCache { rootPaths = [path];}}

          # Upload all NAR files (with some parallelism).
          # As mkBinaryCache produces them xz-compressed, unpack them on the fly.
          # nar-bridge doesn't care about the path we upload *to*, but a
          # subsequent .narinfo upload need to refer to its contents (by narhash).
          echo -e "Uploading NARs… "
          ls -d $to_upload/nar/*.nar.xz | parallel 'xz -d < {} | curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/nar/$(basename {} | cut -d "." -f 1).nar'
          echo "Done."

          # Upload all NARInfo files.
          # FUTUREWORK: This doesn't upload them in order, and currently relies
          # on PathInfoService not doing any checking.
          # In the future, we might want to make this behaviour configurable,
          # and disable checking here, to keep the logic simple.
          ls -d $to_upload/*.narinfo | parallel 'curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/$(basename {}) < {}'
        '' + ''
          # Invoke a VM using tvix as the backing store, ensure the outpath appears in its listing.

          CH_CMDLINE="${vmCmdline}" run-tvix-vm 2>&1 | tee output.txt
          grep "${assertVMOutput}" output.txt
        '';
        # The VM needs hardware virtualization on the builder.
        requiredSystemFeatures = [ "kvm" ];
      };
in
depot.nix.readTree.drvTargets {
  # Import the tvix docs with all three services kept in memory.
  docs-memory = mkBootTest {
    importPathName = "docs";
    path = ../../docs;
  };

  # Same import, but persist blobs, directories and pathinfos in sled
  # databases inside the build directory.
  docs-sled = mkBootTest {
    blobServiceAddr = "sled://$PWD/blobs.sled";
    directoryServiceAddr = "sled://$PWD/directories.sled";
    pathInfoServiceAddr = "sled://$PWD/pathinfo.sled";
    importPathName = "docs";
    path = ../../docs;
  };

  # Same import, but keep blobs in a local object store.
  docs-objectstore-local = mkBootTest {
    blobServiceAddr = "objectstore+file://$PWD/blobs";
    importPathName = "docs";
    path = ../../docs;
  };

  # Upload the whole tvix-store closure via nar-bridge and boot from it.
  closure-tvix = mkBootTest {
    blobServiceAddr = "objectstore+file://$PWD/blobs";
    isClosure = true;
    path = depot.tvix.store;
  };
}