{ depot, pkgs, lib, ... }:

let
  # Seed a tvix-store with the tvix docs, then start a VM, ask it to list all
  # files in /nix/store, and ensure the store path is present, which acts as a
  # nice smoketest.
  mkBootTest =
    { blobServiceAddr ? "memory://"
    , directoryServiceAddr ? "memory://"
    , pathInfoServiceAddr ? "memory://"

      # The path to import.
    , path

      # Whether the path should be imported as a closure.
      # If false, importPathName must be specified.
    , isClosure ? false

      # Whether to use nar-bridge to upload, rather than `tvix-store copy`.
      # Using nar-bridge is currently "slower", as the `pkgs.mkBinaryCache` build
      # takes quite some time.
    , useNarBridge ? false

    , importPathName ? null

      # Commands to run before starting the tvix-store daemon. Useful to provide
      # auxiliary mock services.
    , preStart ? ""

      # The cmdline to pass to the VM.
      # Defaults to tvix.find, which lists all files in the store.
    , vmCmdline ? "tvix.find"

      # The string we expect to find in the VM output.
      # Defaults to the value of `path` (the store path we upload).
    , assertVMOutput ? path
    }:

    assert isClosure -> importPathName == null;
    assert (!isClosure) -> importPathName != null;
    pkgs.stdenv.mkDerivation ({
      name = "run-vm";

      nativeBuildInputs = [
        depot.tvix.store
        depot.tvix.boot.runVM
      ] ++ lib.optionals (isClosure && useNarBridge) [
        depot.tvix.nar-bridge
        pkgs.curl
        pkgs.parallel
        pkgs.xz.bin
      ];

      buildCommand = ''
        touch $out

        # Ensure we can construct http clients.
        export SSL_CERT_FILE=/dev/null

        ${preStart}

        # Start the tvix daemon, listening on a unix socket.
        BLOB_SERVICE_ADDR=${lib.escapeShellArg blobServiceAddr} \
          DIRECTORY_SERVICE_ADDR=${lib.escapeShellArg directoryServiceAddr} \
          PATH_INFO_SERVICE_ADDR=${lib.escapeShellArg pathInfoServiceAddr} \
          tvix-store \
          --otlp=false \
          daemon -l $PWD/tvix-store.sock &

        # Wait for the service to report healthy.
        timeout 22 sh -c "until ${pkgs.ip2unix}/bin/ip2unix -r out,path=$PWD/tvix-store.sock ${pkgs.grpc-health-check}/bin/grpc-health-check --address 127.0.0.1 --port 8080; do sleep 1; done"

        # Export env vars so that subsequent tvix-store commands will talk to
        # our tvix-store daemon over the unix socket.
        export BLOB_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
        export DIRECTORY_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
        export PATH_INFO_SERVICE_ADDR=grpc+unix://$PWD/tvix-store.sock
      '' + lib.optionalString (!isClosure) ''
        echo "Importing ${path} into tvix-store with name ${importPathName}…"
        cp -R ${path} ${importPathName}
        outpath=$(tvix-store import ${importPathName})

        echo "imported to $outpath"
      '' + lib.optionalString (isClosure && !useNarBridge) ''
        echo "Copying closure ${path}…"

        # This picks up the `closure` key in `$NIX_ATTRS_JSON_FILE` automatically.
        tvix-store --otlp=false copy
      '' + lib.optionalString (isClosure && useNarBridge) ''
        echo "Starting nar-bridge…"
        nar-bridge \
          --otlp=false \
          -l $PWD/nar-bridge.sock &

        # Wait for the socket to be created.
        while [ ! -e $PWD/nar-bridge.sock ]; do sleep 1; done

        # Upload. We can't use nix copy --to http://…, as it wants access to the nix db.
        # However, we can use mkBinaryCache to assemble .narinfo and .nar.xz to upload,
        # and then drive a HTTP client ourselves.
        to_upload=${pkgs.mkBinaryCache { rootPaths = [ path ]; }}
        # Upload all NAR files (with some parallelism).
        # As mkBinaryCache produces them xz-compressed, unpack them on the fly.
        # nar-bridge doesn't care about the path we upload *to*, but a
        # subsequent .narinfo upload needs to refer to its contents (by narhash).
        echo -e "Uploading NARs… "
        ls -d $to_upload/nar/*.nar.xz | parallel 'xz -d < {} | curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/nar/$(basename {} | cut -d "." -f 1).nar'
        echo "Done."

        # Upload all NARInfo files.
        # FUTUREWORK: This doesn't upload them in order, and currently relies
        # on PathInfoService not doing any checking.
        # In the future, we might want to make this behaviour configurable,
        # and disable checking here, to keep the logic simple.
        ls -d $to_upload/*.narinfo | parallel 'curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/$(basename {}) < {}'
      '' + ''
        # Invoke a VM using tvix as the backing store, and ensure the expected
        # string shows up in its output.
        echo "Starting VM…"

        CH_CMDLINE="${vmCmdline}" run-tvix-vm 2>&1 | tee output.txt
        grep "${assertVMOutput}" output.txt
      '';

      requiredSystemFeatures = [ "kvm" ];
    } // lib.optionalAttrs (isClosure && !useNarBridge) {
      __structuredAttrs = true;
      exportReferencesGraph.closure = [ path ];
    });
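
  # Evaluate a NixOS configuration to its system toplevel derivation.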
  systemFor = sys: (depot.ops.nixos.nixosFor sys).system;
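
  # A minimal NixOS system used for the boot tests: it prints a recognizable
  # getty help line and powers itself off shortly after reaching multi-user.target.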
  testSystem = systemFor ({ modulesPath, pkgs, ... }: {
    # Set some options necessary to evaluate.
    boot.loader.systemd-boot.enable = true;
    # TODO: figure out how to disable this without causing eval to fail
    fileSystems."/" = {
      device = "/dev/root";
      fsType = "tmpfs";
    };

    services.getty.helpLine = "Onwards and upwards.";

    systemd.services.do-shutdown = {
      after = [ "getty.target" ];
      description = "Shut down again";
      wantedBy = [ "multi-user.target" ];
      serviceConfig.Type = "oneshot";
      script = "/run/current-system/sw/bin/systemctl poweroff --when=+10s";
    };

    # Don't warn about stateVersion.
    system.stateVersion = "24.05";
  });
in
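# Expose each of the tests below as an individual derivation target.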
depot.nix.readTree.drvTargets
{
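  # Import the tvix docs into a fully in-memory store.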
  docs-memory = (mkBootTest {
    path = ../../docs;
    importPathName = "docs";
  });
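
  # Same docs import, but with persistent backends (an object store for blobs,
  # sled for directories and PathInfo).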
  docs-persistent = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    directoryServiceAddr = "sled:///build/directories.sled";
    pathInfoServiceAddr = "sled:///build/pathinfo.sled";
    path = ../../docs;
    importPathName = "docs";
  });
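
  # Copy the closure of tvix-store itself with `tvix-store copy`,
  # then list it from the VM.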
  closure-tvix = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    path = depot.tvix.store;
    isClosure = true;
  });
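
  # Copy a whole NixOS system closure, boot it, and assert on the getty help line.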
  closure-nixos = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    pathInfoServiceAddr = "redb:///build/pathinfo.redb";
    path = testSystem;
    isClosure = true;
    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
    assertVMOutput = "Onwards and upwards.";
  });
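
  # Same NixOS closure, with directories and PathInfo stored in Bigtable
  # (provided by a local cbtemulator), uploaded through nar-bridge.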
  closure-nixos-bigtable = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    directoryServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=directories&family_name=cf1";
    pathInfoServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=pathinfos&family_name=cf1";
    path = testSystem;
    useNarBridge = true;
    preStart = ''
      ${pkgs.cbtemulator}/bin/cbtemulator -address $PWD/cbtemulator.sock &
      timeout 22 sh -c 'until [ -e $PWD/cbtemulator.sock ]; do sleep 1; done'

      export BIGTABLE_EMULATOR_HOST=unix://$PWD/cbtemulator.sock
      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable directories
      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily directories cf1
      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable pathinfos
      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily pathinfos cf1
    '';
    isClosure = true;
    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
    assertVMOutput = "Onwards and upwards.";
  });
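
  # Same NixOS closure, with blobs stored in S3 (served by a local MinIO),
  # uploaded through nar-bridge.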
  closure-nixos-s3 = (mkBootTest {
    blobServiceAddr = "objectstore+s3://mybucket/blobs?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&aws_endpoint_url=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
    # We can't use S3 for directories here yet without a caching layer, as we
    # don't allow "deeper" access to directories (non-root nodes).
    # directoryServiceAddr = "objectstore+s3://mybucket/directories?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&endpoint=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
    directoryServiceAddr = "memory://";
    pathInfoServiceAddr = "memory://";
    path = testSystem;
    useNarBridge = true;
    preStart = ''
      MINIO_ACCESS_KEY=myaccesskey MINIO_SECRET_KEY=supersecret MINIO_ADDRESS=127.0.0.1:9000 ${pkgs.minio}/bin/minio server $(mktemp -d) &
      timeout 22 sh -c 'until ${pkgs.netcat}/bin/nc -z $0 $1; do sleep 1; done' localhost 9000

      mc_config_dir=$(mktemp -d)
      ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir alias set 'myminio' 'http://127.0.0.1:9000' 'myaccesskey' 'supersecret'
      ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir mb myminio/mybucket
    '';
    isClosure = true;
    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
    assertVMOutput = "Onwards and upwards.";
  });
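
  # Same NixOS closure, uploaded through nar-bridge rather than `tvix-store copy`.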
  closure-nixos-nar-bridge = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    path = testSystem;
    useNarBridge = true;
    isClosure = true;
    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
    assertVMOutput = "Onwards and upwards.";
  });
}