Diffstat (limited to 'tvix/docs')
-rw-r--r--  tvix/docs/book.toml | 14
-rw-r--r--  tvix/docs/default.nix | 3
-rw-r--r--  tvix/docs/mdbook-admonish.css | 348
-rw-r--r--  tvix/docs/mdbook-extra.css | 7
-rw-r--r--  tvix/docs/src/SUMMARY.md | 37
-rw-r--r--  tvix/docs/src/TODO.md | 121
-rw-r--r--  tvix/docs/src/architecture.md | 9
-rw-r--r--  tvix/docs/src/castore/blobstore-chunking.md | 147
-rw-r--r--  tvix/docs/src/castore/blobstore-protocol.md | 104
-rw-r--r--  tvix/docs/src/castore/data-model.md | 50
-rw-r--r--  tvix/docs/src/castore/why-not-git-trees.md | 57
-rw-r--r--  tvix/docs/src/contributing/code-&-commits.md | 76
-rw-r--r--  tvix/docs/src/contributing/email.md | 33
-rw-r--r--  tvix/docs/src/contributing/gerrit.md | 112
-rw-r--r--  tvix/docs/src/eval/abandoned/index.md | 3
-rw-r--r--  tvix/docs/src/eval/abandoned/thread-local-vm.md | 233
-rw-r--r--  tvix/docs/src/eval/bindings.md | 134
-rw-r--r--  tvix/docs/src/eval/build-references.md | 259
-rw-r--r--  tvix/docs/src/eval/builtins.md | 137
-rw-r--r--  tvix/docs/src/eval/catchable-errors.md | 131
-rw-r--r--  tvix/docs/src/eval/known-optimisation-potential.md | 161
-rw-r--r--  tvix/docs/src/eval/language-issues.md | 46
-rw-r--r--  tvix/docs/src/eval/opcodes-attrsets.md | 122
-rw-r--r--  tvix/docs/src/eval/recursive-attrs.md | 67
-rw-r--r--  tvix/docs/src/eval/vm-loop.md | 314
-rw-r--r--  tvix/docs/src/getting-started.md | 59
-rw-r--r--  tvix/docs/src/language-spec.md | 4
-rw-r--r--  tvix/docs/src/nix-daemon/index.md | 15
-rw-r--r--  tvix/docs/src/store/api.md | 287
-rw-r--r--  tvix/docs/src/value-pointer-equality.md | 6
30 files changed, 3072 insertions, 24 deletions
diff --git a/tvix/docs/book.toml b/tvix/docs/book.toml
index 7318a90233..093b73b8e8 100644
--- a/tvix/docs/book.toml
+++ b/tvix/docs/book.toml
@@ -9,3 +9,17 @@ title = "Tvix Docs"
 # override the /usr/bin/plantuml default
 plantuml-cmd = "plantuml"
 use-data-uris = true
+
+[preprocessor.admonish]
+command = "mdbook-admonish"
+after = ["links"] # ensure `{{#include}}` also gets processed
+assets_version = "3.0.2" # do not edit: managed by `mdbook-admonish install`
+
+[preprocessor.d2]
+command = "d2"
+after = ["links"] # ensure `{{#include}}` also gets processed
+
+[output]
+
+[output.html]
+additional-css = ["./mdbook-admonish.css", "./mdbook-extra.css"]
diff --git a/tvix/docs/default.nix b/tvix/docs/default.nix
index 9fc2f76576..3b102e4b7c 100644
--- a/tvix/docs/default.nix
+++ b/tvix/docs/default.nix
@@ -9,7 +9,10 @@ pkgs.stdenv.mkDerivation {
   src = lib.cleanSource ./.;
 
   nativeBuildInputs = [
+    pkgs.d2
     pkgs.mdbook
+    pkgs.mdbook-admonish
+    pkgs.mdbook-d2
     pkgs.mdbook-plantuml
     pkgs.plantuml
   ];
diff --git a/tvix/docs/mdbook-admonish.css b/tvix/docs/mdbook-admonish.css
new file mode 100644
index 0000000000..45aeff0511
--- /dev/null
+++ b/tvix/docs/mdbook-admonish.css
@@ -0,0 +1,348 @@
+@charset "UTF-8";
+:is(.admonition) {
+  display: flow-root;
+  margin: 1.5625em 0;
+  padding: 0 1.2rem;
+  color: var(--fg);
+  page-break-inside: avoid;
+  background-color: var(--bg);
+  border: 0 solid black;
+  border-inline-start-width: 0.4rem;
+  border-radius: 0.2rem;
+  box-shadow: 0 0.2rem 1rem rgba(0, 0, 0, 0.05), 0 0 0.1rem rgba(0, 0, 0, 0.1);
+}
+@media print {
+  :is(.admonition) {
+    box-shadow: none;
+  }
+}
+:is(.admonition) > * {
+  box-sizing: border-box;
+}
+:is(.admonition) :is(.admonition) {
+  margin-top: 1em;
+  margin-bottom: 1em;
+}
+:is(.admonition) > .tabbed-set:only-child {
+  margin-top: 0;
+}
+html :is(.admonition) > :last-child {
+  margin-bottom: 1.2rem;
+}
+
+a.admonition-anchor-link {
+  display: none;
+  position: absolute;
+  left: -1.2rem;
+  padding-right: 1rem;
+}
+a.admonition-anchor-link:link, a.admonition-anchor-link:visited {
+  color: var(--fg);
+}
+a.admonition-anchor-link:link:hover, a.admonition-anchor-link:visited:hover {
+  text-decoration: none;
+}
+a.admonition-anchor-link::before {
+  content: "§";
+}
+
+:is(.admonition-title, summary.admonition-title) {
+  position: relative;
+  min-height: 4rem;
+  margin-block: 0;
+  margin-inline: -1.6rem -1.2rem;
+  padding-block: 0.8rem;
+  padding-inline: 4.4rem 1.2rem;
+  font-weight: 700;
+  background-color: rgba(68, 138, 255, 0.1);
+  print-color-adjust: exact;
+  -webkit-print-color-adjust: exact;
+  display: flex;
+}
+:is(.admonition-title, summary.admonition-title) p {
+  margin: 0;
+}
+html :is(.admonition-title, summary.admonition-title):last-child {
+  margin-bottom: 0;
+}
+:is(.admonition-title, summary.admonition-title)::before {
+  position: absolute;
+  top: 0.625em;
+  inset-inline-start: 1.6rem;
+  width: 2rem;
+  height: 2rem;
+  background-color: #448aff;
+  print-color-adjust: exact;
+  -webkit-print-color-adjust: exact;
+  mask-image: url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"></svg>');
+  -webkit-mask-image: url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"></svg>');
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-size: contain;
+  content: "";
+}
+:is(.admonition-title, summary.admonition-title):hover a.admonition-anchor-link {
+  display: initial;
+}
+
+details.admonition > summary.admonition-title::after {
+  position: absolute;
+  top: 0.625em;
+  inset-inline-end: 1.6rem;
+  height: 2rem;
+  width: 2rem;
+  background-color: currentcolor;
+  mask-image: var(--md-details-icon);
+  -webkit-mask-image: var(--md-details-icon);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-size: contain;
+  content: "";
+  transform: rotate(0deg);
+  transition: transform 0.25s;
+}
+details[open].admonition > summary.admonition-title::after {
+  transform: rotate(90deg);
+}
+
+:root {
+  --md-details-icon: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M8.59 16.58 13.17 12 8.59 7.41 10 6l6 6-6 6-1.41-1.42Z'/></svg>");
+}
+
+:root {
+  --md-admonition-icon--admonish-note: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M20.71 7.04c.39-.39.39-1.04 0-1.41l-2.34-2.34c-.37-.39-1.02-.39-1.41 0l-1.84 1.83 3.75 3.75M3 17.25V21h3.75L17.81 9.93l-3.75-3.75L3 17.25z'/></svg>");
+  --md-admonition-icon--admonish-abstract: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M17 9H7V7h10m0 6H7v-2h10m-3 6H7v-2h7M12 3a1 1 0 0 1 1 1 1 1 0 0 1-1 1 1 1 0 0 1-1-1 1 1 0 0 1 1-1m7 0h-4.18C14.4 1.84 13.3 1 12 1c-1.3 0-2.4.84-2.82 2H5a2 2 0 0 0-2 2v14a2 2 0 0 0 2 2h14a2 2 0 0 0 2-2V5a2 2 0 0 0-2-2z'/></svg>");
+  --md-admonition-icon--admonish-info: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M13 9h-2V7h2m0 10h-2v-6h2m-1-9A10 10 0 0 0 2 12a10 10 0 0 0 10 10 10 10 0 0 0 10-10A10 10 0 0 0 12 2z'/></svg>");
+  --md-admonition-icon--admonish-tip: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M17.66 11.2c-.23-.3-.51-.56-.77-.82-.67-.6-1.43-1.03-2.07-1.66C13.33 7.26 13 4.85 13.95 3c-.95.23-1.78.75-2.49 1.32-2.59 2.08-3.61 5.75-2.39 8.9.04.1.08.2.08.33 0 .22-.15.42-.35.5-.23.1-.47.04-.66-.12a.58.58 0 0 1-.14-.17c-1.13-1.43-1.31-3.48-.55-5.12C5.78 10 4.87 12.3 5 14.47c.06.5.12 1 .29 1.5.14.6.41 1.2.71 1.73 1.08 1.73 2.95 2.97 4.96 3.22 2.14.27 4.43-.12 6.07-1.6 1.83-1.66 2.47-4.32 1.53-6.6l-.13-.26c-.21-.46-.77-1.26-.77-1.26m-3.16 6.3c-.28.24-.74.5-1.1.6-1.12.4-2.24-.16-2.9-.82 1.19-.28 1.9-1.16 2.11-2.05.17-.8-.15-1.46-.28-2.23-.12-.74-.1-1.37.17-2.06.19.38.39.76.63 1.06.77 1 1.98 1.44 2.24 2.8.04.14.06.28.06.43.03.82-.33 1.72-.93 2.27z'/></svg>");
+  --md-admonition-icon--admonish-success: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='m9 20.42-6.21-6.21 2.83-2.83L9 14.77l9.88-9.89 2.83 2.83L9 20.42z'/></svg>");
+  --md-admonition-icon--admonish-question: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='m15.07 11.25-.9.92C13.45 12.89 13 13.5 13 15h-2v-.5c0-1.11.45-2.11 1.17-2.83l1.24-1.26c.37-.36.59-.86.59-1.41a2 2 0 0 0-2-2 2 2 0 0 0-2 2H8a4 4 0 0 1 4-4 4 4 0 0 1 4 4 3.2 3.2 0 0 1-.93 2.25M13 19h-2v-2h2M12 2A10 10 0 0 0 2 12a10 10 0 0 0 10 10 10 10 0 0 0 10-10c0-5.53-4.5-10-10-10z'/></svg>");
+  --md-admonition-icon--admonish-warning: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M13 14h-2V9h2m0 9h-2v-2h2M1 21h22L12 2 1 21z'/></svg>");
+  --md-admonition-icon--admonish-failure: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M20 6.91 17.09 4 12 9.09 6.91 4 4 6.91 9.09 12 4 17.09 6.91 20 12 14.91 17.09 20 20 17.09 14.91 12 20 6.91z'/></svg>");
+  --md-admonition-icon--admonish-danger: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M11 15H6l7-14v8h5l-7 14v-8z'/></svg>");
+  --md-admonition-icon--admonish-bug: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M14 12h-4v-2h4m0 6h-4v-2h4m6-6h-2.81a5.985 5.985 0 0 0-1.82-1.96L17 4.41 15.59 3l-2.17 2.17a6.002 6.002 0 0 0-2.83 0L8.41 3 7 4.41l1.62 1.63C7.88 6.55 7.26 7.22 6.81 8H4v2h2.09c-.05.33-.09.66-.09 1v1H4v2h2v1c0 .34.04.67.09 1H4v2h2.81c1.04 1.79 2.97 3 5.19 3s4.15-1.21 5.19-3H20v-2h-2.09c.05-.33.09-.66.09-1v-1h2v-2h-2v-1c0-.34-.04-.67-.09-1H20V8z'/></svg>");
+  --md-admonition-icon--admonish-example: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M7 13v-2h14v2H7m0 6v-2h14v2H7M7 7V5h14v2H7M3 8V5H2V4h2v4H3m-1 9v-1h3v4H2v-1h2v-.5H3v-1h1V17H2m2.25-7a.75.75 0 0 1 .75.75c0 .2-.08.39-.21.52L3.12 13H5v1H2v-.92L4 11H2v-1h2.25z'/></svg>");
+  --md-admonition-icon--admonish-quote: url("data:image/svg+xml;charset=utf-8,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'><path d='M14 17h3l2-4V7h-6v6h3M6 17h3l2-4V7H5v6h3l-2 4z'/></svg>");
+}
+
+:is(.admonition):is(.admonish-note) {
+  border-color: #448aff;
+}
+
+:is(.admonish-note) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(68, 138, 255, 0.1);
+}
+:is(.admonish-note) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #448aff;
+  mask-image: var(--md-admonition-icon--admonish-note);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-note);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-abstract, .admonish-summary, .admonish-tldr) {
+  border-color: #00b0ff;
+}
+
+:is(.admonish-abstract, .admonish-summary, .admonish-tldr) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(0, 176, 255, 0.1);
+}
+:is(.admonish-abstract, .admonish-summary, .admonish-tldr) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #00b0ff;
+  mask-image: var(--md-admonition-icon--admonish-abstract);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-abstract);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-info, .admonish-todo) {
+  border-color: #00b8d4;
+}
+
+:is(.admonish-info, .admonish-todo) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(0, 184, 212, 0.1);
+}
+:is(.admonish-info, .admonish-todo) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #00b8d4;
+  mask-image: var(--md-admonition-icon--admonish-info);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-info);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-tip, .admonish-hint, .admonish-important) {
+  border-color: #00bfa5;
+}
+
+:is(.admonish-tip, .admonish-hint, .admonish-important) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(0, 191, 165, 0.1);
+}
+:is(.admonish-tip, .admonish-hint, .admonish-important) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #00bfa5;
+  mask-image: var(--md-admonition-icon--admonish-tip);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-tip);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-success, .admonish-check, .admonish-done) {
+  border-color: #00c853;
+}
+
+:is(.admonish-success, .admonish-check, .admonish-done) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(0, 200, 83, 0.1);
+}
+:is(.admonish-success, .admonish-check, .admonish-done) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #00c853;
+  mask-image: var(--md-admonition-icon--admonish-success);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-success);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-question, .admonish-help, .admonish-faq) {
+  border-color: #64dd17;
+}
+
+:is(.admonish-question, .admonish-help, .admonish-faq) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(100, 221, 23, 0.1);
+}
+:is(.admonish-question, .admonish-help, .admonish-faq) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #64dd17;
+  mask-image: var(--md-admonition-icon--admonish-question);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-question);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-warning, .admonish-caution, .admonish-attention) {
+  border-color: #ff9100;
+}
+
+:is(.admonish-warning, .admonish-caution, .admonish-attention) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(255, 145, 0, 0.1);
+}
+:is(.admonish-warning, .admonish-caution, .admonish-attention) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #ff9100;
+  mask-image: var(--md-admonition-icon--admonish-warning);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-warning);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-failure, .admonish-fail, .admonish-missing) {
+  border-color: #ff5252;
+}
+
+:is(.admonish-failure, .admonish-fail, .admonish-missing) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(255, 82, 82, 0.1);
+}
+:is(.admonish-failure, .admonish-fail, .admonish-missing) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #ff5252;
+  mask-image: var(--md-admonition-icon--admonish-failure);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-failure);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-danger, .admonish-error) {
+  border-color: #ff1744;
+}
+
+:is(.admonish-danger, .admonish-error) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(255, 23, 68, 0.1);
+}
+:is(.admonish-danger, .admonish-error) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #ff1744;
+  mask-image: var(--md-admonition-icon--admonish-danger);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-danger);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-bug) {
+  border-color: #f50057;
+}
+
+:is(.admonish-bug) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(245, 0, 87, 0.1);
+}
+:is(.admonish-bug) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #f50057;
+  mask-image: var(--md-admonition-icon--admonish-bug);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-bug);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-example) {
+  border-color: #7c4dff;
+}
+
+:is(.admonish-example) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(124, 77, 255, 0.1);
+}
+:is(.admonish-example) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #7c4dff;
+  mask-image: var(--md-admonition-icon--admonish-example);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-example);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+:is(.admonition):is(.admonish-quote, .admonish-cite) {
+  border-color: #9e9e9e;
+}
+
+:is(.admonish-quote, .admonish-cite) > :is(.admonition-title, summary.admonition-title) {
+  background-color: rgba(158, 158, 158, 0.1);
+}
+:is(.admonish-quote, .admonish-cite) > :is(.admonition-title, summary.admonition-title)::before {
+  background-color: #9e9e9e;
+  mask-image: var(--md-admonition-icon--admonish-quote);
+  -webkit-mask-image: var(--md-admonition-icon--admonish-quote);
+  mask-repeat: no-repeat;
+  -webkit-mask-repeat: no-repeat;
+  mask-size: contain;
+  -webkit-mask-repeat: no-repeat;
+}
+
+.navy :is(.admonition) {
+  background-color: var(--sidebar-bg);
+}
+
+.ayu :is(.admonition),
+.coal :is(.admonition) {
+  background-color: var(--theme-hover);
+}
+
+.rust :is(.admonition) {
+  background-color: var(--sidebar-bg);
+  color: var(--sidebar-fg);
+}
+.rust .admonition-anchor-link:link, .rust .admonition-anchor-link:visited {
+  color: var(--sidebar-fg);
+}
diff --git a/tvix/docs/mdbook-extra.css b/tvix/docs/mdbook-extra.css
new file mode 100644
index 0000000000..7a50fdbeed
--- /dev/null
+++ b/tvix/docs/mdbook-extra.css
@@ -0,0 +1,7 @@
+@charset "utf-8";
+
+.hljs-meta.prompt_ {
+	-webkit-user-select: none;
+	-moz-user-select: none;
+	user-select: none;
+}
diff --git a/tvix/docs/src/SUMMARY.md b/tvix/docs/src/SUMMARY.md
index 954abae338..6b3c43035b 100644
--- a/tvix/docs/src/SUMMARY.md
+++ b/tvix/docs/src/SUMMARY.md
@@ -1,14 +1,43 @@
 # Summary
 
+# Welcome
+* [Getting Started](./getting-started.md)
+
+# Contributing
+* [Gerrit](./contributing/gerrit.md)
+* [Email](./contributing/email.md)
+* [Code & Commits](./contributing/code-&-commits.md)
+
 # Tvix
 - [Architecture & data flow](./architecture.md)
 - [TODOs](./TODO.md)
 
+# Evaluator
+- [Compilation of Bindings](./eval/bindings.md)
+- [Builtins](./eval/builtins.md)
+- [Build References](./eval/build-references.md)
+- [Catchable Errors](./eval/catchable-errors.md)
+- [Known Optimisation Potential](./eval/known-optimisation-potential.md)
+- [Language Issues](./eval/language-issues.md)
+- [Attrset Opcodes](./eval/opcodes-attrsets.md)
+- [Recursive attribute sets](./eval/recursive-attrs.md)
+- [VM Loop](./eval/vm-loop.md)
+- [Abandoned](./eval/abandoned/index.md)
+  - [Thread-local VM](./eval/abandoned/thread-local-vm.md)
+
+# Store
+- [Store API](./store/api.md)
+- [BlobStore Chunking](./castore/blobstore-chunking.md)
+- [BlobStore Protocol](./castore/blobstore-protocol.md)
+- [Data Model](./castore/data-model.md)
+- [Why not git trees?](./castore/why-not-git-trees.md)
+
 # Nix
 - [Specification of the Nix Language](./language-spec.md)
 - [Nix language version history](./lang-version.md)
 - [Value Pointer Equality](./value-pointer-equality.md)
-- [Daemon protocol changelog](./nix-daemon/changelog.md)
-- [Daemon protocol logging](./nix-daemon/logging.md)
-- [Daemon protocol operations](./nix-daemon/operations.md)
-- [Daemon protocol serialization](./nix-daemon/serialization.md)
\ No newline at end of file
+- [Daemon Protocol](./nix-daemon/index.md)
+  - [Changelog](./nix-daemon/changelog.md)
+  - [Logging](./nix-daemon/logging.md)
+  - [Operations](./nix-daemon/operations.md)
+  - [Serialization](./nix-daemon/serialization.md)
diff --git a/tvix/docs/src/TODO.md b/tvix/docs/src/TODO.md
index 8fb22ea822..52fa0a9e74 100644
--- a/tvix/docs/src/TODO.md
+++ b/tvix/docs/src/TODO.md
@@ -25,17 +25,69 @@ sure noone is working on this, or has some specific design in mind already.
    with a different level of `--strict`, but the toplevel doc-comment suggests
    its generic?
 
+### crate2nix for WASM (@kranzes)
+Most of Tvix lives inside a `//tvix` cargo workspace, and we use `crate2nix`
+as a build system to get crate-level build granularity (and caching), keeping
+compile times somewhat manageable.
+
+In the future, for Store/Build, we want to build some more web frontends,
+exposing some data by calling the API. Being able to write this in Rust
+and reuse most of our existing code dealing with the data structures would
+be preferred.
+
+However, using the crate2nix tooling in combination with compiling for WASM is
+a bumpy ride (and `//web.tvixbolt` works around this by using
+`rustPlatform.buildRustPackage` instead, which invokes cargo inside a FOD):
+
+`buildRustCrate` in nixpkgs (which is used by `crate2nix` under the hood)
+doesn't allow specifying another `--target` explicitly, but relies on the cross
+machinery in nixpkgs exclusively.
+
+`doc/languages-frameworks/rust.section.md` suggests it should be a matter of
+re-instantiating nixpkgs for `wasm32-unknown-unknown`, but that's not recognized
+as a valid architecture.
+The suggested alternative, setting only `rustc.config` to it, seems to get us
+further, but the `Crate.nix` logic for detecting arch-conditional crates doesn't
+seem to cover that case, and tries to build crates (`cpufeatures` for `sha{1,2}`)
+which are supposed to be skipped.
+
+## Perf
+ - String Contexts currently do a lot of indirections (edef)
+   (NixString -> NixStringInner -> HashSet[element] -> NixContextElement -> String -> data)
+   to get to the actual data. We should improve this. There are various ideas;
+   one of them is globally interning all Nix context elements, and only keeping
+   indices into that (see the sketch at the end of this section). We might need
+   different representations for small amounts of context elements vs. larger
+   ones, and tooling to reason about the number of context elements we have.
+ - To calculate NAR size and digest (used for output path calculation of FODs),
+   our current `SimpleRenderer` `NarCalculationService` sequentially asks for
+   one blob after another (and internally these might consist of multiple
+   chunks too).
+   That's a lot of roundtrips, adding up to a lot of useless waiting.
+   While we cannot avoid having to feed all bytes sequentially through sha256,
+   we already know what blobs to fetch and in which order.
+   There should be a way to buffer some "amount of upcoming bytes" in memory,
+   rather than requesting these sequentially.
+   This is somewhat the "spiritual counterpart" to our sequential ingestion
+   code (`ConcurrentBlobUploader`, used by `ingest_nar`), which keeps
+   "some amount of outgoing bytes" in memory.
+
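+A rough sketch of the interning idea from the first bullet above (all type and
+function names here are made up for illustration and are not existing tvix-eval
+items):
+
+```rust
+use std::collections::{BTreeSet, HashMap};
+use std::sync::{Mutex, OnceLock};
+
+/// Hypothetical stand-in for a context element (store path, drv output, …).
+#[derive(Clone, PartialEq, Eq, Hash)]
+enum ContextElement {
+    Plain(String),
+    Single { drv: String, output: String },
+    Derivation(String),
+}
+
+#[derive(Default)]
+struct Interner {
+    elements: Vec<ContextElement>,
+    indices: HashMap<ContextElement, u32>,
+}
+
+static INTERNER: OnceLock<Mutex<Interner>> = OnceLock::new();
+
+/// Returns a small index for the element, interning it globally on first use.
+fn intern(elem: ContextElement) -> u32 {
+    let mut i = INTERNER.get_or_init(Default::default).lock().unwrap();
+    if let Some(&idx) = i.indices.get(&elem) {
+        return idx;
+    }
+    let idx = i.elements.len() as u32;
+    i.elements.push(elem.clone());
+    i.indices.insert(elem, idx);
+    idx
+}
+
+/// A string's context then shrinks to a set of indices instead of owned data.
+type NixContext = BTreeSet<u32>;
+```
+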
 ### Error cleanup
  - Currently, all services use tvix_castore::Error, which only has two kinds
    (invalid request, storage error), containing an (owned) string.
    This is quite primitive. We should have individual error types for BS, DS, PS.
    Maybe these should have some generics to still be able to carry errors from
    the underlying backend, similar to `IngestionError`.
+   There was an attempt to give PS separate error types (cl/11695), but this
+   ended up very verbose.
+   Every error had to be boxed, and a possible additional message added. Some
+   errors that didn't wrap another underlying error were hard to construct, too
+   (requiring the addition of new error variants). All of this without even
+   having added
+   proper backtrace support, which would be quite helpful in store hierarchies.
+   `anyhow`'s `.context()` gives us most of this out of the box. Maybe we can
+   use that, using enums rather than `&'static str` as context in some cases?
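+   A minimal sketch of that idea, assuming we'd use `anyhow` (the enum and its
+   variants here are made up for illustration):
+
+   ```rust
+   use anyhow::{Context, Result};
+   use std::fmt;
+
+   /// Hypothetical context markers; illustrative only, not existing tvix types.
+   #[derive(Debug, Clone, Copy)]
+   enum StoreContext {
+       ReadingBlob,
+       WritingPathInfo,
+   }
+
+   impl fmt::Display for StoreContext {
+       fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+           match self {
+               StoreContext::ReadingBlob => write!(f, "while reading a blob"),
+               StoreContext::WritingPathInfo => write!(f, "while writing PathInfo"),
+           }
+       }
+   }
+
+   fn read_blob(path: &str) -> Result<Vec<u8>> {
+       // The underlying io::Error stays intact; the enum only adds context.
+       std::fs::read(path).context(StoreContext::ReadingBlob)
+   }
+   ```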
 
 ## Fixes towards correctness
- - `builtins.toXML` is missing string context. See b/398.
- - `builtins.toXML` self-closing tags need to be configurable in a more granular
-   fashion, requires third-party crate support. See b/399.
  - `rnix` only supports string source files, but `NixString` uses bytes (and Nix
    source code might be no valid UTF-8).
 
@@ -75,10 +127,6 @@ Some more fetcher-related builtins need work:
  - `fetchTree` (hairy, seems there's no proper spec and the URL syntax seems
    subject to change/underdocumented)
 
-### Convert builtins:fetchurl to Fetches
-We need to convert `builtins:fetchurl`-style calls to `builtins.derivation` to
-fetches, not Derivations (tracked in `KnownPaths`).
-
 ### Derivation -> Build
 While we have some support for `structuredAttrs` and `fetchClosure` (at least
 enough to calculate output hashes, aka produce identical ATerm), the code
@@ -101,9 +149,33 @@ logs etc, but this is something requiring a lot of designing.
 
 ### Store composition
  - Combinators: list-by-priority, first-come-first-serve, cache
- - How do describe hierarchies. URL format too one-dimensional, but we might get
-   quite far with a similar "substituters" concept that Nix uses, to construct
-   the composed stores.
+ - Store composition hierarchies (@yuka).
+   - URL format too one-dimensional.
+   - We want to have nice and simple user-facing substituter config, including
+     sensible default wrappers for caching, retries, fallbacks, as well as
+     granular control for power-users.
+   - Current design idea (a rough sketch follows after this list):
+     - Have a concept similar to rclone config (map with store aliases as
+       keys, allowing to refer to stores by their alias from other parts of
+       the config).
+       It allows both referring to stores by name, as well as ad-hoc definitions:
+       https://rclone.org/docs/#syntax-of-remote-paths
+     - Each store needs to be aware of its "instance name", so it can be
+       included in logs, metrics, …
+     - Have an "instantiation function" traversing such a config data structure,
+       creating store instances and plugging them together, ultimately returning
+       a dyn …Service interface.
+     - No reconfiguration/reconciliation for now.
+     - Making URLs the primary data format would get ugly quite easily (hello
+       multiple layers of escaping!), so best to convert the existing URL
+       syntax to our new config format on the fly and then use one codepath
+       to instantiate/assemble. Similarly, something like the "user-facing
+       substituter config" mentioned above could also be converted to such a
+       config format under the hood.
+     - Maybe add support for a `?cache=$other_url` parameter in the URL syntax, to
+       easily wrap a store with a caching frontend, using `$other_url` as the
+       "near" store URL.
+
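+Rough sketch of what such a composition config data structure could look like
+(everything here is hypothetical and only illustrates the idea, it is not an
+existing tvix API):
+
+```rust
+use std::collections::HashMap;
+
+/// One store definition. `kind` selects the backend, `options` carries its
+/// backend-specific parameters, and `wraps` lets combinators (cache, retry,
+/// fallback, …) refer to other stores by their alias.
+struct StoreConfig {
+    kind: String,                     // e.g. "memory", "objectstore", "cache"
+    options: HashMap<String, String>, // backend-specific options
+    wraps: Option<String>,            // alias of the wrapped store, if any
+}
+
+/// The whole composition: store aliases -> store definitions, similar to how
+/// rclone lets remotes refer to each other by name.
+type CompositionConfig = HashMap<String, StoreConfig>;
+
+// The "instantiation function" would traverse this map starting from a root
+// alias, create the instances, plug them together, and hand back something
+// like a `Box<dyn BlobService>` / `Box<dyn PathInfoService>`.
+```
+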
 ### Store Config
    There's already serde for some store options (bigtable uses `serde_qs`).
    We might also have common options global over all backends, like chunking
@@ -114,7 +186,22 @@ logs etc, but this is something requiring a lot of designing.
 ### BlobService
  - On the trait side, currently there's no way to distinguish reading a
    known-chunk vs blob, so we might be calling `.chunks()` unnecessarily often.
-   At least for the `object_store` backend, this might be a problem.
+   At least for the `object_store` backend, this might be a problem, causing a
+   lot of round-trips. It also doesn't compose well - every implementation of
+   `BlobService` needs to solve both the "holding metadata about chunking info"
+   and the "storing chunks" questions.
+   Design idea (@flokli): split these two concerns into two separate traits:
+    - a `ChunkService` dealing with retrieving individual chunks, by their
+      content digests. Chunks are small enough to keep around in contiguous
+      memory.
+    - a `BlobService` storing metadata about blobs.
+
+   Individual stores would not need to implement `BlobReader` anymore, but that
+   could be a global thing with access to the whole store composition layer,
+   which should make it easier to reuse chunks from other backends. Unclear
+   if the write path should be structured the same way. At least for some
+   backends, we want the remote end to be able to decide about chunking.
+
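+   A rough sketch of the split (signatures simplified and synchronous here; the
+   real traits would be async, and these names are illustrative only, not an
+   existing tvix-castore API):
+
+   ```rust
+   pub type B3Digest = [u8; 32];
+
+   /// Metadata about one chunk a blob consists of.
+   pub struct ChunkMeta {
+       pub digest: B3Digest,
+       pub size: u64,
+   }
+
+   /// Retrieves individual chunks by their content digest. Chunks are small
+   /// enough to be kept around in contiguous memory.
+   pub trait ChunkService {
+       fn get_chunk(&self, digest: &B3Digest) -> std::io::Result<Option<Vec<u8>>>;
+   }
+
+   /// Stores metadata about blobs: which chunks they consist of, in order.
+   pub trait BlobService {
+       fn chunks(&self, blob_digest: &B3Digest) -> std::io::Result<Option<Vec<ChunkMeta>>>;
+   }
+   ```
+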
  - While `object_store` recently got support for `Content-Type`
    (https://github.com/apache/arrow-rs/pull/5650), there's no support on the
    local filesystem yet. We'd need to add support to this (through xattrs).
@@ -134,9 +221,9 @@ logs etc, but this is something requiring a lot of designing.
 - Some work ongoing on the worker operation parsing (griff, picnoir)
 
 ### O11Y
- - gRPC trace propagation (cl/10532)
- - `tracing-tracy` (cl/10952)
- - `[tracing-]indicatif` for progress/log reporting (floklis stash)
- - unification into `tvix-tracing` crate, currently a lot of boilerplate
-   in `tvix-store` CLI entrypoint, and half of the boilerplate copied over to
-   `tvix-cli`.
+ - Maybe drop `--log-level` entirely, and use the `RUST_LOG` env var exclusively?
+   `debug`/`trace` levels across all crates are a bit useless, and `RUST_LOG` can
+   be much more granular…
+ - Trace propagation for object_store once they support a way to register a
+   middleware, so we can use that to register a tracing middleware.
+   https://github.com/apache/arrow-rs/issues/5990
diff --git a/tvix/docs/src/architecture.md b/tvix/docs/src/architecture.md
index 5e0aa95f1a..02ffdfdcd2 100644
--- a/tvix/docs/src/architecture.md
+++ b/tvix/docs/src/architecture.md
@@ -21,6 +21,15 @@ gRPC. The rest of this document outlines the components.
 
 ### Coordinator
 
+```admonish warning
+Currently there's no separate coordinator. Most of the interaction between
+store, builder and evaluator is done by library code living in the `tvix-glue`
+crate (and `tvix-cli` is a user of it).
+
+Keep in mind that some of the statements below are outdated and neither reflect
+reality nor the desired design anymore.
+```
+
 *Purpose:* The coordinator (in the simplest case, the Tvix CLI tool)
 oversees the flow of a build process and delegates tasks to the right
 subcomponents. For example, if a user runs the equivalent of
diff --git a/tvix/docs/src/castore/blobstore-chunking.md b/tvix/docs/src/castore/blobstore-chunking.md
new file mode 100644
index 0000000000..d8c3d54b52
--- /dev/null
+++ b/tvix/docs/src/castore/blobstore-chunking.md
@@ -0,0 +1,147 @@
+# BlobStore: Chunking & Verified Streaming
+
+`tvix-castore`'s BlobStore is a content-addressed storage system, using [blake3]
+as hash function.
+
+Data is fetched by using the digest as the lookup key, and can be verified
+to be correct by feeding the received data through the hash function and
+ensuring it matches the digest initially used for the lookup.
+
+This means data can also be downloaded from any untrusted third party, as the
+received data is validated to match the digest it was originally requested with.
+
+However, for larger blobs of data, having to download the entire blob at once is
+wasteful, if we only care about a part of the blob. Think about mounting a
+seekable data structure, like loop-mounting an .iso file, or doing partial reads
+in a large Parquet file, a column-oriented data format.
+
+> We want to have the possibility to *seek* into a larger file.
+
+This however shouldn't compromise on data integrity properties - we should not
+need to trust a peer we're downloading from to be "honest" about the partial
+data we're reading. We should be able to verify smaller reads.
+
+Especially when substituting from an untrusted third-party, we want to be able
+to detect quickly if that third-party is sending us wrong data, and terminate
+the connection early.
+
+## Chunking
+In content-addressed systems, this problem has historically been solved by
+breaking larger blobs into smaller chunks, which can be fetched individually,
+and making a hash of *this listing* the blob digest/identifier.
+
+ - BitTorrent for example breaks files up into smaller chunks, and maintains
+   a list of sha1 digests for each of these chunks. Magnet links contain a
+   digest over this listing as an identifier. (See
+   [here for more details][bittorrent-v2].)
+   With the identifier, a client can fetch the entire list, and then recursively
+   "unpack the graph" of nodes, until it ends up with a list of individual small
+   chunks, which can be fetched individually.
+ - Similarly, IPFS with its IPLD model builds up a Merkle DAG, and uses the
+   digest of the root node as an identifier.
+
+These approaches solve the problem of being able to fetch smaller chunks in a
+trusted fashion. They can also do some deduplication, in case the same leaf
+nodes appear in multiple places.
+
+However, they also have a big disadvantage. The chunking parameters and the
+"topology" of the graph structure "bleed" into the root hash of the
+entire data structure itself.
+
+Depending on the chunking parameters used, there are different representations for
+the same data, causing less data sharing/reuse in the overall system, in terms of how
+many chunks need to be downloaded vs. are already available locally, as well as
+how compact data is stored on-disk.
+
+This can be worked around by agreeing on only a single way of chunking, but it's
+not pretty and misses a lot of deduplication potential.
+
+### Chunking in Tvix' Blobstore
+tvix-castore's BlobStore uses a hybrid approach to eliminate some of the
+disadvantages, while still being content-addressed internally, with the
+highlighted benefits.
+
+It uses [blake3] as hash function, and the blake3 digest of **the raw data
+itself** as an identifier (rather than some application-specific Merkle DAG that
+also embeds some chunking information).
+
+BLAKE3 is a tree hash where all left nodes are fully populated, in contrast to
+conventional serial hash functions. To be able to validate the hash of a node,
+one only needs the hashes of its (2) children [^1], if any.
+
+This means one only needs the root digest to validate a construction, and these
+constructions can be sent [separately][bao-spec].
+
+This relieves us from the need of having to encode more granular chunking into
+our data model / identifier upfront, and makes this mostly a transport/
+storage concern.
+
+For some more description on the (remote) protocol, check
+[BlobStore Protocol](./blobstore-protocol.md).
+
+#### Logical vs. physical chunking
+
+Due to the properties of the BLAKE3 hash function, we have logical blocks of
+1KiB, but this doesn't necessarily imply we need to restrict ourselves to these
+chunk sizes w.r.t. what "physical chunks" are sent over the wire between peers,
+or are stored on-disk.
+
+The only thing we need to be able to read and verify an arbitrary byte range is
+having the covering range of aligned 1K blocks, and a construction from the root
+digest to the 1K block.
+
+Note the intermediate hash tree can be further trimmed, [omitting][bao-tree]
+lower parts of the tree while still providing verified streaming - at the cost
+of having to fetch larger covering ranges of aligned blocks.
+
+Let's pick an example. We identify each KiB by a number here for illustration
+purposes.
+
+Assuming we omit the last two layers of the hash tree, we end up with logical
+4KiB leaf chunks (`bao_shift` of `2`).
+
+For a blob of 14 KiB total size, we could fetch logical blocks `[0..=3]`,
+`[4..=7]`, `[8..=11]` and `[12..=13]` in an authenticated fashion:
+
+`[ 0 1 2 3 ] [ 4 5 6 7 ] [ 8 9 10 11 ] [ 12 13 ]`
+
+Assuming the server now informs us about the following physical chunking:
+
+```
+[ 0 1 ] [ 2 3 4 5 ] [ 6 ] [ 7 8 ] [ 9 10 11 12 13 14 15 ]
+```
+
+If our application now wants to arbitrarily read from 0 until 4 (inclusive):
+
+```
+[ 0 1 ] [ 2 3 4 5 ] [ 6 ] [ 7 8 ] [ 9 10 11 12 13 14 15 ]
+ |-------------|
+
+```
+
+…we need to fetch physical chunks `[ 0 1 ]`, `[ 2 3 4 5 ]` and `[ 6 ] [ 7 8 ]`.
+
+
+`[ 0 1 ]` and `[ 2 3 4 5 ]` are obvious, they contain the data we're
+interested in.
+
+We however also need to fetch the physical chunks `[ 6 ]` and `[ 7 8 ]`, so we
+can assemble `[ 4 5 6 7 ]` to verify both logical chunks:
+
+```
+[ 0 1 ] [ 2 3 4 5 ] [ 6 ] [ 7 8 ] [ 9 10 11 12 13 14 15 ]
+^       ^           ^     ^
+|----4KiB----|------4KiB-----|
+```
+
+Each physical chunk fetched can be validated to have the blake3 digest that was
+communicated upfront, and can be stored in a client-side cache/storage, so
+subsequent / other requests for the same data will be fast(er).
+
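+To make the above more concrete, here is a small sketch of how a client could
+decide which physical chunks to fetch for a requested range (illustrative only,
+not the actual tvix-castore code; all sizes and offsets use the same unit):
+
+```rust
+/// Returns the indices of the physical chunks that must be fetched so that the
+/// covering, logically-aligned blocks of a requested byte range [start, end)
+/// can be assembled and verified.
+fn chunks_to_fetch(chunk_sizes: &[u64], start: u64, end: u64, block_size: u64) -> Vec<usize> {
+    // Widen the request to the covering range of aligned logical blocks.
+    let aligned_start = (start / block_size) * block_size;
+    let aligned_end = ((end + block_size - 1) / block_size) * block_size;
+
+    let mut needed = Vec::new();
+    let mut offset = 0u64;
+    for (i, &size) in chunk_sizes.iter().enumerate() {
+        let chunk_end = offset + size;
+        // A physical chunk is needed if it overlaps the aligned range.
+        if chunk_end > aligned_start && offset < aligned_end {
+            needed.push(i);
+        }
+        offset = chunk_end;
+    }
+    needed
+}
+
+// For the example above (physical chunks of 2, 4, 1, 2 and 7 KiB, 4 KiB logical
+// blocks, reading KiB 0..=4):
+// chunks_to_fetch(&[2, 4, 1, 2, 7], 0, 5, 4) == vec![0, 1, 2, 3]
+```
+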
+---
+
+[^1]: and the surrounding context, aka position inside the whole blob, which is available while verifying the tree
+[bittorrent-v2]: https://blog.libtorrent.org/2020/09/bittorrent-v2/
+[blake3]: https://github.com/BLAKE3-team/BLAKE3
+[bao-spec]: https://github.com/oconnor663/bao/blob/master/docs/spec.md
+[bao-tree]: https://github.com/n0-computer/bao-tree
diff --git a/tvix/docs/src/castore/blobstore-protocol.md b/tvix/docs/src/castore/blobstore-protocol.md
new file mode 100644
index 0000000000..0dff787ccb
--- /dev/null
+++ b/tvix/docs/src/castore/blobstore-protocol.md
@@ -0,0 +1,104 @@
+# BlobStore: Protocol / Composition
+
+This document describes the protocol that BlobStore uses to substitute blobs
+from other ("remote") BlobStores.
+
+How to come up with the blake3 digest of the blob to fetch is left to another
+layer in the stack.
+
+To put this into the context of Tvix as a Nix alternative, a blob represents an
+individual file inside a StorePath.
+In the Tvix Data Model, this is accomplished by having a `FileNode` (either the
+`root_node` in a `PathInfo` message, or an individual file inside a `Directory`
+message) encode a BLAKE3 digest.
+
+However, the whole infrastructure can be applied to other use cases requiring
+exchange/storage of, or access to, data whose blake3 digest is known.
+
+## Protocol and Interfaces
+As an RPC protocol, BlobStore currently uses gRPC.
+
+On the Rust side of things, every blob service implements the
+[`BlobService`](../src/blobservice/mod.rs) async trait, which isn't
+gRPC-specific.
+
+This `BlobService` trait provides functionality to check for existence of Blobs,
+read from blobs, and write new blobs.
+It also provides a method to ask for more granular chunks if they are available.
+
+In addition to some in-memory, on-disk and (soon) object-storage-based
+implementations, we also have a `BlobService` implementation that talks to a
+gRPC server, as well as a gRPC server wrapper component, which provides a gRPC
+service for anything implementing the `BlobService` trait.
+
+This makes it very easy to talk to a remote `BlobService`, which does not even
+need to be written in the same language, as long as it speaks the same gRPC
+protocol.
+
+It also puts very few requirements on someone implementing a new
+`BlobService`, or on what its internal storage or chunking algorithm looks like.
+
+The gRPC protocol is documented in `../protos/rpc_blobstore.proto`.
+Contrary to the `BlobService` trait, it does not have any options for seeking/
+ranging, as it's more desirable to provide this through chunking (see also
+[BlobStore Chunking](./blobstore-chunking.md)).
+
+## Composition
+Different `BlobStore`s are supposed to be "composed"/"layered" to express
+caching, multiple local and remote sources.
+
+The fronting interface can be the same, it'd just be multiple "tiers" that can
+respond to requests, depending on where the data resides. [^1]
+
+This makes it very simple for consumers, as they don't need to be aware of the
+entire substitutor config.
+
+The flexibility of this doesn't need to be exposed to the user in the default
+case; in most cases we should be fine with some form of on-disk storage and a
+bunch of substituters with different priorities.
+
+### gRPC Clients
+Clients are encouraged to always read blobs in a chunked fashion (asking for a
+list of chunks for a blob via `BlobService.Stat()`, then fetching chunks via
+`BlobService.Read()` as needed), instead of directly reading the entire blob via
+`BlobService.Read()`.
+
+In a composition setting, this provides opportunity for caching, and avoids
+downloading some chunks if they're already present locally (for example, because
+they were already downloaded by reading from a similar blob earlier).
+
+It also removes the need for seeking to be a part of the gRPC protocol
+altogether, as chunks are supposed to be "reasonably small" [^2].
+
+There's some further optimization potential: a `BlobService.Stat()` request
+could tell the server it's happy with very small blobs just being inlined in
+an additional field in the response, which would allow clients to
+populate their local chunk store in a single roundtrip.
+
+## Verified Streaming
+As already described in [BlobStore Chunking](./blobstore-chunking.md), the physical chunk
+information sent in a `BlobService.Stat()` response is still sufficient to fetch
+in an authenticated fashion.
+
+The exact protocol and formats are still a bit in flux, but here's some notes:
+
+ - `BlobService.Stat()` request gets a `send_bao` field (bool), signalling a
+   [BAO][bao-spec] should be sent. Could also be `bao_shift` integer, signalling
+   how detailed (down to the leaf chunks) it should go.
+   The exact format (and request fields) still need to be defined, edef has some
+   ideas around omitting some intermediate hash nodes over the wire and
+   recomputing them, reducing size by another ~50% over [bao-tree].
+ - `BlobService.Stat()` response gets some bao-related fields (`bao_shift`
+   field, signalling the actual format/shift level the server replies with, the
+   actual bao, and maybe some format specifier).
+   It would be nice to also be compatible with the baos used by [iroh], so we
+   can provide an implementation using it too.
+
+---
+
+[^1]: We might want to have some backchannel, so it becomes possible to provide
+      feedback to the user that something is downloaded.
+[^2]: Something between 512K-4M, TBD.
+[bao-spec]: https://github.com/oconnor663/bao/blob/master/docs/spec.md
+[bao-tree]: https://github.com/n0-computer/bao-tree
+[iroh]: https://github.com/n0-computer/iroh
diff --git a/tvix/docs/src/castore/data-model.md b/tvix/docs/src/castore/data-model.md
new file mode 100644
index 0000000000..7f7e396a22
--- /dev/null
+++ b/tvix/docs/src/castore/data-model.md
@@ -0,0 +1,50 @@
+# Data model
+
+This provides some more notes on the fields used in castore.proto.
+
+See [Store API](../store/api.md) for the full context.
+
+## Directory message
+`Directory` messages use the blake3 hash of their canonical protobuf
+serialization as their identifier.
+
+A `Directory` message contains three lists, `directories`, `files` and
+`symlinks`, holding `DirectoryNode`, `FileNode` and `SymlinkNode` messages
+respectively. They describe all the direct child elements that are contained in
+a directory.
+
+All three message types have a `name` field, specifying the (base)name of the
+element (which MUST not contain slashes or null bytes, and MUST not be '.' or '..').
+For reproducibility reasons, the lists MUST be sorted by that name and the
+name MUST be unique across all three lists.
+
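+For illustration, a check for these name constraints could look roughly like
+this (a sketch, not the actual tvix-castore validation code):
+
+```rust
+/// Returns whether a node name satisfies the constraints described above.
+fn is_valid_name(name: &[u8]) -> bool {
+    !name.is_empty()
+        && name != b".".as_slice()
+        && name != b"..".as_slice()
+        && !name.contains(&b'/')
+        && !name.contains(&b'\0')
+}
+```
+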
+In addition to the `name` field, the various *Node messages have the following
+fields:
+
+## DirectoryNode
+A `DirectoryNode` message represents a child directory.
+
+It has a `digest` field, which points to the identifier of another `Directory`
+message, making a `Directory` a merkle tree (or strictly speaking, a graph, as
+two elements pointing to a child directory with the same contents would point
+to the same `Directory` message).
+
+There's also a `size` field, containing the (total) number of all child
+elements in the referenced `Directory`, which helps for inode calculation.
+
+## FileNode
+A `FileNode` message represents a child (regular) file.
+
+Its `digest` field contains the blake3 hash of the file contents. It can be
+looked up in the `BlobService`.
+
+The `size` field contains the size of the blob the `digest` field refers to.
+
+The `executable` field specifies whether the file should be marked as
+executable or not.
+
+## SymlinkNode
+A `SymlinkNode` message represents a child symlink.
+
+In addition to the `name` field, the only additional field is the `target`,
+which is a string containing the target of the symlink.
diff --git a/tvix/docs/src/castore/why-not-git-trees.md b/tvix/docs/src/castore/why-not-git-trees.md
new file mode 100644
index 0000000000..4a12b4ef55
--- /dev/null
+++ b/tvix/docs/src/castore/why-not-git-trees.md
@@ -0,0 +1,57 @@
+## Why not git tree objects?
+
+We've been experimenting with (some variations of) the git tree and object
+format, and ultimately decided against using it as an internal format, and
+instead adapted the one documented in the other documents here.
+
+While the tvix-store API protocol shares some similarities with the format used
+in git for trees and objects, the git one has shown some significant
+disadvantages:
+
+### The binary encoding itself
+
+#### trees
+The git tree object format is a very binary, error-prone and
+"made-to-be-read-and-written-from-C" format.
+
+Tree objects are a combination of null-terminated strings, and fields of known
+length. References to other tree objects use the literal sha1 hash of another
+tree object in this encoding.
+Extensions of the format/changes are very hard to do right, because parsers are
+not aware they might be parsing something different.
+
+The tvix-store protocol uses a canonical protobuf serialization, and uses
+the [blake3][blake3] hash of that serialization to point to other `Directory`
+messages.
+It's both compact and with a wide range of libraries for encoders and decoders
+in many programming languages.
+The choice of protobuf makes it easy to add new fields, and lets old clients
+detect when unknown fields are present [^adding-fields].
+
+#### blob
+On disk, git blob objects start with a "blob" prefix, then the size of the
+payload, and then the data itself. The hash of a blob is the literal sha1sum
+over all of this - which makes it something very git-specific to request.
+
+tvix-store simply uses the [blake3][blake3] hash of the literal contents
+when referring to a file/blob, which makes it very easy to ask other data
+sources for the same data, as no git-specific payload is included in the hash.
+This also plays very well together with things like [iroh][iroh-discussion],
+which plans to provide a way to substitute (large) blobs by their blake3 hash
+over the IPFS network.
+
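+To illustrate the difference, here is a sketch assuming the `sha1` and `blake3`
+crates (this is not code from tvix-store itself):
+
+```rust
+use sha1::{Digest, Sha1};
+
+/// git hashes a `"blob <size>\0"` header *plus* the data with SHA-1…
+fn git_blob_id(data: &[u8]) -> Vec<u8> {
+    let mut hasher = Sha1::new();
+    hasher.update(format!("blob {}\0", data.len()).as_bytes());
+    hasher.update(data);
+    hasher.finalize().to_vec()
+}
+
+/// …while tvix-castore identifies a blob by the BLAKE3 hash of the raw data only.
+fn castore_blob_id(data: &[u8]) -> [u8; 32] {
+    *blake3::hash(data).as_bytes()
+}
+```
+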
+In addition to that, [blake3][blake3] makes it possible to do
+[verified streaming][bao], as already described in other parts of the
+documentation.
+
+The git tree object format uses sha1 both for references to other trees and
+hashes of blobs, which isn't really a hash function to fundamentally base
+everything on in 2023.
+The [migration to sha256][git-sha256] also has been dead for some years now,
+and it's unclear what a "blake3" version of this would even look like.
+
+[bao]: https://github.com/oconnor663/bao
+[blake3]: https://github.com/BLAKE3-team/BLAKE3
+[git-sha256]: https://git-scm.com/docs/hash-function-transition/
+[iroh-discussion]: https://github.com/n0-computer/iroh/discussions/707#discussioncomment-5070197
+[^adding-fields]: Obviously, adding new fields will change hashes, but it's something that's easy to detect.
\ No newline at end of file
diff --git a/tvix/docs/src/contributing/code-&-commits.md b/tvix/docs/src/contributing/code-&-commits.md
new file mode 100644
index 0000000000..628c124bf1
--- /dev/null
+++ b/tvix/docs/src/contributing/code-&-commits.md
@@ -0,0 +1,76 @@
+# Code & Commits
+
+## Code quality
+
+This one should go without saying — but please ensure that your code quality
+does not fall below the rest of the project. This is of course very subjective,
+but, as an example, if you place code that throws away errors into a block in
+which errors are handled properly, your change will be rejected.
+
+
+```admonish hint
+Usually there is a strong correlation between the visual appearance of a code
+block and its quality. This is a simple way to sanity-check your work while
+squinting and keeping some distance from your screen ;-)
+```
+
+
+## Commit messages
+
+The [Angular Conventional Commits][angular] style is the general commit style
+used in the Tvix project. Commit messages should be structured like this:
+
+```admonish example
+    type(scope): Subject line with at most a 72 character length
+
+    Body of the commit message with an empty line between subject and
+    body. This text should explain what the change does and why it has
+    been made, *especially* if it introduces a new feature.
+
+    Relevant issues should be mentioned if they exist.
+```
+
+Where `type` can be one of:
+
+* `feat`: A new feature has been introduced
+* `fix`: An issue of some kind has been fixed
+* `docs`: Documentation or comments have been updated
+* `style`: Formatting changes only
+* `refactor`: Hopefully self-explanatory!
+* `test`: Added missing tests / fixed tests
+* `chore`: Maintenance work
+* `subtree`: Operations involving `git subtree`
+
+And `scope` should refer to some kind of logical grouping inside of the
+project.
+
+It does not make sense to include the full path unless it aids in
+disambiguating. For example, when changing the struct fields in
+`tvix/glue/src/builtins/fetchers.rs` it is enough to write
+`refactor(tvix/glue): …`.
+
+Please take a look at the existing commit log for examples.
+
+
+## Commit content
+
+Multiple changes should be divided into multiple git commits whenever possible.
+Common sense applies.
+
+The fix for a single-line whitespace issue is fine to include in a different
+commit. Introducing a new feature and refactoring (unrelated) code in the same
+commit is not fine.
+
+`git commit -a` is generally **taboo**; on the command line you should
+prefer `git commit -p` instead.
+
+
+```admonish tip
+Tooling can really help this process. The [lazygit][] TUI or [magit][] for
+Emacs are worth looking into.
+```
+
+
+[angular]: https://www.conventionalcommits.org/en/
+[lazygit]: https://github.com/jesseduffield/lazygit
+[magit]: https://magit.vc
diff --git a/tvix/docs/src/contributing/email.md b/tvix/docs/src/contributing/email.md
new file mode 100644
index 0000000000..238ff388f5
--- /dev/null
+++ b/tvix/docs/src/contributing/email.md
@@ -0,0 +1,33 @@
+# Submitting changes via email
+
+With SSO & local accounts, Tvix hopefully provides you with a low-friction or
+privacy-respecting way to make contributions by means of
+[TVL’s self-hosted Gerrit][gerrit]. However, if you still decide differently,
+you may submit a patch via email to `depot@tvl.su` where it will be added to
+Gerrit by a contributor.
+
+Please keep in mind this process is more complicated, requiring extra work from
+both us & you:
+
+* You will need to manually check the Gerrit website for updates & someone will
+  need to relay potential comments to/from Gerrit to you as you won’t get
+  emails from Gerrit.
+* New revisions need to be stewarded by someone uploading changes to Gerrit
+  on your behalf.
+* As CLs cannot change owners, if you decide to get a Gerrit account later on
+  existing CLs need to be abandoned then recreated. This introduces more churn
+  to the review process since prior discussions are disconnected.
+
+Create an appropriate commit locally, then send it to us using either of these
+options:
+
+* `git format-patch`: This will create a `*.patch` file which you should email to
+  us.
+* `git send-email`: If configured on your system, this will take care of the
+  whole emailing process for you.
+
+The email address is a [public inbox][].
+
+
+[gerrit]: ../contributing/gerrit.html
+[public inbox]: https://inbox.tvl.su/depot/
diff --git a/tvix/docs/src/contributing/gerrit.md b/tvix/docs/src/contributing/gerrit.md
new file mode 100644
index 0000000000..71877d9d4a
--- /dev/null
+++ b/tvix/docs/src/contributing/gerrit.md
@@ -0,0 +1,112 @@
+# Contributing to Tvix
+
+## Registration
+
+Self-hosted [Gerrit](https://www.gerritcodereview.com) & changelists (CLs) are
+the preferred method of contributions & review.
+
+TVL’s Gerrit supports single sign-on (SSO) using a GitHub, GitLab, or
+StackOverflow account.
+
+Additionally if you would prefer not to use an SSO option or wish to have a
+backup authentication strategy in the event of a downed server or otherwise, we
+recommend setting up a TVL-specific LDAP account. Do note that our IdP
+(Keycloak) sometimes has [problems doing SSO with GitHub][github-problematic],
+so you might get an “unexpected error” while trying to sign in with GitHub;
+that error is not your fault. You can create that account by following these
+instructions:
+
+1. Checkout [TVL’s monorepo][check-out-monorepo] if you haven’t already
+2. Be a member of `#tvl` on [hackint][], a communication network.
+3. Generate a user entry using [//web/pwcrypt](https://signup.tvl.fyi/).
+4. Commit that generated user entry to our LDAP server configuration in
+   [ops/users][ops-users] (for an example, see:
+   [CL/2671](https://cl.tvl.fyi/c/depot/+/2671))
+5. If only using LDAP, submit the patch via email (see [<cite>Submitting
+   changes via email</cite>][email])
+
+
+## Gerrit setup
+
+Gerrit uses the concept of change IDs to track commits across rebases and other
+operations that might change their hashes, and link them to unique changes in
+Gerrit.
+
+First, [upload your public SSH keys to Gerrit][Gerrit SSH]. Then change your
+remote to point to your newly-registered user over SSH. Then follow up with Git
+config by setting the default push URL & installing commit hooks for a
+smoother Gerrit experience.
+
+```console
+$ cd depot
+$ git remote set-url origin "ssh://$USER@code.tvl.fyi:29418/depot"
+$ git config remote.origin.url "ssh://$USER@code.tvl.fyi:29418/depot"
+$ git config remote.origin.push "HEAD:refs/for/canon"
+$ curl -L --compressed https://cl.tvl.fyi/tools/hooks/commit-msg | tee .git/hooks/commit-msg
+…
+if ! mv "${dest}" "$1" ; then
+  echo "cannot mv ${dest} to $1"
+  exit 1
+fi
+$ chmod +x .git/hooks/commit-msg
+```
+
+## Gerrit workflow
+
+The workflow on Gerrit is quite different than the pull request (PR) model that
+many developers are more likely to be accustomed to. Instead of pushing changes
+to remote branches, all changes have to be pushed to `refs/for/canon`. For each
+commit that is pushed there, a change request is created automatically.
+
+Every time you create a new commit the change hook will insert a unique
+`Change-Id` tag into the commit message. Once you are satisfied with the state
+of your commit and want to submit it for review, you push it to a Git `ref`
+called `refs/for/canon`. This designates the commits as changelists (CLs)
+targeted for the `canon` branch.
+
+After you feel satisfied with your changes, push to the default:
+
+```console
+$ git commit -m 'docs(REVIEWS): Fixed all the errors in the reviews docs'
+$ git push origin
+```
+
+Or to a special target, such as a work-in-progress CL:
+
+```console
+$ git push origin HEAD:refs/for/canon%wip
+```
+
+During the review process, the reviewer(s) might ask you to make changes. You
+can simply amend[^amend] your commit(s) then push to the same ref (`--force*`
+flags not needed). Gerrit will automatically update your changes.
+
+```admonish caution
+Every individual commit will become a separate change. We do *not* squash
+related commits, but instead submit them one by one. Be aware that if you are
+expecting a different behavior, such as attempting something like an unsquashed
+subtree merge, you will produce a *lot* of CLs. This is strongly discouraged.
+```
+
+```admonish tip
+If you do not have experience with the Gerrit model, consider reading the
+[<cite>Working with Gerrit: An example</cite>][Gerrit Walkthrough] or
+[<cite>Basic Gerrit Walkthrough — For GitHub Users</cite>][github-diff].
+
+It will also be important to read about [attention sets][] to understand how
+your ‘turn’ works, how notifications will be distributed to users through the
+system, as well as the other [attention set rules][attention-set-rules].
+```
+
+
+[check-out-monorepo]: ./getting-started#tvl-monorepo
+[email]: ../contributing/email.html
+[Gerrit SSH]: https://cl.tvl.fyi/settings/#SSHKeys
+[Gerrit walkthrough]: https://gerrit-review.googlesource.com/Documentation/intro-gerrit-walkthrough.html
+[ops-users]: https://code.tvl.fyi/tree/ops/users/default.nix
+[hackint]: https://hackint.org
+[github-diff]: https://gerrit.wikimedia.org/r/Documentation/intro-gerrit-walkthrough-github.html
+[github-problematic]: https://b.tvl.fyi/issues/201
+[attention sets]: https://gerrit-review.googlesource.com/Documentation/user-attention-set.html
+[attention-set-rules]: https://gerrit-review.googlesource.com/Documentation/user-attention-set.html#_rules
+[^keycloak]:
+[^amend]: `git commit --amend`
diff --git a/tvix/docs/src/eval/abandoned/index.md b/tvix/docs/src/eval/abandoned/index.md
new file mode 100644
index 0000000000..1cef704d08
--- /dev/null
+++ b/tvix/docs/src/eval/abandoned/index.md
@@ -0,0 +1,3 @@
+# Abandoned ideas
+
+This chapter keeps track of abandoned ideas, and why they were abandoned.
diff --git a/tvix/docs/src/eval/abandoned/thread-local-vm.md b/tvix/docs/src/eval/abandoned/thread-local-vm.md
new file mode 100644
index 0000000000..c6a2d5e07e
--- /dev/null
+++ b/tvix/docs/src/eval/abandoned/thread-local-vm.md
@@ -0,0 +1,233 @@
+# We can't have nice things because IFD
+
+The thread-local VM work below was ultimately not merged because it
+was decided that it would be harmful for `tvix::eval::Value` to
+implement `Eq`, `Hash`, or any of the other `std` traits.
+
+Implementing `std` traits on `Value` was deemed harmful because IFD
+can cause arbitrary amounts of compilation to occur, including
+network transactions with builders.  Obviously it would be
+unexpected and error-prone to have a `PartialEq::eq()` which does
+something like this.  This problem does not manifest within the
+"nixpkgs compatibility only" scope, or in any undeprecated language
+feature other than IFD.  Although IFD is outside the "nixpkgs
+compatibility scope", it [has been added to the TVL compatibility
+scope](https://cl.tvl.fyi/c/depot/+/7193/comment/3418997b_0dbd0b65/).
+
+This was the sole reason for not merging.
+
+The explanation below may be useful in case future circumstances
+affect the relevance of the reasoning above.
+
+The implementation can be found in these CLs:
+
+- [refactor(tvix/eval): remove lifetime parameter from VM<'o>](https://cl.tvl.fyi/c/depot/+/7194)
+- [feat(tvix/eval): [FOUNDLING] thread-local VM](https://cl.tvl.fyi/c/depot/+/7195)
+- [feat(tvix/eval): [FOUNDLING] VM::vm_xxx convenience methods](https://cl.tvl.fyi/c/depot/+/7196)
+- [refactor(tvix/eval): [FOUNDLING]: drop explicit `&mut vm` parameter](https://cl.tvl.fyi/c/depot/+/7197)
+
+# Thread-local storage for tvix::eval::vm::VM
+
+## The problem
+
+`Value::force()` takes a `&mut VM` argument, since forcing a value
+requires executing opcodes.  This means that `Value::nix_eq()` too
+must take a `&mut VM`, since any sensible definition of equality
+will have to force thunks.
+
+Unfortunately Rust's `PartialEq::eq()` function does not accept any
+additional arguments like this, so `Value` cannot implement
+`PartialEq`.  Worse, structs which *contain* `Value`s can't
+implement `PartialEq` either.  This means `Value`, and anything
+containing it, cannot be the key for a `BTreeMap` or `HashMap`.  We
+can't even insert `Value`s into a `HashSet`!
+
+There are other situations like this that don't involve `PartialEq`,
+but it's the most glaring one.  The main problem is that you need a
+`VM` in order to force thunks, and thunks can be anywhere in a
+`Value`.
+
+## Solving the problem with thread-locals
+
+We could avoid threading the `&mut VM` through the entire codebase
+by making it a thread-local.
+
+To do this without a performance hit, we need to use LLVM
+thread-locals, which are the same cost as references to `static`s
+but load relative to
+[`llvm.threadlocal.address`][threadlocal-intrinsic] instead of
+relative to the data segment.  Unfortunately `#[thread_local]` [is
+unstable][thread-local-unstable] and [unsafe in
+general][thread-local-unsafe] for most of the cases where we would
+want to use it.  There is one [exception][tls-const-init], however:
+if a `thread_local!()` has a `const` initializer, the compiler will
+insert a `#[thread_local]`; this special case is both safe and
+stable.
+
+The difficult decision is what the type of the thread-local should
+be.  Since you can't get a mutable reference to a `thread_local!()`
+it will have to be some interior-mutability-bestowing wrapper around
+our current `struct VM`.  Here are the choices:
+
+### `RefCell<VM>`
+
+This is the obvious first choice, since it lets you borrow a
+`RefMut<Target=VM>`.  The problem here is that we want to keep the
+codebase written such that all the functions in `impl VM` still take
+a `&mut self`.  This means that there will be an active mutable
+borrow for the duration of `VM::call_builtin()`.  So if we implement
+`PartialEq` by having `eq()` attempt a second mutable borrow from
+the thread-local storage, it will fail since there is already an
+active borrow.
+
+The problem here is that you can't "unborrow" a `RefMut` except by
+dropping it.  There's no way around this.
+
+#### Problem: Uglification
+
+The only solution here is to rewrite all the functions in `impl VM`
+so they don't take any kind of `self` argument, and then have them
+do a short-lived `.borrow_mut()` from the thread-local `RefCell`
+*separately, each time* they want to modify one of the fields of
+`VM` (currently `frames`, `stack`, `with_stack`, `warnings`).  This
+means that if you had a code sequence like this:
+
+```
+impl VM {
+  fn foo(&mut self, ...) {
+    ...
+    self.frame().ip += 1;
+    self.some_other_method();
+    self.frame().ip += 1;
+```
+
+You would need to add *two separate `borrow_mut()`s*, one for each
+of the `self.frame().ip+=1` statements.  You can't just do one big
+`borrow_mut()` because `some_other_method()` will call
+`borrow_mut()` and panic.
+
+#### Problem: Performance
+
+The `RefCell<VM>` approach also has a fairly huge performance hit,
+because every single modification to any part of `VM` will require a
+reference count increment/decrement, and a conditional branch based
+on the check (which will never fail) that the `RefCell` isn't
+already mutably borrowed.  It will also impede a lot of rustc's
+optimizations.
+
+### `Cell<VM>`
+
+This is a non-starter because it means that in order to mutate any
+field of `VM`, you have to move the entire `struct VM` out of the
+`Cell`, mutate it, and move it back in.
+
+### `Cell<Box<VM>>`
+
+Now we're getting warmer.  Here, we can move the `Box<VM>` out of
+the cell with a single pointer-sized memory access.
+
+We don't want to do the "uglification" described in the previous
+section.  We are very fortunate that, sometime in mid-2019, the Rust
+deities [decreed by fiat][fiat-decree] that `&Cell<T>` and `&mut T`
+are bit-for-bit identical, and even gave us mortals safe wrappers
+[`from_mut()`][from_mut] and [`get_mut()`][get_mut] around
+`mem::transmute()`.
+
+So now, when a `VM` method (which takes `&mut self`) calls out to
+some external code (like a builtin), instead of passing the `&mut
+self` to the external code it can call `Cell::from_mut(&mut self)`,
+and then `Cell::swap()` that into the thread-local storage cell for
+the duration of the external code.  After the external code returns,
+it can `Cell::swap()` it back.  This whole dance gets wrapped in a
+lexical block, and the borrow checker sees that the `&Cell<Box<VM>>`
+returned by `Cell::from_mut()` lives only until the end of the
+lexical block, *so we get the `&mut self` back after the close-brace
+for that block*.  NLL FTW.  This sounds like a lot of work, but it
+should compile down to two pointer-sized loads and two pointer-sized
+stores, and it is incurred basically only for `OpBuiltin`.
+
+This all works, with only two issues:
+
+1. `vm.rs` needs to be very careful to do the thread-local cell swap
+   dance before calling anything that might call `PartialEq::eq()`
+   (or any other method that expects to be able to pull the `VM` out
+   of thread-local storage).  There is no compile-time check that we
+   did the dance in all the right places.  If we forget to do the
+   dance somewhere we'll get a runtime panic from `Option::expect()`
+   (see next section).
+
+2. Since we need to call `Cell::from_mut()` on a `Box<VM>` rather
+   than a bare `VM`, we still need to rewrite all of `vm.rs` so that
+   every function takes a `&mut Box<VM>` instead of a `&mut self`.
+   This creates a huge amount of "noise" in the code.
+
+Fortunately, it turns out that nearly all the "noise" that arises
+from the second point can be eliminated by taking advantage of
+[deref coercions][deref-coercions]!  This was the last "shoe to
+drop".
+
+There is still the issue of having to be careful about calls from
+`vm.rs` to things outside that file, but it's manageable.
+
+### `Cell<Option<Box<VM>>>`
+
+In order to get the "safe and stable `#[thread_local]`"
+[exception][tls-const-init] we need a `const` initializer, which
+means we need to be able to put something into the `Cell` that isn't
+a `VM`.  So the type needs to be `Cell<Option<Box<VM>>>`.
+
+Recall that you can't turn an `Option<&T>` into an `&Option<T>`.
+The latter type has the "is this a `Some` or `None`" bit immediately
+adjacent to the bits representing `T`.  So if I hand you a `t:&T`
+and you wrap it as `Some(t)`, those bits aren't adjacent in memory.
+This means that all the VM methods need to operate on an
+`Option<Box<VM>>` -- we can't just wrap a `Some()` around `&mut
+self` "at the last minute" before inserting it into the thread-local
+storage cell.  Fortunately deref coercions save the day here too --
+the coercion is inferred through both layers (`Box` and `Option`) of
+wrapper, so there is no additional noise in the code.
+
+Note that Rust is clever and can find some sequence of bits that
+aren't a valid `T`, so `sizeof(Option<T>)==sizeof(T)`.  And in fact,
+`Box<T>` is one of these cases (and this is guaranteed).  So the
+`Option` has no overhead.
+
+# Closing thoughts, language-level support
+
+This would have been easier with language-level support.
+
+## What wouldn't help
+
+Although [it was decreed][fiat-decree] that `Cell<T>` and `&mut
+T` are interchangeable, a `LocalKey<Cell<T>>` isn't quite the same
+thing as a `Cell<T>`, so it wouldn't be safe for the standard
+library to contain something like this:
+
+```
+impl<T> LocalKey<Cell<T>> {
+  fn get_mut(&self) -> &mut T {
+    unsafe {
+      // ... mem::transmute() voodoo goes here ...
+```
+
+The problem here is that you can call `LocalKey<Cell<T>>::get_mut()` twice and
+end up with two `&mut T`s that point to the same thing (mutable aliasing) which
+results in undefined behavior.
+
+## What would help
+
+The ideal solution is for Rust to let you call arbitrary methods
+`T::foo(&mut self...)` on a `LocalKey<Cell<T>>`.  This way you can
+have one (and only one) `&mut T` at any syntactical point in your
+program -- the `&mut self`.
+
+
+[tls-const-init]: https://github.com/rust-lang/rust/pull/90774
+[thread-local-unstable]: https://github.com/rust-lang/rust/issues/29594
+[thread-local-unsafe-generally]: https://github.com/rust-lang/rust/issues/54366
+[fiat-decree]: https://github.com/rust-lang/rust/issues/43038
+[from_mut]: https://doc.rust-lang.org/stable/std/cell/struct.Cell.html#method.from_mut
+[get_mut]: https://doc.rust-lang.org/stable/std/cell/struct.Cell.html#method.get_mut
+[thread-local-unsafe]: https://github.com/rust-lang/rust/issues/54366
+[deref-coercions]: https://doc.rust-lang.org/book/ch15-02-deref.html#implicit-deref-coercions-with-functions-and-methods
+[threadlocal-intrinsic]: https://llvm.org/docs/LangRef.html#llvm-threadlocal-address-intrinsic
diff --git a/tvix/docs/src/eval/bindings.md b/tvix/docs/src/eval/bindings.md
new file mode 100644
index 0000000000..4fb35b6235
--- /dev/null
+++ b/tvix/docs/src/eval/bindings.md
@@ -0,0 +1,134 @@
+# Compilation of bindings
+
+Compilation of Nix bindings is one of the most mind-bending parts of Nix
+evaluation. The implementation of just the compilation is currently almost 1000
+lines of code, excluding the various insane test cases we dreamt up for it.
+
+## What is a binding?
+
+In short, any attribute set or `let`-expression. Tvix currently does not treat
+formals in function parameters (e.g. `{ name ? "fred" }: ...`) the same as these
+bindings.
+
+They have two very difficult features:
+
+1. Keys can mutually refer to each other in `rec` sets or `let`-bindings,
+   including out of definition order.
+2. Attribute sets can be nested, and parts of one attribute set can be defined
+   in multiple separate bindings (both features are illustrated below).
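+
+A small (made-up) expression exercising both at once:
+
+```nix
+rec {
+  b = a + 1;       # refers to `a` before its definition (feature 1)
+  a = 10;
+
+  out.first = a;   # `out` is assembled from two separate
+  out.second = b;  # nested bindings (feature 2)
+}
+```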
+
+Tvix resolves as much of this logic statically (i.e. at compile-time) as
+possible, but the procedure is quite complicated.
+
+## High-level concept
+
+The idea behind the way we compile bindings is to fully resolve nesting
+statically, and use the usual mechanisms (i.e. recursion/thunking/value
+capturing) for resolving dynamic values.
+
+This is done by compiling bindings in several phases:
+
+1. An initial compilation phase *only* for plain inherit statements (i.e.
+   `inherit name;`), *not* for namespaced inherits (i.e. `inherit (from)
+   name;`).
+
+2. A declaration-only phase, in which we use the compiler's scope tracking logic
+   to calculate the physical runtime stack indices (further referred to as
+   "stack slots" or just "slots") that all values will end up in.
+
+   In this phase, whenever we encounter a nested attribute set, it is merged
+   into a custom data structure that acts like a synthetic AST node.
+
+   This can be imagined similar to a rewrite like this:
+
+   ```nix
+   # initial code:
+   {
+       a.b = 1;
+       a.c = 2;
+   }
+
+   # rewritten form:
+   {
+       a = {
+           b = 1;
+           c = 2;
+       };
+   }
+   ```
+
+   The rewrite applies to attribute sets and `let`-bindings alike.
+
+   At the end of this phase, we know the stack slots of all namespaces for
+   inheriting from, all values inherited from them, and all values (and
+   optionally keys) of bindings at the current level.
+
+   Only statically known keys are actually merged, so any dynamic keys that
+   conflict will lead to a "key already defined" error at runtime.
+
+3. A compilation phase, in which all values (and, when necessary, keys) are
+   actually compiled. In this phase the custom data structure used for merging
+   is encountered when compiling values.
+
+   As this data structure acts like an AST node, the process begins recursively
+   for each nested attribute set.
+
+At the end of this process we have bytecode that leaves the required values (and
+optionally keys) on the stack. In the case of attribute sets, a final operation
+is emitted that constructs the actual attribute set structure at runtime. For
+`let`-bindings a final operation is emitted that removes these locals from the
+stack when the scope ends.
+
+## Moving parts
+
+```admonish caution
+This documents the *current* implementation. If you only care about the
+conceptual aspects, see above.
+```
+
+There's a few types involved:
+
+* `PeekableAttrs`: peekable iterator over an attribute path (e.g. `a.b.c`)
+* `BindingsKind`: enum defining the kind of bindings (attrs/recattrs/let)
+* `AttributeSet`: struct holding the bindings kind, the AST nodes with inherits
+  (both namespaced and not), and an internal representation of bindings
+  (essentially a vector of tuples of the peekable attrs and the expression to
+  compile for the value).
+* `Binding`: enum describing the kind of binding (namespaced inherit, attribute
+  set, plain binding of *any other value type*)
+* `KeySlot`: enum describing the location in which a key slot is placed at
+  runtime (nowhere, statically known value in a slot, dynamic value in a slot)
+* `TrackedBinding`: struct representing statically known information about a
+  single binding (its key slot, value slot and `Binding`)
+* `TrackedBindings`: vector of tracked bindings, which implements logic for
+  merging attribute sets together
+
+And quite a few methods on `Compiler`:
+
+* `compile_bindings`: entry point for compiling anything that looks like a
+  binding, this calls out to the functions below.
+* `compile_plain_inherits`: takes all inherits of a bindings node and compiles
+  the ones that are trivial to compile (i.e. just plain inherits without a
+  namespace). The `rnix` parser does not represent namespaced/plain inherits in
+  different nodes, so this function also aggregates the namespaced inherits and
+  returns them for further use
+* `declare_namespaced_inherits`: passes over all namespaced inherits and
+  declares them on the locals stack, as well as inserts them into the provided
+  `TrackedBindings`
+* `declare_bindings`: declares all regular key/value bindings in a bindings
+  scope, but without actually compiling their keys or values.
+
+  There's a lot of heavy lifting going on here:
+
+  1. It invokes the various pieces of logic responsible for merging nested
+     attribute sets together, creating intermediate data structures in the value
+     slots of bindings that can be recursively processed the same way.
+  2. It decides on the key slots of expressions based on the kind of bindings,
+     and the type of expression providing the key.
+* `bind_values`: runs the actual compilation of values. Notably this function is
+  responsible for recursively compiling merged attribute sets when it encounters
+  a `Binding::Set` (on which it invokes `compile_bindings` itself).
+
+In addition to these, several methods (such as `compile_attr_set`,
+`compile_let_in`, ...) invoke the binding-kind-specific logic and then call out
+to the functions above.
diff --git a/tvix/docs/src/eval/build-references.md b/tvix/docs/src/eval/build-references.md
new file mode 100644
index 0000000000..dd53f65d83
--- /dev/null
+++ b/tvix/docs/src/eval/build-references.md
@@ -0,0 +1,259 @@
+# Build references in derivations
+
+This document describes how build references are calculated in Tvix. Build
+references are used to determine which store paths should be available to a
+builder during the execution of a build (i.e. the full build closure of a
+derivation).
+
+## String contexts in C++ Nix
+
+In C++ Nix, each string value in the evaluator carries an optional so-called
+"string context".
+
+These contexts are themselves a list of strings that take one of the following
+formats:
+
+1. `!<output_name>!<drv_path>`
+
+   This format describes a build reference to a specific output of a derivation.
+
+2. `=<drv_path>`
+
+   This format is used for a special case where a derivation attribute directly
+   refers to a derivation path (e.g. by accessing `.drvPath` on a derivation).
+
+   ```admonish note
+   In C++ Nix this case is quite special and actually requires a store-database
+   query during evaluation.
+   ```
+
+3. `<path>` - a non-descript store path input, usually a plain source file (e.g.
+   from something like `src = ./.` or `src = ./foo.txt`).
+
+   In the case of `unsafeDiscardOutputDependency` this is used to pass a raw
+   derivation file, but *not* pull in its outputs.
+
+Let's introduce names for these (in the same order) to make them easier to
+reference below:
+
+```rust
+enum BuildReference {
+    /// !<output_name>!<drv_path>
+    SingleOutput(OutputName, DrvPath),
+
+    /// =<drv_path>
+    DrvClosure(DrvPath),
+
+    /// <path>
+    Path(StorePath),
+}
+```
+
+String contexts are, broadly speaking, created whenever a string is the result
+of a computation (e.g. string interpolation) that used a *computed* path or
+derivation in any way.
+
+Note: This explicitly does *not* include simply writing a literal string
+containing a store path (whether valid or not). That is only permitted through
+the `storePath` builtin.
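+
+For example, interpolating a derivation into a string creates a `SingleOutput`
+style context entry, which `builtins.getContext` makes visible (a hedged
+sketch; the exact `.drv` path shown in the comment is made up):
+
+```nix
+let
+  pkgs = import <nixpkgs> { };
+in builtins.getContext "${pkgs.hello}/bin/hello"
+# => { "/nix/store/…-hello-2.12.1.drv" = { outputs = [ "out" ]; }; }
+```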
+
+## Derivation inputs
+
+Based on the data above, the fields `inputDrvs` and `inputSrcs` of derivations
+are populated in `builtins.derivationStrict` (the function wrapped by
+`builtins.derivation`, which isn't actually a builtin itself).
+
+`inputDrvs` is represented by a map of derivation paths to the set of their
+outputs that were referenced by the context.
+
+TODO: What happens if the set is empty? Somebody claimed this means all outputs.
+
+`inputSrcs` is represented by a set of paths.
+
+These are populated by the above references as follows:
+
+* `SingleOutput` entries are merged into `inputDrvs`
+* `Path` entries are inserted into `inputSrcs`
+* `DrvClosure` leads to a special store computation (`computeFSClosure`), which
+  finds all paths referenced by the derivation and then inserts all of them into
+  the fields as above (derivations with _all_ their outputs)
+
+This is then serialised in the derivation and passed down the pipe.
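+
+As a rough illustration (assuming a hypothetical local file `./src.txt`
+exists), the references created by the interpolations below end up in the two
+fields like this:
+
+```nix
+let
+  dep = derivation {
+    name = "dep";
+    system = "x86_64-linux";
+    builder = "/bin/sh";
+    args = [ "-c" "echo hi > $out" ];
+  };
+in derivation {
+  name = "demo";
+  system = "x86_64-linux";
+  builder = "/bin/sh";
+  # ${./src.txt} contributes a `Path` reference   -> inputSrcs
+  # ${dep} contributes a `SingleOutput` reference -> inputDrvs (dep's drv path -> [ "out" ])
+  args = [ "-c" "cat ${./src.txt} ${dep} > $out" ];
+}
+```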
+
+## Builtins interfacing with contexts
+
+C++ Nix has several builtins that interface directly with string contexts:
+
+* `unsafeDiscardStringContext`: throws away a string's string context (if
+  present)
+* `hasContext`: returns `true`/`false` depending on whether the string has
+  context
+* `unsafeDiscardOutputDependency`: drops dependencies on the *outputs* of a
+  `.drv` in the context, passing only the literal `.drv` itself
+
+  ```admonish note
+  This is only used for special test-cases in nixpkgs, and deprecated Nix
+  commands like `nix-push`.
+  ```
+* `getContext`: returns the string context in serialised form as a Nix attribute
+  set
+* `appendContext`: adds a given string context to the string in the same format
+  as returned by `getContext`
+
+Most of the string manipulation operations will propagate the context to the
+result based on their parameters' contexts.
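+
+A small sketch of how two of these behave (result comments reflect C++ Nix
+behaviour):
+
+```nix
+let
+  pkgs = import <nixpkgs> { };
+  p = "${pkgs.hello}";
+in {
+  before = builtins.hasContext p;                                       # true
+  after  = builtins.hasContext (builtins.unsafeDiscardStringContext p); # false
+}
+```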
+
+## Placeholders
+
+C++ Nix has `builtins.placeholder`, which given the name of an output (e.g.
+`out`) creates a hashed string representation of that output name. If that
+string is used anywhere in input attributes, the builder will replace it with
+the actual name of the corresponding output of the current derivation.
+
+C++ Nix does not use contexts for this, it blindly creates a rewrite map of
+these placeholder strings to the names of all outputs, and runs the output
+replacement logic on all environment variables it creates, attribute files it
+passes etc.
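+
+A minimal usage sketch: the hashed string returned by `builtins.placeholder`
+below is rewritten to the actual path of the `out` output before the builder
+runs.
+
+```nix
+derivation {
+  name = "placeholder-demo";
+  system = builtins.currentSystem;
+  builder = "/bin/sh";
+  args = [ "-c" "echo hello > ${builtins.placeholder "out"}" ];
+}
+```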
+
+## Tvix & string contexts
+
+In the past, Tvix did not track string contexts in its evaluator at all, see
+the historical section for more information about that.
+
+Tvix tracks string contexts in every `NixString` structure via a
+`HashSet<BuildReference>` and offers an API to combine the references while
+keeping the exact internal structure of that data private.
+
+## Historical attempt: Persistent reference tracking
+
+We were investigating implementing a system which allows us to drop string
+contexts in favour of reference scanning derivation attributes.
+
+This means that instead of maintaining and passing around a string context data
+structure in eval, we maintain a data structure of *known paths* from the same
+evaluation elsewhere in Tvix, and scan each derivation attribute against this
+set of known paths when instantiating derivations.
+
+We believed we could take the stance that the system of string contexts as
+implemented in C++ Nix is likely an implementation detail that should not be
+leaking to the language surface as it does now.
+
+### Tracking "known paths"
+
+Every time a Tvix evaluation does something that causes a store interaction, a
+"known path" is created. On the language surface, this is the result of one of:
+
+1. Path literals (e.g. `src = ./.`).
+2. Calls to `builtins.derivationStrict` yielding a derivation and its output
+   paths.
+3. Calls to `builtins.path`.
+
+Whenever one of these occurs, some metadata that persists for the duration of
+one evaluation should be created in Nix. This metadata needs to be available in
+`builtins.derivationStrict`, and should be able to respond to these queries:
+
+1. What is the set of all known paths? (used for e.g. instantiating an
+   Aho-Corasick type string searcher)
+2. What is the _type_ of a path? (derivation path, derivation output, source
+   file)
+3. What are the outputs of a derivation?
+4. What is the derivation of an output?
+
+These queries will need to be asked of the metadata when populating the
+derivation fields.
+
+```admonish note
+Depending on how we implement `builtins.placeholder`, it might be useful
+to track created placeholders in this metadata, too.
+```
+
+### Context builtins
+
+Context-reading builtins can be implemented in Tvix by adding `hasContext` and
+`getContext` with the appropriate reference-scanning logic. However, we should
+evaluate how these are used in nixpkgs and whether their uses can be removed.
+
+Context-mutating builtins can be implemented by tracking their effects in the
+value representation of Tvix, however we should consider not doing this at all.
+
+`unsafeDiscardOutputDependency` should probably never be used and we should warn
+or error on it.
+
+`unsafeDiscardStringContext` is often used as a workaround for avoiding IFD in
+inconvenient places (e.g. in the TVL depot pipeline generation). This is
+unnecessary in Tvix. We should evaluate which other uses exist, and act on them
+appropriately.
+
+The initial danger with diverging here is that we might cause derivation hash
+discrepancies between Tvix and C++ Nix, which can make initial comparisons of
+derivations generated by the two systems difficult. If this occurs we need to
+discuss how to approach it, but initially we will implement the mutating
+builtins as no-ops.
+
+### Why this did not work for us
+
+Nix has a feature to perform environmental checks of your derivation, e.g.
+"these derivation outputs should not be referenced in this derivation", this was
+introduced in Nix 2.2 by
+https://github.com/NixOS/nix/commit/3cd15c5b1f5a8e6de87d5b7e8cc2f1326b420c88.
+
+Unfortunately, this feature introduced a critical bug: any usage of it with
+contextful strings will actually force the derivation to depend, at least at
+build time, on those specific paths; see
+https://github.com/NixOS/nix/issues/4629.
+
+For example, if you wanted to add a package to `disallowedReferences` and used
+a derivation as the path, you would actually register that derivation as an
+input derivation of the derivation being checked.
+
+This bug is still unfixed in Nix, and it seems that fixing it would require
+introducing a different way to evaluate Nix derivations while preserving the
+output path calculation of existing Nix expressions.
+
+All of this would be fine if the buggy behavior were uniform, in the sense that
+no one tried to work around it. Since Nixpkgs 23.05, due to
+https://github.com/NixOS/nixpkgs/pull/211783, this is no longer true.
+
+Let nixpkgs be the disjoint union of the bootstrapping derivations $A$ and the
+`stdenv.mkDerivation`-built derivations $B$.
+
+$A$ suffers from the bug, while $B$ doesn't, due to the forced usage of
+`unsafeDiscardStringContext` on those special checking fields.
+
+This means that to build hash-compatible $A$ **and** $B$, we need to
+distinguish $A$ and $B$. A lot of hacks could be imagined to work around this
+problem.
+
+Let's assume we have a solution to that problem: it means that we are able to
+implicitly detect when a specific set of fields is
+`unsafeDiscardStringContext`-ed.
+
+But then we could use that same trick to implement `unsafeDiscardStringContext`
+entirely, for all fields.
+
+Now, to implement `unsafeDiscardStringContext` in the persistent reference
+tracking model, you would need to store a list of strings that should not
+trigger a reference when scanning a derivation's parameters.
+
+But assume you have something like:
+
+```nix
+derivation {
+   buildInputs = [
+     stdenv.cc
+   ];
+
+   disallowedReferences = [ stdenv.cc ];
+}
+```
+
+If you naively unregister the `stdenv.cc` reference, it will also silence the
+fact that it is part of `buildInputs`: Nix would fail the derivation during the
+environmental check, while Tvix would silently drop that reference.
+
+Until proven otherwise, it seems highly difficult to obtain the fine-grained
+information needed to prevent reference tracking for those specific fields.
+This is not a failure of persistent reference tracking as such, but an
+unresolved critical bug in Nix that nixpkgs only really worked around for
+`stdenv.mkDerivation`-based derivations.
diff --git a/tvix/docs/src/eval/builtins.md b/tvix/docs/src/eval/builtins.md
new file mode 100644
index 0000000000..d9fcd72cca
--- /dev/null
+++ b/tvix/docs/src/eval/builtins.md
@@ -0,0 +1,137 @@
+# Nix builtins
+
+Nix has a lot of built-in functions, some of which are accessible in
+the global scope, and some of which are only accessible through the
+global `builtins` attribute set.
+
+This document is an attempt to track all of these builtins, but
+without documenting their functionality.
+
+See also https://nixos.org/manual/nix/stable/expressions/builtins.html
+
+The `impl` column indicates implementation status in tvix:
+- implemented: "" (empty cell)
+- not yet implemented, but not blocked: `todo`
+- not yet implemented, but blocked by other prerequisites:
+  - `store`: awaiting eval<->store api(s)
+  - `context`: awaiting support for string contexts
+
+| name                          | global | arity | pure  | impl    |
+|-------------------------------|--------|-------|-------|---------|
+| abort                         | true   | 1     |       |         |
+| add                           | false  | 2     | true  |         |
+| addErrorContext               | false  | ?     |       | context |
+| all                           | false  | 2     | true  |         |
+| any                           | false  | 2     | true  |         |
+| appendContext                 | false  | ?     |       |         |
+| attrNames                     | false  | 1     | true  |         |
+| attrValues                    | false  |       | true  |         |
+| baseNameOf                    | true   |       |       |         |
+| bitAnd                        | false  |       |       |         |
+| bitOr                         | false  |       |       |         |
+| bitXor                        | false  |       |       |         |
+| builtins                      | true   |       |       |         |
+| catAttrs                      | false  |       |       |         |
+| compareVersions               | false  |       |       |         |
+| concatLists                   | false  |       |       |         |
+| concatMap                     | false  |       |       |         |
+| concatStringsSep              | false  |       |       |         |
+| currentSystem                 | false  |       |       |         |
+| currentTime                   | false  |       | false |         |
+| deepSeq                       | false  |       |       |         |
+| derivation                    | true   |       |       | store   |
+| derivationStrict              | true   |       |       | store   |
+| dirOf                         | true   |       |       |         |
+| div                           | false  |       |       |         |
+| elem                          | false  |       |       |         |
+| elemAt                        | false  |       |       |         |
+| false                         | true   |       |       |         |
+| fetchGit                      | true   |       |       | store   |
+| fetchMercurial                | true   |       |       | store   |
+| fetchTarball                  | true   |       |       | store   |
+| fetchurl                      | false  |       |       | store   |
+| filter                        | false  |       |       |         |
+| filterSource                  | false  |       |       | store   |
+| findFile                      | false  |       | false | todo    |
+| foldl'                        | false  |       |       |         |
+| fromJSON                      | false  |       |       |         |
+| fromTOML                      | true   |       |       |         |
+| functionArgs                  | false  |       |       |         |
+| genList                       | false  |       |       |         |
+| genericClosure                | false  |       |       | todo    |
+| getAttr                       | false  |       |       |         |
+| getContext                    | false  |       |       |         |
+| getEnv                        | false  |       | false |         |
+| hasAttr                       | false  |       |       |         |
+| hasContext                    | false  |       |       |         |
+| hashFile                      | false  |       | false |         |
+| hashString                    | false  |       |       |         |
+| head                          | false  |       |       |         |
+| import                        | true   |       |       |         |
+| intersectAttrs                | false  |       |       |         |
+| isAttrs                       | false  |       |       |         |
+| isBool                        | false  |       |       |         |
+| isFloat                       | false  |       |       |         |
+| isFunction                    | false  |       |       |         |
+| isInt                         | false  |       |       |         |
+| isList                        | false  |       |       |         |
+| isNull                        | true   |       |       |         |
+| isPath                        | false  |       |       |         |
+| isString                      | false  |       |       |         |
+| langVersion                   | false  |       |       |         |
+| length                        | false  |       |       |         |
+| lessThan                      | false  |       |       |         |
+| listToAttrs                   | false  |       |       |         |
+| map                           | true   |       |       |         |
+| mapAttrs                      | false  |       |       |         |
+| match                         | false  |       |       |         |
+| mul                           | false  |       |       |         |
+| nixPath                       | false  |       |       | todo    |
+| nixVersion                    | false  |       |       | todo    |
+| null                          | true   |       |       |         |
+| parseDrvName                  | false  |       |       |         |
+| partition                     | false  |       |       |         |
+| path                          | false  |       | sometimes | store |
+| pathExists                    | false  |       | false |         |
+| placeholder                   | true   |       |       | context |
+| readDir                       | false  |       | false |         |
+| readFile                      | false  |       | false |         |
+| removeAttrs                   | true   |       |       |         |
+| replaceStrings                | false  |       |       |         |
+| scopedImport                  | true   |       |       |         |
+| seq                           | false  |       |       |         |
+| sort                          | false  |       |       |         |
+| split                         | false  |       |       |         |
+| splitVersion                  | false  |       |       |         |
+| storeDir                      | false  |       |       | store   |
+| storePath                     | false  |       |       | store   |
+| stringLength                  | false  |       |       |         |
+| sub                           | false  |       |       |         |
+| substring                     | false  |       |       |         |
+| tail                          | false  |       |       |         |
+| throw                         | true   |       |       |         |
+| toFile                        | false  |       |       | store   |
+| toJSON                        | false  |       |       |         |
+| toPath                        | false  |       |       |         |
+| toString                      | true   |       |       |         |
+| toXML                         | true   |       |       |         |
+| trace                         | false  |       |       |         |
+| true                          | true   |       |       |         |
+| tryEval                       | false  |       |       |         |
+| typeOf                        | false  |       |       |         |
+| unsafeDiscardOutputDependency | false  |       |       |         |
+| unsafeDiscardStringContext    | false  |       |       |         |
+| unsafeGetAttrPos              | false  |       |       | todo    |
+| valueSize                     | false  |       |       | todo    |
+
+## Added after C++ Nix 2.3 (without Flakes enabled)
+
+| name          | global | arity | pure  | impl  |
+|---------------|--------|-------|-------|-------|
+| break         | false  | 1     |       | todo  |
+| ceil          | false  | 1     | true  |       |
+| fetchTree     | true   | 1     |       | todo  |
+| floor         | false  | 1     | true  |       |
+| groupBy       | false  | 2     | true  |       |
+| traceVerbose  | false  | 2     |       | todo  |
+| zipAttrsWith  | false  | 2     | true  | todo  |
diff --git a/tvix/docs/src/eval/catchable-errors.md b/tvix/docs/src/eval/catchable-errors.md
new file mode 100644
index 0000000000..ce320a9217
--- /dev/null
+++ b/tvix/docs/src/eval/catchable-errors.md
@@ -0,0 +1,131 @@
+# (Possible) Implementation(s) of Catchable Errors for `builtins.tryEval`
+
+## Terminology
+
+Talking about “catchable errors” in Nix in general is a bit precarious since
+there is no properly established terminology. Also, the existing terms are less
+than apt. The reason for this lies in the fact that catchable errors (or
+whatever you want to call them) don't properly _exist_ in the language: While
+Nix's `builtins.tryEval` is (originally) based on the C++ exception system,
+it specifically lacks the ability of such systems to have an exception _value_
+whilst handling it. Consequently, these errors don't have an obvious name
+as they never appear _in_ the Nix language. They just have to be named in the
+respective Nix implementation:
+
+- In C++ Nix the only term for such errors is `AssertionError` which is the
+  name of the (C++) exception used in the implementation internally. This
+  term isn't great, though, as `AssertionError`s can not only be generated
+  using `assert`, but also using `throw` and failed `NIX_PATH` resolutions.
+  Were this terminology to be used in documentation addressing Nix language
+  users, it would probably only serve confusion.
+
+- Tvix currently (as of r/7573) uses the term catchable errors. This term
+  relates to nothing in the language as such: Errors are not caught, we rather
+  try to evaluate an expression. Catching also sort of implies that a value
+  representation of the error is attainable (like in an exception system) which
+  is untrue.
+
+In light of this I (sterni) would like to suggest “tryable errors” as an
+alternative term going forward which isn't inaccurate and relates to terms
+already established by language internal naming.
+
+However, this document will continue using the term catchable error until the
+naming is adjusted in Tvix itself.
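+
+For reference, a small sketch of what `builtins.tryEval` actually yields: the
+failing cases produce no error value at all, only `success = false` (output
+comments assume C++ Nix semantics).
+
+```nix
+[
+  (builtins.tryEval (builtins.throw "boom"))  # { success = false; value = false; }
+  (builtins.tryEval (assert false; 23))       # { success = false; value = false; }
+  (builtins.tryEval 42)                       # { success = true;  value = 42; }
+]
+```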
+
+## Implementation
+
+Below we discuss different implementation approaches in Tvix in order to arrive
+at a proposal for the new one. The historical discussion is intended as a basis
+for discussing the proposal: Are we committing to an old or current mistake? Are
+we solving all problems that cropped up or were solved at any given point in
+time?
+
+### Original
+
+The original implementation of `tryEval` in cl/6924 was quite straightforward:
+It would simply interrupt the propagation of a potential catchable error to the
+top level (which usually happened using the `?` operator) in the builtin and
+construct the appropriate representation of an unsuccessful evaluation if the
+error was deemed catchable. It had, however, multiple problems:
+
+- The VM was originally written without `tryEval` in mind, i.e. it largely
+  assumed that an error would always cause execution to be terminated. This
+  problem was later solved (cl/6940).
+- Thunks could not be `tryEval`-ed multiple times (b/281). This was another
+  consequence of VM architecture at the time: Thunks would be blackholed
+  before evaluation was started and the error could occur. Due to the
+  interaction of the generator-based VM code and `Value::force` the part
+  of the code altering the thunk state would never be informed about the
+  evaluation result in case of a failure, so the thunk would remain
+  blackholed leading to a crash if the same thunk was `tryEval`-ed or
+  forced again. To solve this issue, amjoseph completely overhauled
+  the implementation.
+
+One key point about this implementation is that it is based on the assumption
+that catchable errors can only be generated in thunks, i.e. expressions causing
+them are never evaluated strictly. This can be illustrated using C++ Nix:
+
+```console
+> nix-instantiate --eval -E '[ (assert false; true) (builtins.throw "") <nixpkgs> ]'
+[ <CODE> <CODE> <CODE> ]
+```
+
+If this wasn't the case, the VM could encounter the error in a situation where
+the error would not have needed to pass through the `tryEval` builtin, causing
+evaluation to abort.
+
+### Present
+
+The current system (mostly implemented in cl/9289) uses a very different
+approach: Instead of relying on the thunk boundary, catchable errors are no
+longer errors, but special values. They are created at the relevant points (e.g.
+`builtins.throw`) and propagated whenever they are encountered by VM ops or
+builtins. Finally, they either encounter `builtins.tryEval` (and are converted to
+an ordinary value again) or the top level where they become a normal error again.
+
+The problems with this mostly stem from the confusion between values and errors
+that it necessitates:
+
+- In most circumstances, catchable errors end up being errors again, as `tryEval`
+  is not used a lot. So `throw`s usually end up causing evaluation to abort.
+  Consequently, not only is `Value::Catchable` necessary, but also a corresponding
+  error variant that is _only_ created if a catchable value remains at the end of
+  evaluation. This requirement was missed until cl/10991 (!), which illustrates
+  how strange that architecture is. A consequence of this is that catchable
+  errors have no location information at all.
+- `Value::Catchable` is similar to other internal values in Tvix, but is much
+  more problematic. Aside from thunks, internal values only exist for a brief
+  amount of time on the stack and it is very clear what parts of the VM or
+  builtins need to handle them. This means that the rest of the implementation
+  need not consider them, keeping the complexity caused by the internal value
+  low. `Value::Catchable`, on the other hand, may exist anywhere and be passed
+  to any VM op or builtin, so it needs to be correctly propagated _everywhere_.
+  This causes a lot of noise in the code as well as a big potential for bugs.
+  Essentially, catchable errors require as much attention by the Tvix developer
+  as laziness. This doesn't really correlate to the importance of the two
+  features to the Nix language.
+
+### Future?
+
+The core assumption of the original solution does offer a path forward: After
+cl/9289 we should be in a better position to introspect an error occurring from
+within the VM code, but we need a better way of storing such an error to prevent
+another b/281. If catchable errors can only be generated in thunks, we can just
+use the thunk representation for this. This would mean that `Thunk::force_`
+would need to check if evaluation was successful and (in case of failure)
+change the thunk representation:
+
+- either to the original `ThunkRepr::Suspended` which would be simple, but of
+  course mean duplicated evaluation work in some expressions. In fact, this
+  would probably leave a lot of easy performance on the table for use cases we
+  would like to support, e.g. tree walkers for nixpkgs.
+- or to a new `ThunkRepr` variant that stores the kind of the error and all
+  necessary location info so stack traces can work properly. This of course
+  reintroduces some of the difficulty of having two kinds of errors, but it is
+  hopefully less problematic, as the thunk boundary (i.e. `Thunk::force`) is
+  where errors would usually occur.
+
+Besides the question whether this proposal can actually be implemented, another
+consideration is whether the underlying assumption will hold in the future, i.e.
+can we implement optimizations for thunk elimination in a way that thunks that
+generate catchable errors are never eliminated?
diff --git a/tvix/docs/src/eval/known-optimisation-potential.md b/tvix/docs/src/eval/known-optimisation-potential.md
new file mode 100644
index 0000000000..11babcb59a
--- /dev/null
+++ b/tvix/docs/src/eval/known-optimisation-potential.md
@@ -0,0 +1,161 @@
+# Known Optimisation Potential
+
+There are several areas of the Tvix evaluator code base where
+potentially large performance gains can be achieved through
+optimisations that we are already aware of.
+
+The shape of most optimisations is that of moving more work into the
+compiler to simplify the runtime execution of Nix code. This leads, in
+some cases, to drastically higher complexity in both the compiler
+itself and in invariants that need to be guaranteed between the
+runtime and the compiler.
+
+For this reason, and because we lack the infrastructure to adequately
+track their impact (WIP), we have not yet implemented these
+optimisations, but note the most important ones here.
+
+* Use "open upvalues" [hard]
+
+  Right now, Tvix will immediately close over all upvalues that are
+  created and clone them into the `Closure::upvalues` array.
+
+  Instead of doing this, we can statically determine most locals that
+  are closed over *and escape their scope* (similar to how the
+  `compiler::scope::Scope` struct currently tracks whether locals are
+  used at all).
+
+  If we implement the machinery to track this, we can implement some
+  upvalues at runtime by simply sticking stack indices in the upvalue
+  array and only copy the values where we know that they escape.
+
+* Avoid `with` value duplication [easy]
+
+  If a `with` makes use of a local identifier in a scope that can not
+  close before the with (e.g. not across `LambdaCtx` boundaries), we
+  can avoid the allocation of the phantom value and duplication of the
+  `NixAttrs` value on the stack. In this case we simply push the stack
+  index of the known local.
+
+* Multiple attribute selection [medium]
+
+  An instruction could be introduced that avoids repeatedly pushing an
+  attribute set to/from the stack if multiple keys are being selected
+  from it. This occurs, for example, when inheriting from an attribute
+  set or when binding function formals.
+
+* Split closure/function representation [easy]
+
+  Functions have fewer fields that need to be populated at runtime and
+  can directly use the `value::function::Lambda` representation where
+  possible.
+
+* Apply `compiler::optimise_select` to other set operations [medium]
+
+  In addition to selects, statically known attribute resolution could
+  also be used for things like `?` or `with`. The latter might be a
+  little more complicated but is worth investigating.
+
+* Inline fully applied builtins with equivalent operators [medium]
+
+  Some `builtins` have equivalent operators, e.g. `builtins.sub`
+  corresponds to the `-` operator, `builtins.hasAttr` to the `?`
+  operator etc. These operators additionally compile to a primitive
+  VM opcode, so they should be just as cheap (if not cheaper) as
+  a builtin application.
+
+  In case the compiler encounters a fully applied builtin (i.e.
+  no currying is occurring) and the `builtins` global is unshadowed,
+  it could compile the equivalent operator bytecode instead: For
+  example, `builtins.sub 20 22` would be compiled as `20 - 22`.
+  This would ensure that equivalent `builtins` can also benefit
+  from special optimisations we may implement for certain operators
+  (in the absence of currying). E.g. we could optimise access
+  to the `builtins` attribute set which a call to
+  `builtins.getAttr "foo" builtins` should also profit from.
+
+* Avoid nested `VM::run` calls [hard]
+
+  Currently when encountering Nix-native callables (thunks, closures)
+  the VM's run loop will nest and return the value of the nested call
+  frame one level up. This makes the Rust call stack almost mirror the
+  Nix call stack, which is usually undesirable.
+
+  It is possible to detect situations where this is avoidable and
+  instead set up the VM in such a way that it continues and produces
+  the desired result in the same run loop, but this is kind of tricky
+  to get right - especially while other parts are still in flux.
+
+  For details consult the commit with Gerrit change ID
+  `I96828ab6a628136e0bac1bf03555faa4e6b74ece`, in which the initial
+  attempt at doing this was reverted.
+
+* Avoid thunks if only identifier closing is required [medium]
+
+  Some constructs, like `with`, mostly do not change runtime behaviour
+  if thunked. However, they are wrapped in thunks to ensure that
+  deferred identifiers are resolved correctly.
+
+  This can be avoided, as we statically analyse the scope and should
+  be able to tell whether any such logic was required.
+
+* Intern literals [easy]
+
+  Currently, the compiler emits a separate entry in the constant
+  table for each literal.  So the program `1 + 1 + 1` will have
+  three entries in its `Chunk::constants` instead of only one.
+
+* Do some list and attribute set operations in place [hard]
+
+  Algorithms that can not do a lot of work inside `builtins` like `map`,
+  `filter` or `foldl'` usually perform terribly if they use data structures like
+  lists and attribute sets.
+
+  `builtins` can do work in place on a copy of a `Value`, but naïvely expressed
+  recursive algorithms will usually use `//` and `++` to do a single change to a
+  `Value` at a time, requiring a full copy of the data structure each time.
+  It would be a big improvement if we could do some of these operations in place
+  without requiring a new copy.
+
+  There are probably two approaches: We could determine statically if a value is
+  reachable from elsewhere and emit a special in place instruction if not. An
+  easier alternative is probably to rely on reference counting at runtime: If no
+  other reference to a value exists, we can extend the list or update the
+  attribute set in place.
+
+  An **alternative** to this is using [persistent data
+  structures](https://en.wikipedia.org/wiki/Persistent_data_structure) or at the
+  very least [immutable data structures](https://docs.rs/im/latest/im/) that can
+  be copied more efficiently than the stock structures we are using at the
+  moment.
+
+* Skip finalising unfinalised thunks or non-thunks instead of crashing [easy]
+
+  Currently `OpFinalise` crashes the VM if it is called on values that don't
+  need to be finalised. This helps catching miscompilations where `OpFinalise`
+  operates on the wrong `StackIdx`. In the case of function argument patterns,
+  however, this means extra VM stack and instruction overhead for dynamically
+  determining if finalisation is necessary or not. This wouldn't be necessary
+  if `OpFinalise` would just noop on any values that don't need to be finalised
+  (anymore).
+
+* Phantom binding for from expression of inherits [easy]
+
+  The from expression of an inherit is reevaluated for each inherit. This can
+  be demonstrated using the following Nix expression which, counter-intuitively,
+  will print “plonk” twice.
+
+  ```nix
+  let
+    inherit (builtins.trace "plonk" { a = null; b = null; }) a b;
+  in
+  builtins.seq a (builtins.seq b null)
+  ```
+
+  In most Nix code, the from expression is just an identifier, so it is not
+  terribly inefficient, but in some cases a more expensive expression may
+  be used. We should create a phantom binding for the from expression that
+  is reused in the inherits, so only a single thunk is created for the from
+  expression.
+
+  Since we discovered this, C++ Nix has implemented a similar optimization:
+  <https://github.com/NixOS/nix/pull/9847>.
diff --git a/tvix/docs/src/eval/language-issues.md b/tvix/docs/src/eval/language-issues.md
new file mode 100644
index 0000000000..152e6594a1
--- /dev/null
+++ b/tvix/docs/src/eval/language-issues.md
@@ -0,0 +1,46 @@
+# Nix language issues
+
+In the absence of a language standard, what Nix (the language) is, is prescribed
+by the behavior of the C++ Nix implementation. Still, there are reasons not to
+accept some behavior:
+
+* Tvix aims for nixpkgs compatibility only. This means we can ignore behavior in
+  edge cases nixpkgs doesn't trigger as well as obscure features it doesn't use
+  (e.g. `__overrides`).
+* Some behavior of the Nix evaluator seems to be unintentional or an
+  implementation detail leaking out into language behavior.
+
+Especially in the latter case, it makes sense to raise the respective issue and
+maybe to get rid of the behavior in all implementations for good. Below is an
+(incomplete) list of such issues:
+
+* [Behaviour of nested attribute sets depends on definition order][i7111]
+* [Partially constructed attribute sets are observable during dynamic attr names construction][i7012]
+* [Nix parsers merges multiple attribute set literals for the same key incorrectly depending on definition order][i7115]
+
+On the other hand, there is behavior that seems to violate one's expectation
+about the language at first, but has good enough reasons from an implementor's
+perspective to keep them:
+
+* Dynamic keys are forbidden in `let` and `inherit`. This makes sure that we
+  only need to do runtime identifier lookups for `with`. More dynamic (i.e.
+  runtime) lookups would make the scoping system even more complicated as well
+  as hurt performance.
+* Dynamic attributes of `rec` sets are not added to its scope. This makes sense
+  for the same reason (see the example after this list).
+* Dynamic and nested attributes in attribute sets don't get merged. This is a
+  tricky one, but avoids doing runtime (recursive) merges of attribute sets.
+  Instead, all necessary merging can be inferred statically, i.e. the C++ Nix
+  implementation already merges at parse time, effectively making nested
+  attribute keys syntactic sugar.
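+
+A short illustration of the first two points (the error messages are roughly
+those of C++ Nix):
+
+```nix
+# dynamic keys are rejected in `let` …
+let ${"a"} = 1; in a
+# error: dynamic attributes not allowed in let
+
+# … and dynamic attributes of a `rec` set are not added to its scope
+rec { ${"b"} = 1; c = b; }
+# error: undefined variable 'b'
+```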
+
+Other behavior is just odd, surprising or underdocumented:
+
+* `builtins.foldl'` doesn't force the initial accumulator (but all other
+  intermediate accumulator values), differing from e.g. Haskell; see
+  the [relevant PR discussion][p7158] and the sketch below.
+
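+A hedged sketch of this behaviour (each expression is meant to be evaluated on
+its own, e.g. in `nix repl`; results reflect current C++ Nix):
+
+```nix
+# the initial accumulator is never forced if no step demands it
+builtins.foldl' (acc: x: x) (builtins.throw "unused") [ 1 2 ]
+# => 2
+
+# but intermediate accumulators are forced, even if a later step ignores them
+builtins.foldl' (acc: x: x) 1 [ (builtins.throw "forced") 2 ]
+# => error: forced
+```
+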
+[i7111]: https://github.com/NixOS/nix/issues/7111
+[i7012]: https://github.com/NixOS/nix/issues/7012
+[i7115]: https://github.com/NixOS/nix/issues/7115
+[p7158]: https://github.com/NixOS/nix/pull/7158
diff --git a/tvix/docs/src/eval/opcodes-attrsets.md b/tvix/docs/src/eval/opcodes-attrsets.md
new file mode 100644
index 0000000000..7026f3319d
--- /dev/null
+++ b/tvix/docs/src/eval/opcodes-attrsets.md
@@ -0,0 +1,122 @@
+# attrset-opcodes
+
+The problem with attrset literals is twofold:
+
+1. The keys of attribute sets may be dynamically evaluated.
+
+   Access:
+
+   ```nix
+   let
+     k = "foo";
+     attrs = { /* etc. */ };
+   in attrs."${k}"
+   ```
+
+   Literal:
+   ```nix
+   let
+     k = "foo";
+   in {
+     "${k}" = 42;
+   }
+   ```
+
+   The problem with this is that the attribute set key is not known at
+   compile time, and needs to be dynamically evaluated by the VM as an
+   expression.
+
+   For the most part this should be pretty simple, assuming a
+   theoretical instruction set:
+
+   ```
+   0000  OP_CONSTANT(0) # key "foo"
+   0001  OP_CONSTANT(1) # value 42
+   0002  OP_ATTR_SET(1) # construct attrset from 2 stack values
+   ```
+
+   The operation pushing the key needs to be replaced with one that
+   leaves a single value (the key) on the stack, i.e. the code for the
+   expression, e.g.:
+
+   ```
+   0000..000n <operations leaving a string value on the stack>
+   000n+1     OP_CONSTANT(1) # value 42
+   000n+2     OP_ATTR_SET(1) # construct attrset from 2 stack values
+   ```
+
+   This is fairly easy to do by simply recursing in the compiler when
+   the key expression is encountered.
+
+2. The keys of attribute sets may be nested.
+
+   This is the non-trivial part of dealing with attribute set
+   literals. Specifically, the nesting can be arbitrarily deep and the
+   AST does not guarantee that related set keys are located
+   adjacently.
+
+   Furthermore, this frequently occurs in practice in Nix. We need a
+   bytecode representation that makes it possible to construct nested
+   attribute sets at runtime.
+
+   Proposal: AttrPath values
+
+   If we can leave a value representing an attribute path on the
+   stack, we can offload the construction of nested attribute sets to
+   the `OpAttrSet` operation.
+
+   Under the hood, OpAttrSet in practice constructs a `Map<NixString,
+   Value>` attribute set in most cases. This means it expects to pop the
+   value and the key off the stack, but is otherwise free to do whatever
+   it wants with the underlying map.
+
+   In a simple example, we could have code like this:
+
+   ```nix
+   {
+     a.b = 42;
+   }
+   ```
+
+   This would be compiled to a new `OpAttrPath` instruction that
+   constructs and pushes an attribute path from a given number of
+   fragments (which are popped off the stack).
+
+   For example,
+
+   ```
+   0000 OP_CONSTANT(0)  # key "a"
+   0001 OP_CONSTANT(1)  # key "b"
+   0002 OP_ATTR_PATH(2) # construct attrpath from 2 fragments
+   0003 OP_CONSTANT(2)  # value 42
+   0004 OP_ATTR_SET(1)  # construct attrset from one pair
+   ```
+
+   Right before `0004` the stack would be left like this:
+
+   [ AttrPath[a,b], 42 ]
+
+   Inside of the `OP_ATTR_SET` instruction we could then begin
+   construction of the map and insert the nested attribute sets as
+   required, as well as validate that there are no duplicate keys (see
+   the sketch at the end of this document).
+
+3. Both of these cases can occur simultaneously, but this is not a
+   problem as the opcodes combine perfectly fine, e.g.:
+
+   ```nix
+   let
+     k = "a";
+   in {
+     "${k}".b = 42;
+   }
+   ```
+
+   results in
+
+   ```
+   0000..000n <operations leaving a string value on the stack>
+   000n+1     OP_CONSTANT(1)  # key "b"
+   000n+2     OP_ATTR_PATH(2) # construct attrpath from 2 fragments
+   000n+3     OP_CONSTANT(2)  # value 42
+   000n+4     OP_ATTR_SET(1)  # construct attrset from 2 stack values
+   ```
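+
+As a rough, runnable illustration of that idea (with simplified stand-in
+types, not the actual tvix-eval implementation), an `OP_ATTR_SET`-style
+handler could consume such attribute paths to build nested maps like this:
+
+```rust
+use std::collections::BTreeMap;
+
+/// Simplified stand-ins for the real tvix-eval value types.
+#[derive(Debug)]
+enum Value {
+    Int(i64),
+    Attrs(BTreeMap<String, Value>),
+}
+
+/// Insert `value` under a (possibly nested) attribute path, creating
+/// intermediate attribute sets as required and rejecting duplicate keys.
+fn insert_nested(
+    attrs: &mut BTreeMap<String, Value>,
+    mut path: Vec<String>,
+    value: Value,
+) -> Result<(), String> {
+    let key = path.remove(0);
+    if path.is_empty() {
+        if attrs.contains_key(&key) {
+            return Err(format!("duplicate key: {key}"));
+        }
+        attrs.insert(key, value);
+        return Ok(());
+    }
+    // Recurse into (or create) the nested attribute set for this fragment.
+    match attrs.entry(key).or_insert_with(|| Value::Attrs(BTreeMap::new())) {
+        Value::Attrs(inner) => insert_nested(inner, path, value),
+        _ => Err("tried to nest into a non-attrset value".into()),
+    }
+}
+
+fn main() -> Result<(), String> {
+    // Stack right before OP_ATTR_SET(1): [ AttrPath[a,b], 42 ]
+    let mut attrs = BTreeMap::new();
+    insert_nested(&mut attrs, vec!["a".into(), "b".into()], Value::Int(42))?;
+    println!("{attrs:?}"); // {"a": Attrs({"b": Int(42)})}
+    Ok(())
+}
+```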
diff --git a/tvix/docs/src/eval/recursive-attrs.md b/tvix/docs/src/eval/recursive-attrs.md
new file mode 100644
index 0000000000..5ce1cb2b64
--- /dev/null
+++ b/tvix/docs/src/eval/recursive-attrs.md
@@ -0,0 +1,67 @@
+# Recursive attribute sets
+
+The construction behaviour of recursive attribute sets is very
+specific, and a bit peculiar.
+
+In essence, there are multiple "phases" of scoping that take place
+during attribute set construction:
+
+1. Every inherited value without an explicit source is inherited only
+   from the **outer** scope in which the attribute set is enclosed.
+
+2. A new scope is opened in which all recursive keys are evaluated.
+   This only considers **statically known keys**; attributes can
+   **not** recurse into dynamic keys in `self`!
+
+   For example, this code is invalid in C++ Nix:
+
+   ```
+   nix-repl> rec { ${"a"+""} = 2; b = a * 10; }
+   error: undefined variable 'a' at (string):1:26
+   ```
+
+3. Finally, a third scope is opened in which dynamic keys are
+   evaluated.
+
+This behaviour, while possibly a bit strange and unexpected, actually
+simplifies the implementation of recursive attribute sets in Tvix as
+well.
+
+Essentially, a recursive attribute set like this:
+
+```nix
+rec {
+  inherit a;
+  b = a * 10;
+  ${"c" + ""} = b * 2;
+}
+```
+
+Can be compiled like the following expression:
+
+```nix
+let
+  inherit a;
+in let
+  b = a * 10;
+in {
+  inherit a b;
+  ${"c" + ""} = b * 2;
+}
+```
+
+This completely defers the resolution of recursive identifiers to the
+existing handling of recursive scopes (i.e. deferred access) in `let`
+bindings.
+
+In practice, we can further specialise this and compile each scope
+directly into the form expected by `OpAttrs` (that is, leaving
+attribute names on the stack) before each value's position.
+
+## C++ Nix's Implementation
+
+* [`ExprAttrs`](https://github.com/NixOS/nix/blob/2097c30b08af19a9b42705fbc07463bea60dfb5b/src/libexpr/nixexpr.hh#L241-L268)
+  (AST representation of attribute sets)
+* [`ExprAttrs::eval`](https://github.com/NixOS/nix/blob/075bf6e5565aff9fba0ea02f3333c82adf4dccee/src/libexpr/eval.cc#L1333-L1414)
+* [`addAttr`](https://github.com/NixOS/nix/blob/master/src/libexpr/parser.y#L98-L156) (`ExprAttrs` construction in the parser)
diff --git a/tvix/docs/src/eval/vm-loop.md b/tvix/docs/src/eval/vm-loop.md
new file mode 100644
index 0000000000..a75c7eec31
--- /dev/null
+++ b/tvix/docs/src/eval/vm-loop.md
@@ -0,0 +1,314 @@
+# tvix-eval VM loop
+
+This document describes the new tvix-eval VM execution loop implemented in the
+chain of CLs around cl/8104.
+
+## Background
+
+The VM loop implemented in Tvix prior to cl/8104 had several functions:
+
+1. Advancing the instruction pointer for a chunk of Tvix bytecode and
+   executing instructions in a loop until a result was yielded.
+
+2. Tracking Nix call frames as functions/thunks were entered/exited.
+
+3. Catching trampoline requests returned from instructions to force suspended
+   thunks without increasing stack size *where possible*.
+
+4. Handling trampolines through an inner trampoline loop, switching between a
+   code execution mode and execution of subsequent trampolines.
+
+This implementation of the trampoline logic was added on to the existing VM,
+which previously always recursed for thunk forcing. There are some cases (for
+example values that need to be forced *inside* of the execution of a builtin)
+where trampolines could not previously be used, and the VM recursed anyways.
+
+As a result of this trampoline logic being added "on top" of the existing VM
+loop, the code became quite difficult to understand. This led to several bugs,
+for example: b/251, b/246, b/245, and b/238.
+
+These bugs were tricky to deal with, as we had to try and make the VM do
+things that are somewhat difficult to fit into its model. We could of course
+keep extending the trampoline logic to accommodate all sorts of concepts (such
+as finalisers), but that seems like it does not solve the root problem.
+
+## New VM loop
+
+In cl/8104, a unified new solution is implemented with which the VM is capable
+of evaluating everything without increasing the call stack size.
+
+This is done by introducing a new frame stack in the VM, on which execution
+frames are enqueued that are either:
+
+1. A bytecode frame, consisting of Tvix bytecode that evaluates compiled Nix
+   code.
+2. A generator frame, consisting of some VM logic implemented in pure Rust
+   code that can be *suspended* when it hits a point where the VM would
+   previously need to recurse.
+
+We do this by making use of the `async` *keyword* in Rust, but notably
+*without* introducing asynchronous I/O or concurrency in tvix-eval (the
+complexity of which is currently undesirable for us).
+
+Specifically, when writing a Rust function that uses the `async` keyword, such
+as:
+
+```rust
+async fn some_builtin(input: Value) -> Result<Value, ErrorKind> {
+  let mut out = NixList::new();
+
+  for element in input.to_list()? {
+    let result = do_something_that_requires_the_vm(element).await;
+    out.push(result);
+  }
+
+  Ok(Value::List(out))
+}
+```
+
+the compiler actually generates a state machine under the hood, which allows
+the execution of that function to be *suspended* whenever it hits an `await`.
+
+We use the [`genawaiter`][] crate that gives us a data structure and simple
+interface for getting instances of these state machines that can be stored in
+a struct (in our case, a *generator frame*).
+
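+As a minimal, self-contained sketch of that interface (the message types here
+are made up for illustration; tvix-eval's real ones are the
+`VMRequest`/`VMResponse` pair discussed further below):
+
+```rust
+use genawaiter::{rc::Gen, GeneratorState};
+
+enum Request { ForceValue(u32) }
+enum Response { Empty, Forced(u32) }
+
+fn main() {
+    // A "generator frame": ordinary Rust logic that suspends itself whenever
+    // it needs the VM to do something (here: force a value).
+    let mut frame = Gen::new(|co| async move {
+        match co.yield_(Request::ForceValue(7)).await {
+            Response::Forced(v) => v * 2, // the frame's final result
+            Response::Empty => panic!("unexpected response"),
+        }
+    });
+
+    // The VM drives the frame; the very first resume carries `Empty`.
+    let mut msg = Response::Empty;
+    loop {
+        match frame.resume_with(msg) {
+            GeneratorState::Yielded(Request::ForceValue(v)) => {
+                // The real VM would force a thunk here, possibly by pushing
+                // further frames; for the sketch we answer immediately.
+                msg = Response::Forced(v);
+            }
+            GeneratorState::Complete(result) => {
+                println!("frame finished with {result}");
+                break;
+            }
+        }
+    }
+}
+```
+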
+The execution of the VM then becomes the execution of an *outer loop*, which
+is responsible for selecting the next generator frame to execute, and two
+*inner loops*, which drive the execution of a bytecode frame or generator
+frame forward until it either yields a value or asks to be suspended in favour
+of another frame.
+
+All "communication" between frames happens solely through values left on the
+stack: Whenever a frame of either type runs to completion, it is expected to
+leave a *single* value on the stack. It follows that the whole VM, upon
+completion of the last (or initial, depending on your perspective) frame,
+yields its result as the return value.
+
+The core of the VM restructuring is cl/8104, unfortunately one of the largest
+single-commit changes we've had to make yet, as it touches pretty much all
+areas of tvix-eval. The introduction of the generators and the
+message/response system we built to request something from the VM, suspend a
+generator, and wait for the return is in cl/8148.
+
+The next sections describe in detail how the three different loops work.
+
+### Outer VM loop
+
+The outer VM loop is responsible for selecting the next frame to run, and
+dispatching it correctly to inner loops, as well as determining when to shut
+down the VM and return the final result.
+
+```
+                          ╭──────────────────╮
+                 ╭────────┤ match frame kind ├──────╮
+                 │        ╰──────────────────╯      │
+                 │                                  │
+    ┏━━━━━━━━━━━━┷━━━━━┓                ╭───────────┴───────────╮
+───►┃ frame_stack.pop()┃                ▼                       ▼
+    ┗━━━━━━━━━━━━━━━━━━┛       ┏━━━━━━━━━━━━━━━━┓      ┏━━━━━━━━━━━━━━━━━┓
+                 ▲             ┃ bytecode frame ┃      ┃ generator frame ┃
+                 │             ┗━━━━━━━━┯━━━━━━━┛      ┗━━━━━━━━┯━━━━━━━━┛
+                 │[yes, cont.]          │                       │
+                 │                      ▼                       ▼
+    ┏━━━━━━━━┓   │             ╔════════════════╗      ╔═════════════════╗
+◄───┨ return ┃   │             ║ inner bytecode ║      ║ inner generator ║
+    ┗━━━━━━━━┛   │             ║      loop      ║      ║      loop       ║
+        ▲        │             ╚════════╤═══════╝      ╚════════╤════════╝
+        │   ╭────┴─────╮                │                       │
+        │   │ has next │                ╰───────────┬───────────╯
+   [no] ╰───┤  frame?  │                            │
+            ╰────┬─────╯                            ▼
+                 │                         ┏━━━━━━━━━━━━━━━━━┓
+                 │                         ┃ frame completed ┃
+                 ╰─────────────────────────┨  or suspended   ┃
+                                           ┗━━━━━━━━━━━━━━━━━┛
+```
+
+Initially, the VM always pops a frame from the frame stack and then inspects
+the type of frame it found. As a consequence the next frame to execute is
+always the frame at the top of the stack, and setting up a VM initially for
+code execution is done by leaving a bytecode frame with the code to execute on
+the stack and passing control to the outer loop.
+
+Control is dispatched to either of the inner loops (depending on the type of
+frame) and the cycle continues once they return.
+
+When an inner loop returns, it has either finished its execution (and left its
+result value on the *value stack*), or its frame has requested to be
+suspended.
+
+Frames request suspension by re-enqueueing *themselves* through VM helper
+methods, and then leaving the frame they want to run *on top* of themselves in
+the frame stack before yielding control back to the outer loop.
+
+The inner control loops inform the outer loops about whether the frame has
+been *completed* or *suspended* by returning a boolean.
+
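+A strongly simplified, runnable sketch of this dispatch protocol (with toy
+frame types standing in for real bytecode chunks and stored generators, and
+hard-coded results instead of real evaluation) could look like this:
+
+```rust
+#[derive(Debug)]
+enum Value { Int(i64) }
+
+// Minimal stand-ins: real frames carry chunks, instruction pointers,
+// upvalues, suspended generators, spans, etc.
+enum Frame {
+    Bytecode { result: i64 },
+    Generator { done: bool },
+}
+
+struct VM {
+    frame_stack: Vec<Frame>,
+    value_stack: Vec<Value>,
+}
+
+impl VM {
+    /// Outer loop: pop the topmost frame, dispatch to the matching inner
+    /// loop, and keep going until no frames are left.
+    fn execute(mut self) -> Value {
+        while let Some(frame) = self.frame_stack.pop() {
+            let _completed = match frame {
+                Frame::Bytecode { result } => self.run_bytecode(result),
+                Frame::Generator { done } => self.run_generator(done),
+            };
+            // `false` means the frame suspended itself *after* re-enqueueing
+            // frames onto `self.frame_stack`; the loop simply continues with
+            // whatever is now on top.
+        }
+        // The last frame to complete leaves the VM's result on the stack.
+        self.value_stack.pop().expect("VM finished without a result")
+    }
+
+    /// Stand-in for the inner bytecode loop: leaves one value on the stack
+    /// and reports completion.
+    fn run_bytecode(&mut self, result: i64) -> bool {
+        self.value_stack.push(Value::Int(result));
+        true
+    }
+
+    /// Stand-in for the inner generator loop: on its first run it suspends
+    /// itself and asks for a bytecode frame to be run first.
+    fn run_generator(&mut self, done: bool) -> bool {
+        if done {
+            // Resumed: consume the value the bytecode frame left behind.
+            let Value::Int(n) = self.value_stack.pop().unwrap();
+            self.value_stack.push(Value::Int(n * 2));
+            true
+        } else {
+            // Suspend: re-enqueue ourselves, then the frame we want run.
+            self.frame_stack.push(Frame::Generator { done: true });
+            self.frame_stack.push(Frame::Bytecode { result: 21 });
+            false
+        }
+    }
+}
+
+fn main() {
+    let vm = VM {
+        frame_stack: vec![Frame::Generator { done: false }],
+        value_stack: vec![],
+    };
+    println!("{:?}", vm.execute()); // Int(42)
+}
+```
+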
+### Inner bytecode loop
+
+The inner bytecode loop drives the execution of some Tvix bytecode by
+continuously looking at the next instruction to execute, and dispatching to the
+instruction handler.
+
+```
+   ┏━━━━━━━━━━━━━┓
+◄──┨ return true ┃
+   ┗━━━━━━━━━━━━━┛
+          ▲
+     ╔════╧═════╗
+     ║ OpReturn ║
+     ╚══════════╝
+          ▲
+          ╰──┬────────────────────────────╮
+             │                            ▼
+             │                 ╔═════════════════════╗
+    ┏━━━━━━━━┷━━━━━┓           ║ execute instruction ║
+───►┃ inspect next ┃           ╚══════════╤══════════╝
+    ┃  instruction ┃                      │
+    ┗━━━━━━━━━━━━━━┛                      │
+             ▲                      ╭─────┴─────╮
+             ╰──────────────────────┤ suspends? │
+                       [no]         ╰─────┬─────╯
+                                          │
+                                          │
+   ┏━━━━━━━━━━━━━━┓                       │
+◄──┨ return false ┃───────────────────────╯
+   ┗━━━━━━━━━━━━━━┛              [yes]
+```
+
+With this refactoring, the compiler now emits a special `OpReturn` instruction
+at the end of bytecode chunks. This is a signal to the runtime that the chunk
+has completed and that its current value should be returned, without having to
+perform instruction pointer arithmetic.
+
+When `OpReturn` is encountered, the inner bytecode loop returns control to the
+outer loop and informs it (by returning `true`) that the bytecode frame has
+completed.
+
+Any other instruction may also request a suspension of the bytecode frame (for
+example, instructions that need to force a value). In this case the inner loop
+is responsible for setting up the frame stack correctly, and returning `false`
+to inform the outer loop of the suspension.
+
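+The contract between this inner loop and the outer loop can be sketched with
+a toy instruction set (these are not Tvix's real opcodes):
+
+```rust
+#[derive(Clone, Copy)]
+enum Op {
+    Constant(i64),
+    Return,
+    ForceThunk, // stands in for any instruction that must suspend the frame
+}
+
+struct BytecodeFrame {
+    ops: Vec<Op>,
+    ip: usize,
+}
+
+/// `true` means the frame ran to completion, `false` means it suspended
+/// (after arranging the frame stack itself, elided here).
+fn run_bytecode(frame: &mut BytecodeFrame, value_stack: &mut Vec<i64>) -> bool {
+    loop {
+        let op = frame.ops[frame.ip]; // `Op` is `Copy`
+        frame.ip += 1;
+        match op {
+            Op::Constant(c) => value_stack.push(c),
+            // OpReturn: the chunk is done; its result stays on the stack.
+            Op::Return => return true,
+            // An instruction needing another frame first: the real VM
+            // re-enqueues this frame (plus the frame to force) and yields.
+            Op::ForceThunk => return false,
+        }
+    }
+}
+
+fn main() {
+    let mut stack = Vec::new();
+
+    let mut completes = BytecodeFrame { ops: vec![Op::Constant(42), Op::Return], ip: 0 };
+    assert!(run_bytecode(&mut completes, &mut stack));
+
+    let mut suspends = BytecodeFrame { ops: vec![Op::ForceThunk, Op::Return], ip: 0 };
+    assert!(!run_bytecode(&mut suspends, &mut stack));
+
+    println!("result: {:?}", stack.pop()); // Some(42)
+}
+```
+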
+### Inner generator loop
+
+The inner generator loop is responsible for driving the execution of a
+generator frame by continuously calling [`Gen::resume`][] until it requests a
+suspension (as a result of which control is returned to the outer loop), or
+until the generator is done and yields a value.
+
+```
+   ┏━━━━━━━━━━━━━┓
+◄──┨ return true ┃ ◄───────────────────╮
+   ┗━━━━━━━━━━━━━┛                     │
+                                       │
+                               [Done]  │
+                    ╭──────────────────┴─────────╮
+                    │ inspect generator response │◄────────────╮
+                    ╰──────────────────┬─────────╯             │
+                            [yielded]  │              ┏━━━━━━━━┷━━━━━━━━┓
+                                       │              ┃ gen.resume(msg) ┃◄──
+                                       ▼              ┗━━━━━━━━━━━━━━━━━┛
+                                 ╭────────────╮                ▲
+                                 │ same-frame │                │
+                                 │  request?  ├────────────────╯
+                                 ╰─────┬──────╯      [yes]
+   ┏━━━━━━━━━━━━━━┓                    │
+◄──┨ return false ┃ ◄──────────────────╯
+   ┗━━━━━━━━━━━━━━┛                [no]
+```
+
+On each execution of a generator frame, `resume_with` is called with a
+[`VMResponse`][] (i.e. a message *from* the VM *to* the generator). For a newly
+created generator, the initial message is just `Empty`.
+
+A generator may then respond by signaling that it has finished execution
+(`Done`), in which case the inner generator loop returns control to the outer
+loop and informs it that this generator is done (by returning `true`).
+
+A generator may also respond by signaling that it needs some data from the VM.
+This is implemented through a request-response pattern, in which the generator
+returns a `Yielded` message containing a [`VMRequest`][]. These requests can be
+very simple ("Tell me the current store path") or more complex ("Call this Nix
+function with these values").
+
+Requests are divided into two classes: Same-frame requests (requests that can be
+responded to *without* returning control to the outer loop, i.e. without
+executing a *different* frame), and multi-frame generator requests. Based on the
+type of request, the inner generator loop will either handle it right away and
+send the response in a new `resume_with` call, or return `false` to the outer
+loop after setting up the frame stack.
+
+Most of this logic is implemented in cl/8148.
+
+[`Gen::resume`]: https://docs.rs/genawaiter/0.99.1/genawaiter/rc/struct.Gen.html#method.resume_with
+[`VMRequest`]: https://cs.tvl.fyi/depot@2696839770c1ccb62929ff2575a633c07f5c9593/-/blob/tvix/eval/src/vm/generators.rs?L44
+[`VMResponse`]: https://cs.tvl.fyi/depot@2696839770c1ccb62929ff2575a633c07f5c9593/-/blob/tvix/eval/src/vm/generators.rs?L169
+
+## Advantages & Disadvantages of the approach
+
+This approach has several advantages:
+
+* The execution model is much simpler than before, making it fairly
+  straightforward to build up a mental model of what the VM does.
+
+* All "out of band requests" inside the VM are handled through the same
+  abstraction (generators).
+
+* Implementation is not difficult, albeit a little verbose in some cases (we
+  can argue about whether or not to introduce macros for simplifying it).
+
+* Several parts of the VM execution are now much easier to document,
+  potentially letting us onboard tvix-eval contributors faster.
+
+* The linear VM execution itself is much easier to trace now, with for example
+  the `RuntimeObserver` (and by extension `tvixbolt`) giving much clearer
+  output now.
+
+But it also comes with some disadvantages:
+
+* Even though we "only" use the `async` keyword without a full async-I/O
+  runtime, we still encounter many of the drawbacks of the fragmented Rust
+  async ecosystem.
+
+  The biggest issue with this is that parts of the standard library become
+  unavailable to us; for example, the built-in `Vec::sort_by` can no longer be
+  used for sorting in Nix because our comparators themselves are `async`.
+
+  This led us to having to implement some logic on our own, as the design of
+  `async` in Rust even makes it difficult to provide use-case-generic
+  implementations of concepts like sorting.
+
+* We need to allocate quite a few new structures on the heap in order to drive
+  generators, as generators involve storing `Future` types (with unknown
+  sizes) inside of structs.
+
+  In initial testing this seems to make no significant difference in
+  performance (our performance in an actual nixpkgs-eval is still bottlenecked
+  by I/O concerns and reference scanning), but is something to keep in mind
+  later on when we start optimising more after the low-hanging fruits have
+  been reaped.
+
+## Alternatives considered
+
+1. Tacking on more functionality onto the existing VM loop
+   implementation to accommodate problems as they show up. This is not
+   preferred as the code is already getting messy.
+
+2. Making tvix-eval a fully `async` project, pulling in something like Tokio
+   or `async-std` as a runtime. This is not preferred due to the massively
+   increased complexity of those solutions, and all the known issues of fully
+   buying in to the async ecosystem.
+
+   tvix-eval fundamentally should work for use-cases besides building Nix
+   packages (e.g. for `//tvix/serde`), and its profile should be as slim as
+   possible.
+
+3. Convincing the Rust developers that Rust needs a way to guarantee
+   constant-stack-depth tail calls through something like a `tailcall`
+   keyword.
+
+4. ... ?
+
+[`genawaiter`]: https://docs.rs/genawaiter/
diff --git a/tvix/docs/src/getting-started.md b/tvix/docs/src/getting-started.md
new file mode 100644
index 0000000000..1cbb6de7d4
--- /dev/null
+++ b/tvix/docs/src/getting-started.md
@@ -0,0 +1,59 @@
+# Getting Started
+
+## Getting the code, a developer shell, & building the CLI
+
+Tvix can be built with the standard Rust tooling (`cargo build`). A Nix shell
+is provided with the correctly-versioned tooling to build it.
+
+### TVL monorepo
+
+```console
+$ git clone https://code.tvl.fyi/depot.git
+$ cd depot
+```
+
+[Direnv][] is highly recommended in order to enable [`mg`][mg], a tool for
+workflows in monorepos. Follow the [Direnv installation
+instructions][direnv-inst], then after it’s set up continue with:
+
+```console
+$ direnv allow
+$ mg shell //tvix:shell
+$ cd tvix
+$ cargo build
+```
+
+### Or just Tvix
+
+At present, this option isn’t suitable for contributions & lacks the tooling of
+the monorepo, but still provides a `shell.nix` which can be used for building
+the Tvix project.
+
+```console
+$ git clone https://code.tvl.fyi/depot.git:workspace=views/tvix.git
+$ cd tvix
+$ nix-shell
+$ cargo build
+```
+
+
+# Builds & tests
+
+All projects are built using [Nix][] to avoid ‘build pollution’ via the user’s
+local environment.
+
+If you have Nix installed and are contributing to a project tracked in this
+repository, you can usually build the project by calling `nix-build -A
+path.to.project`.
+
+For example, to build a project located at `//tools/foo` you would call
+`nix-build -A tools.foo`.
+
+If the project has tests, check that they still work before submitting your
+change.
+
+
+[Direnv]: https://direnv.net
+[direnv-inst]: https://direnv.net/docs/installation.html
+[Nix]: https://nixos.org/nix/
+[mg]: https://code.tvl.fyi/tree/tools/magrathea
diff --git a/tvix/docs/src/language-spec.md b/tvix/docs/src/language-spec.md
index 0ff1dc491e..b3908b2cf4 100644
--- a/tvix/docs/src/language-spec.md
+++ b/tvix/docs/src/language-spec.md
@@ -1,8 +1,10 @@
 # Specification of the Nix Language
 
-WARNING: This document is a work in progress. Please keep an eye on
+```admonish attention
+This document is a work in progress. Please keep an eye on
 [`topic:nix-spec`](https://cl.tvl.fyi/q/topic:nix-spec) for ongoing
 CLs.
+```
 
 Nix is a general-purpose, functional programming language which this
 document aims to describe.
diff --git a/tvix/docs/src/nix-daemon/index.md b/tvix/docs/src/nix-daemon/index.md
new file mode 100644
index 0000000000..e47c20151e
--- /dev/null
+++ b/tvix/docs/src/nix-daemon/index.md
@@ -0,0 +1,15 @@
+# Nix Daemon Protocol
+
+The Nix Daemon protocol is what's used to communicate with the `nix-daemon`,
+either on the local system (in which case the communication happens via a Unix
+domain socket), or with a remote Nix (in which case this is tunneled over SSH).
+
+It uses a custom binary format which isn't well documented. The subpages here
+serve as in-depth documentation of some of the inner workings, data types,
+etc.
+
+A first implementation of this exists in
+[griff/Nix.rs](https://github.com/griff/Nix.rs/tree/main).
+
+Work is underway to port / factor this out into reusable building blocks in
+the [nix-compat] crate.
diff --git a/tvix/docs/src/store/api.md b/tvix/docs/src/store/api.md
new file mode 100644
index 0000000000..b20ec0e8bf
--- /dev/null
+++ b/tvix/docs/src/store/api.md
@@ -0,0 +1,287 @@
+# tvix-[ca]store API
+
+This document outlines the design of the API exposed by tvix-castore and
+tvix-store, as well as other implementations of this store protocol.
+
+This document is meant to be read side-by-side with
+[Data Model](../castore/data-model.md) which describes the data model
+in more detail.
+
+The store API has four main consumers:
+
+1. The evaluator (or more correctly, the CLI/coordinator, in the Tvix
+   case) communicates with the store to:
+
+   * Upload files and directories (e.g. from `builtins.path`, or `src = ./path`
+     Nix expressions).
+   * Read files from the store where necessary (e.g. when `nixpkgs` is
+     located in the store, or for IFD).
+
+2. The builder communicates with the store to:
+
+   * Upload files and directories after a build, to persist build artifacts in
+     the store.
+
+3. Tvix clients (such as users that have Tvix installed, or, depending
+   on perspective, builder environments) expect the store to
+   "materialise" on disk to provide a directory layout with store
+   paths.
+
+4. Stores may communicate with other stores, to substitute already built store
+   paths, i.e. a store acts as a binary cache for other stores.
+
+The store API attempts to reuse parts of its API between these
+consumers by making similarities explicit in the protocol. This leads
+to a protocol that is slightly more complex than a simple "file
+upload/download"-system, but at significantly greater efficiency, both in terms
+of deduplication opportunities as well as granularity.
+
+## The Store model
+
+Contents inside a tvix-store can be grouped into three different message types:
+
+ * Blobs
+ * Directories
+ * PathInfo (see further down)
+
+(check the [Data Model](../castore/data-model.md) page for more detailed field descriptions)
+
+### Blobs
+A blob object contains the literal file contents of regular (or executable)
+files.
+
+### Directory
+A directory object describes the direct children of a directory.
+
+It contains (see the sketch below):
+ - name of child (regular or executable) files, and their [blake3][blake3] hash.
+ - name of child symlinks, and their target (as string)
+ - name of child directories, and their [blake3][blake3] hash (forming a Merkle DAG)
+
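+Sketched as illustrative Rust types (field names follow the description and
+the JSON examples below, not necessarily the authoritative protobuf schema):
+
+```rust
+type B3Digest = [u8; 32]; // blake3 digests are 32 bytes long
+
+struct DirectoryNode { name: String, digest: B3Digest, size: u64 }
+struct FileNode { name: String, digest: B3Digest, size: u64, executable: bool }
+struct SymlinkNode { name: String, target: String }
+
+struct Directory {
+    directories: Vec<DirectoryNode>,
+    files: Vec<FileNode>,
+    symlinks: Vec<SymlinkNode>,
+}
+```
+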
+### Content-addressed Store Model
+For example, let's consider a directory layout like this, with some
+imaginary hashes of file contents:
+
+```
+.
+├── file-1.txt        hash: 5891b5b522d5df086d0ff0b110fb
+└── nested
+    └── file-2.txt    hash: abc6fd595fc079d3114d4b71a4d8
+```
+
+A hash for the *directory* `nested` can be derived by creating the `Directory`
+object:
+
+```json
+{
+  "directories": [],
+  "files": [{
+    "name": "file-2.txt",
+    "digest": "abc6fd595fc079d3114d4b71a4d8",
+    "size": 123,
+  }],
+  "symlink": [],
+}
+```
+
+And then hashing a serialised form of that data structure. We use the blake3
+hash of the canonical protobuf representation. Let's assume the hash was
+`ff0029485729bcde993720749232`.
+
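+As a sketch, assuming `directory_bytes` already holds the canonically
+serialised protobuf message, computing such a digest with the `blake3` crate
+boils down to:
+
+```rust
+fn main() {
+    // Pretend these are the canonically serialised protobuf bytes of the
+    // `nested` Directory object shown above.
+    let directory_bytes: &[u8] = b"<canonical protobuf bytes>";
+
+    // The digest that parent Directory objects (and PathInfo root nodes)
+    // refer to is the blake3 hash of those bytes.
+    let digest = blake3::hash(directory_bytes);
+    println!("directory digest: {}", digest.to_hex());
+}
+```
+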
+To create the directory object one layer up, we now refer to our `nested`
+directory object in `directories`, and to `file-1.txt` in `files`:
+
+```json
+{
+  "directories": [{
+    "name": "nested",
+    "digest": "ff0029485729bcde993720749232",
+    "size": 1,
+  }],
+  "files": [{
+    "name": "file-1.txt",
+    "digest": "5891b5b522d5df086d0ff0b110fb",
+    "size": 124,
+  }]
+}
+```
+
+This Merkle DAG of Directory objects and flat store of blobs can be used to
+describe any file/directory/symlink inside a store path. Due to its
+content-addressed nature, it'll automatically deduplicate (re-)used
+(sub)directories, and allow substitution from any (untrusted) source.
+
+The only thing that's now missing is the metadata to map/"mount" from the
+content-addressed world to a physical path.
+
+### PathInfo
+As most paths in the Nix store currently are input-addressed [^input-addressed],
+and the `tvix-castore` data model is also not intrinsically using NAR hashes,
+we need something mapping from an input-addressed "output path hash" (or a
+Nix-specific content-addressed path) to the contents in the `tvix-castore` world.
+
+That's what `PathInfo` provides. It embeds the root node (Directory, File or
+Symlink) at a given store path.
+
+The root node's `name` field is populated with the (base)name inside
+`/nix/store`, e.g. `xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-pname-1.2.3`.
+
+The `PathInfo` message also stores references to other store paths, and some
+more NARInfo-specific metadata (signatures, narhash, narsize).
+
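+A rough, illustrative Rust shape of this message (the authoritative
+definition is the tvix-store protobuf schema, which differs in its details):
+
+```rust
+enum Node {
+    Directory { name: String, digest: [u8; 32], size: u64 },
+    File { name: String, digest: [u8; 32], size: u64, executable: bool },
+    Symlink { name: String, target: String },
+}
+
+struct NarInfo {
+    nar_size: u64,
+    nar_sha256: [u8; 32],
+    signatures: Vec<String>,
+}
+
+struct PathInfo {
+    /// Root node; its `name` is the store path basename.
+    node: Node,
+    /// Other store paths referenced by this one.
+    references: Vec<String>,
+    /// NARInfo-style metadata carried along from Nix.
+    narinfo: Option<NarInfo>,
+}
+```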
+
+## API overview
+
+There are three different services:
+
+### BlobService
+`BlobService` can be used to store and retrieve blobs of data, used to host
+regular file contents.
+
+It is content-addressed, using [blake3][blake3]
+as a hashing function.
+
+As blake3 is a tree hash, there's an opportunity to do
+[verified streaming][bao] of parts of the file,
+which doesn't need to trust any more information than the root hash itself.
+Future extensions of the `BlobService` protocol will enable this.
+
+### DirectoryService
+`DirectoryService` allows lookups (and uploads) of `Directory` messages, and
+whole reference graphs of them.
+
+
+### PathInfoService
+The PathInfo service provides lookups from a store path hash to a `PathInfo`
+message.
+
+## Example flows
+
+Below are some common use cases of tvix-store, and how the different
+services are used.
+
+### Upload files and directories
+This is needed for `builtins.path` or `src = ./path` in Nix expressions (A), as
+well as for uploading build artifacts to a store (B).
+
+The path specified needs to be (recursively, BFS-style) traversed.
+ * All file contents need to be hashed with blake3, and submitted to the
+   *BlobService* if not already present.
+   A reference to them needs to be added to the parent Directory object that's
+   constructed.
+ * All symlinks need to be added to the parent directory they reside in.
+ * Whenever a Directory has been fully traversed, it needs to be uploaded to
+   the *DirectoryService* and a reference to it needs to be added to the parent
+   Directory object.
+
+Most of the hashing / directory traversal/uploading can happen in parallel,
+as long as Directory objects only refer to Directory objects and Blobs that
+have already been uploaded.
+
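+A heavily simplified sketch of this ingestion (depth-first rather than BFS,
+in-memory maps standing in for *BlobService* and *DirectoryService*, symlinks
+ignored, no parallelism, and a debug string standing in for the canonical
+protobuf serialisation):
+
+```rust
+use std::{collections::BTreeMap, fs, io, path::Path};
+
+#[derive(Default)]
+struct Stores {
+    blobs: BTreeMap<[u8; 32], Vec<u8>>,
+    directories: BTreeMap<[u8; 32], Vec<(String, [u8; 32])>>, // name -> child digest
+}
+
+impl Stores {
+    /// Walk `path` depth-first, upload leaves before their parents, and
+    /// return the digest of the root Directory object.
+    fn ingest(&mut self, path: &Path) -> io::Result<[u8; 32]> {
+        let mut entries = Vec::new();
+        for entry in fs::read_dir(path)? {
+            let entry = entry?;
+            let name = entry.file_name().to_string_lossy().into_owned();
+            let digest = if entry.file_type()?.is_dir() {
+                // Child directories are uploaded before the parent, so the
+                // parent can refer to their digest.
+                self.ingest(&entry.path())?
+            } else {
+                let contents = fs::read(entry.path())?;
+                let digest = *blake3::hash(&contents).as_bytes();
+                self.blobs.insert(digest, contents); // "BlobService upload"
+                digest
+            };
+            entries.push((name, digest));
+        }
+        // Serialise the Directory object (debug format as a stand-in for the
+        // canonical protobuf form) and "upload" it to the DirectoryService.
+        let serialized = format!("{entries:?}");
+        let digest = *blake3::hash(serialized.as_bytes()).as_bytes();
+        self.directories.insert(digest, entries);
+        Ok(digest)
+    }
+}
+
+fn main() -> io::Result<()> {
+    let mut stores = Stores::default();
+    let root = stores.ingest(Path::new("."))?;
+    println!("root directory digest: {}", blake3::Hash::from(root).to_hex());
+    Ok(())
+}
+```
+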
+When reaching the root, a `PathInfo` object needs to be constructed.
+
+ * In the case of content-addressed paths (A), the name of the root node is
+   based on the NAR representation of the contents.
+   It might make sense to be able to offload the NAR calculation to the store,
+   which can cache it.
+ * In the case of build artifacts (B), the output path is input-addressed and
+   known upfront.
+
+Contrary to Nix, this has the advantage of not having to upload a lot of things
+to the store that didn't change.
+
+### Reading files from the store from the evaluator
+This is the case when `nixpkgs` is located in the store, or IFD in general.
+
+The store client asks the `PathInfoService` for the `PathInfo` of the output
+path in the request, and looks at the root node.
+
+If something other than the root of the store path is requested, like for
+example `maintainers/maintainer-list.nix`, the `root_node` Directory is
+inspected and potentially a chain of `Directory` objects is requested from
+*DirectoryService* [^n+1query].
+
+When the desired file is reached, the *BlobService* can be used to read the
+contents of this file, and return it back to the evaluator.
+
+FUTUREWORK: Define how importing from symlinks should/does work.
+
+Contrary to Nix, this has the advantage of not having to copy all of the
+contents of a store path to the evaluating machine, but really only fetching
+the files the evaluator currently cares about.
+
+### Materializing store paths on disk
+This is useful for people running a Tvix-only system, or running builds on a
+"Tvix remote builder" in its own mount namespace.
+
+In a system with Nix installed, we can't simply manually "extract" things to
+`/nix/store`, as Nix assumes it owns all writes to this location.
+In these use cases, we're probably better off exposing a tvix-store as a local
+binary cache (that's what `//tvix/nar-bridge-go` does).
+
+Assuming we are in an environment where we control `/nix/store` exclusively, a
+"realize to disk" would either "extract" things from the `tvix-store` to a
+filesystem, or expose a `FUSE`/`virtio-fs` filesystem.
+
+The latter is already implemented, and particularly interesting for (remote)
+build workloads, as build inputs can be realized on-demand, which saves copying
+around a lot of never-accessed files.
+
+In both cases, the API interactions are similar.
+ * The *PathInfoService* is asked for the `PathInfo` of the requested store path.
+ * If everything should be "extracted", the *DirectoryService* is asked for all
+   `Directory` objects in the closure, the file structure is created, all Blobs
+   are downloaded and placed in their corresponding location and all symlinks
+   are created accordingly.
+ * If this is a FUSE filesystem, we can decide to only request a subset,
+   similar to the "Reading files from the store from the evaluator" use case,
+   even though it might make sense to keep all Directory objects around.
+   (See the caveat in "Trust model" though!)
+
+### Stores communicating with other stores
+The gRPC API exposed by tvix-store allows composing multiple stores and
+implementing caching strategies that store clients don't need to be aware
+of.
+
+ * For example, a caching strategy could have a fast local tvix-store, that's
+   asked first and filled with data from a slower remote tvix-store.
+
+ * Multiple stores could be asked for the same data, and whatever store returns
+   the right data first wins.
+
+
+## Trust model / Distribution
+As already described above, the only non-content-addressed service is the
+`PathInfo` service.
+
+This means all other messages (such as `Blob` and `Directory` messages) can be
+substituted from many different, untrusted sources/mirrors, which makes
+plugging in additional substitution strategies, like IPFS or local network
+neighbors, very simple. That's also why they live in the `tvix-castore` crate.
+
+As for `PathInfo`, we don't specify an additional signature mechanism yet, but
+carry the NAR-based signatures from Nix along.
+
+This means that if we don't trust a remote `PathInfo` object, we currently need to
+"stream" the NAR representation to validate these signatures.
+
+However, the slow part is downloading of NAR files, and considering we have
+more granularity available, we might only need to download some small blobs,
+rather than a whole NAR file.
+
+A future signature mechanism that only signs (parts of) the `PathInfo`
+message, which in turn only points to content-addressed data, will enable
+verified partial access into a store path, opening up opportunities for lazy
+filesystem access etc.
+
+
+
+[blake3]: https://github.com/BLAKE3-team/BLAKE3
+[bao]: https://github.com/oconnor663/bao
+[^input-addressed]: Nix hashes the A-Term representation of a .drv, after doing
+                    some replacements on referred Input Derivations to calculate
+                    output paths.
+[^n+1query]: This would expose an N+1 query problem. However, it's not a problem
+             in practice, as there's usually a "local" caching store in the
+             loop, and *DirectoryService* supports a recursive lookup for all
+             `Directory` children of a `Directory`.
diff --git a/tvix/docs/src/value-pointer-equality.md b/tvix/docs/src/value-pointer-equality.md
index d84efcb50c..a4539513ef 100644
--- a/tvix/docs/src/value-pointer-equality.md
+++ b/tvix/docs/src/value-pointer-equality.md
@@ -47,8 +47,10 @@ works in C++ Nix, the only production ready Nix implementation currently availab
 
 ## Nix (Pointer) Equality in C++ Nix
 
-TIP: The summary presented here is up-to-date as of 2023-06-27 and was tested
-with Nix 2.3, 2.11 and 2.15.
+```admonish info
+The summary presented here is up-to-date as of 2023-06-27 and was tested with
+Nix 2.3, 2.11 and 2.15.
+```
 
 ### `EvalState::eqValues` and `ExprOpEq::eval`