Diffstat (limited to 'users/Profpatsch')
-rw-r--r--  users/Profpatsch/OWNERS | 4
-rw-r--r--  users/Profpatsch/advent-of-code/2020/01/main.py | 22
-rw-r--r--  users/Profpatsch/advent-of-code/2020/02/main.py | 77
-rw-r--r--  users/Profpatsch/advent-of-code/2020/03/main.py | 66
-rw-r--r--  users/Profpatsch/advent-of-code/2020/04/main.py | 104
-rw-r--r--  users/Profpatsch/aerc-no-config-perms.patch | 12
-rw-r--r--  users/Profpatsch/aerc.dhall | 157
-rw-r--r--  users/Profpatsch/aerc.nix | 51
-rw-r--r--  users/Profpatsch/alacritty.dhall | 48
-rw-r--r--  users/Profpatsch/alacritty.nix | 34
-rw-r--r--  users/Profpatsch/aliases.nix | 75
-rw-r--r--  users/Profpatsch/arglib/netencode.nix | 42
-rw-r--r--  users/Profpatsch/atomically-write.nix | 29
-rw-r--r--  users/Profpatsch/blog/default.nix | 472
-rw-r--r--  users/Profpatsch/blog/notes/an-idealized-conflang.md | 298
-rw-r--r--  users/Profpatsch/blog/notes/preventing-oom.md | 33
-rw-r--r--  users/Profpatsch/blog/notes/rust-string-conversions.md | 53
-rw-r--r--  users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md | 123
-rw-r--r--  users/Profpatsch/cdb.nix | 93
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/default.nix | 3
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/shell.nix | 17
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/test.json | 14
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/test.py | 13
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/test.sh | 14
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/tmp.el | 28
-rw-r--r--  users/Profpatsch/emacs-tree-sitter-move/tree-sitter-move.el | 139
-rw-r--r--  users/Profpatsch/exactSource.nix | 90
-rw-r--r--  users/Profpatsch/execline/default.nix | 37
-rw-r--r--  users/Profpatsch/execline/exec_helpers.rs | 149
-rw-r--r--  users/Profpatsch/git-db/default.nix | 10
-rw-r--r--  users/Profpatsch/git-db/git-db.rs | 90
-rw-r--r--  users/Profpatsch/imap-idle.nix | 17
-rw-r--r--  users/Profpatsch/imap-idle.rs | 140
-rw-r--r--  users/Profpatsch/importDhall.nix | 93
-rw-r--r--  users/Profpatsch/lens.nix | 137
-rw-r--r--  users/Profpatsch/lib.nix | 108
-rw-r--r--  users/Profpatsch/netencode/README.md | 115
-rw-r--r--  users/Profpatsch/netencode/default.nix | 160
-rw-r--r--  users/Profpatsch/netencode/gen.nix | 73
-rw-r--r--  users/Profpatsch/netencode/netencode-mustache.rs | 52
-rw-r--r--  users/Profpatsch/netencode/netencode.rs | 891
-rw-r--r--  users/Profpatsch/netencode/pretty.rs | 163
-rw-r--r--  users/Profpatsch/netstring/README.md | 18
-rw-r--r--  users/Profpatsch/netstring/default.nix | 69
-rw-r--r--  users/Profpatsch/netstring/tests/default.nix | 64
-rw-r--r--  users/Profpatsch/nix-home/default.nix | 203
-rw-r--r--  users/Profpatsch/nixpkgs-rewriter/MetaStdenvLib.hs | 80
-rw-r--r--  users/Profpatsch/nixpkgs-rewriter/default.nix | 148
-rw-r--r--  users/Profpatsch/read-http.nix | 19
-rw-r--r--  users/Profpatsch/read-http.rs | 249
-rw-r--r--  users/Profpatsch/reverse-haskell-deps.hs | 72
-rw-r--r--  users/Profpatsch/reverse-haskell-deps.nix | 31
-rw-r--r--  users/Profpatsch/solarized.dhall | 39
-rw-r--r--  users/Profpatsch/struct-edit/default.nix | 13
-rw-r--r--  users/Profpatsch/struct-edit/main.go | 431
-rw-r--r--  users/Profpatsch/toINI.nix | 79
-rw-r--r--  users/Profpatsch/tree-sitter.nix | 209
-rw-r--r--  users/Profpatsch/writers/default.nix | 97
-rw-r--r--  users/Profpatsch/writers/tests/default.nix | 56
-rw-r--r--  users/Profpatsch/ytextr/create-symlink-farm.nix | 19
-rw-r--r--  users/Profpatsch/ytextr/default.nix | 82
61 files changed, 6324 insertions, 0 deletions
diff --git a/users/Profpatsch/OWNERS b/users/Profpatsch/OWNERS
new file mode 100644
index 000000000000..5a73d4c3a1fd
--- /dev/null
+++ b/users/Profpatsch/OWNERS
@@ -0,0 +1,4 @@
+inherited: false
+owners:
+  - Profpatsch
+  - sterni
diff --git a/users/Profpatsch/advent-of-code/2020/01/main.py b/users/Profpatsch/advent-of-code/2020/01/main.py
new file mode 100644
index 000000000000..e636017a54d5
--- /dev/null
+++ b/users/Profpatsch/advent-of-code/2020/01/main.py
@@ -0,0 +1,22 @@
+import sys
+
+l = []
+with open('./input', 'r') as f:
+    for line in f:
+        l.append(int(line))
+
+s = set(l)
+
+res=None
+for el in s:
+    for el2 in s:
+        if (2020-(el+el2)) in s:
+            res=(el, el2, 2020-(el+el2))
+            break
+
+if res is None:
+    sys.exit("could not find a number that adds to 2020")
+
+print(res)
+
+print(res[0] * res[1] * res[2])
diff --git a/users/Profpatsch/advent-of-code/2020/02/main.py b/users/Profpatsch/advent-of-code/2020/02/main.py
new file mode 100644
index 000000000000..e3b27c382a21
--- /dev/null
+++ b/users/Profpatsch/advent-of-code/2020/02/main.py
@@ -0,0 +1,77 @@
+import sys
+
+def parse(line):
+    a = line.split(sep=" ", maxsplit=1)
+    assert len(a) == 2
+    fromto = a[0].split(sep="-")
+    assert len(fromto) == 2
+    (from_, to) = (int(fromto[0]), int(fromto[1]))
+    charpass = a[1].split(sep=": ")
+    assert len(charpass) == 2
+    char = charpass[0]
+    assert len(char) == 1
+    pass_ = charpass[1]
+    assert pass_.endswith("\n")
+    pass_ = pass_[:-1]
+    return {
+        "from": from_,
+        "to": to,
+        "char": char,
+        "pass": pass_
+    }
+
+def char_in_pass(char, pass_):
+    return pass_.count(char)
+
+def validate_01(entry):
+    no = char_in_pass(entry["char"], entry["pass"])
+    if no < entry["from"]:
+        return { "too-small": entry }
+    elif no > entry["to"]:
+        return { "too-big": entry }
+    else:
+        return { "ok": entry }
+
+def char_at_pos(char, pos, pass_):
+    assert pos <= len(pass_)
+    return pass_[pos-1] == char
+
+def validate_02(entry):
+    one = char_at_pos(entry["char"], entry["from"], entry["pass"])
+    two = char_at_pos(entry["char"], entry["to"], entry["pass"])
+    if one and two:
+        return { "both": entry }
+    elif one:
+        return { "one": entry }
+    elif two:
+        return { "two": entry }
+    else:
+        return { "none": entry }
+
+
+res01 = []
+res02 = []
+with open("./input", 'r') as f:
+    for line in f:
+        p = parse(line)
+        res01.append(validate_01(p))
+        res02.append(validate_02(p))
+
+count01=0
+for r in res01:
+    print(r)
+    if r.get("ok", False):
+        count01=count01+1
+
+count02=0
+for r in res02:
+    print(r)
+    if r.get("one", False):
+        count02=count02+1
+    elif r.get("two", False):
+        count02=count02+1
+    else:
+        pass
+
+print("count 1: {}".format(count01))
+print("count 2: {}".format(count02))
diff --git a/users/Profpatsch/advent-of-code/2020/03/main.py b/users/Profpatsch/advent-of-code/2020/03/main.py
new file mode 100644
index 000000000000..4d6baf946c3e
--- /dev/null
+++ b/users/Profpatsch/advent-of-code/2020/03/main.py
@@ -0,0 +1,66 @@
+import itertools
+import math
+
+def tree_line(init):
+    return {
+        "init-len": len(init),
+        "known": '',
+        "rest": itertools.repeat(init)
+    }
+
+def tree_line_at(pos, tree_line):
+    needed = (pos + 1) - len(tree_line["known"])
+    # internally advance the tree line to the position requested
+    if needed > 0:
+        tree_line["known"] = tree_line["known"] \
+          + ''.join(
+            itertools.islice(
+                tree_line["rest"],
+                1+math.floor(needed / tree_line["init-len"])))
+    # print(tree_line)
+    return tree_line["known"][pos] == '#'
+
+def tree_at(linepos, pos, trees):
+    return tree_line_at(pos, trees[linepos])
+
+def slope_positions(trees, right, down):
+    line = 0
+    pos = 0
+    while line < len(trees):
+        yield (line, pos)
+        line = line + down
+        pos = pos + right
+
+trees = []
+with open("./input", 'r') as f:
+    for line in f:
+        line = line.rstrip()
+        trees.append(tree_line(line))
+
+# print(list(itertools.islice(trees[0], 5)))
+# print(list(map(
+#     lambda x: tree_at(0, x, trees),
+#     range(100)
+# )))
+# print(list(slope_positions(trees, right=3, down=1)))
+
+def count_slope_positions(trees, slope):
+    count = 0
+    for (line, pos) in slope:
+        if tree_at(line, pos, trees):
+            count = count + 1
+    return count
+
+print(
+        count_slope_positions(trees, slope_positions(trees, right=1, down=1))
+    *
+        count_slope_positions(trees, slope_positions(trees, right=3, down=1))
+    *
+        count_slope_positions(trees, slope_positions(trees, right=5, down=1))
+    *
+        count_slope_positions(trees, slope_positions(trees, right=7, down=1))
+    *
+        count_slope_positions(trees, slope_positions(trees, right=1, down=2))
+)
+
+# I realized I could have just used a modulo instead …
diff --git a/users/Profpatsch/advent-of-code/2020/04/main.py b/users/Profpatsch/advent-of-code/2020/04/main.py
new file mode 100644
index 000000000000..36bbed7146d6
--- /dev/null
+++ b/users/Profpatsch/advent-of-code/2020/04/main.py
@@ -0,0 +1,104 @@
+import sys
+import itertools
+import re
+import pprint
+
+def get_entry(fd):
+    def to_dict(keyval):
+        res = {}
+        for (k, v) in keyval:
+            assert k not in res
+            res[k] = v
+        return res
+
+    res = []
+    for line in fd:
+        if line == "\n":
+            yield to_dict(res)
+            res = []
+        else:
+            line = line.rstrip()
+            items = line.split(" ")
+            for i in items:
+                res.append(i.split(":", maxsplit=2))
+
+def val_hgt(hgt):
+    m = re.fullmatch(r'([0-9]+)(cm|in)', hgt)
+    if m:
+        (i, what) = m.group(1,2)
+        i = int(i)
+        if what == "cm":
+            return i >= 150 and i <= 193
+        elif what == "in":
+            return i >= 59 and i <= 76
+        else:
+            return False
+
+required_fields = [
+    { "name": "byr",
+      "check": lambda s: int(s) >= 1920 and int(s) <= 2002
+    },
+    { "name": "iyr",
+      "check": lambda s: int(s) >= 2010 and int(s) <= 2020
+    },
+    { "name": "eyr",
+      "check": lambda s: int(s) >= 2020 and int(s) <= 2030,
+    },
+    { "name": "hgt",
+      "check": lambda s: val_hgt(s)
+    },
+    { "name": "hcl",
+      "check": lambda s: re.fullmatch(r'#[0-9a-f]{6}', s)
+    },
+    { "name": "ecl",
+      "check": lambda s: re.fullmatch(r'amb|blu|brn|gry|grn|hzl|oth', s)
+    },
+    { "name": "pid",
+      "check": lambda s: re.fullmatch(r'[0-9]{9}', s)
+    },
+    # we should treat it as not required
+    # "cid"
+]
+
+required_dict = {}
+for f in required_fields:
+    required_dict[f["name"]] = f
+
+def validate(keyval):
+    if keyval[0] not in required_dict:
+        return { "ok": keyval }
+    if required_dict[keyval[0]]["check"](keyval[1]):
+        return { "ok": keyval }
+    else:
+        return { "validation": keyval }
+
+def all_fields(entry):
+    missing = []
+    for r in required_dict:
+        if r not in entry:
+            missing.append(r)
+    if missing == []:
+        return { "ok": entry }
+    else:
+        return { "missing": missing }
+
+count=0
+for e in get_entry(sys.stdin):
+    a = all_fields(e)
+    if a.get("ok", False):
+        res = {}
+        bad = False
+        for keyval in e.items():
+            r = validate(keyval)
+            if r.get("validation", False):
+                bad = True
+            res[keyval[0]] = r
+        if bad:
+            pprint.pprint({ "validation": res })
+        else:
+            pprint.pprint({ "ok": e })
+            count = count+1
+    else:
+        pprint.pprint(a)
+
+print(count)
diff --git a/users/Profpatsch/aerc-no-config-perms.patch b/users/Profpatsch/aerc-no-config-perms.patch
new file mode 100644
index 000000000000..86b41cd74b08
--- /dev/null
+++ b/users/Profpatsch/aerc-no-config-perms.patch
@@ -0,0 +1,12 @@
+diff --git a/config/config.go b/config/config.go
+index 0472daf..5eed379 100644
+--- a/config/config.go
++++ b/config/config.go
+@@ -779,6 +779,7 @@ func (config *AercConfig) LoadBinds(binds *ini.File, baseName string, baseGroup
+ // checkConfigPerms checks for too open permissions
+ // printing the fix on stdout and returning an error
+ func checkConfigPerms(filename string) error {
++        return nil;
+ 	info, err := os.Stat(filename)
+ 	if err != nil {
+ 		return nil // disregard absent files
diff --git a/users/Profpatsch/aerc.dhall b/users/Profpatsch/aerc.dhall
new file mode 100644
index 000000000000..fb63f7044be8
--- /dev/null
+++ b/users/Profpatsch/aerc.dhall
@@ -0,0 +1,157 @@
+let NameVal = λ(T : Type) → { name : Text, value : T }
+
+in  λ ( imports
+      : { -- Take an aerc filter from the aerc distribution /share directory
+          aercFilter : Text → Text
+        , -- given a dsl of functions to create an Ini, render the ini file
+          toIni :
+            { globalSection : List (NameVal Text)
+            , sections : List (NameVal (List (NameVal Text)))
+            } →
+              Text
+        }
+      ) →
+      let List/map
+          : ∀(a : Type) → ∀(b : Type) → (a → b) → List a → List b
+          = λ(a : Type) →
+            λ(b : Type) →
+            λ(f : a → b) →
+            λ(xs : List a) →
+              List/build
+                b
+                ( λ(list : Type) →
+                  λ(cons : b → list → list) →
+                    List/fold a xs list (λ(x : a) → cons (f x))
+                )
+
+      in  { accounts =
+              imports.toIni
+                { globalSection = [] : List (NameVal Text)
+                , sections =
+                  [ { name = "mail"
+                    , value =
+                      [ { name = "archive", value = "Archive" }
+                      , { name = "copy-to", value = "Sent" }
+                      , { name = "default", value = "INBOX" }
+                      , { name = "from"
+                        , value = "Profpatsch <mail@profpatsch.de>"
+                        }
+                      , { name = "source", value = "maildir://~/.Mail/mail" }
+                      , { name = "postpone", value = "Drafts" }
+                      ]
+                    }
+                  ]
+                }
+          , aerc =
+              imports.toIni
+                { globalSection = [] : List (NameVal Text)
+                , sections =
+                  [ { name = "filters"
+                    , value =
+                      [ { name = "text/html"
+                        , value = imports.aercFilter "html"
+                        }
+                      , let _ = "-- TODO: this awk should be taken from nix!"
+
+                        in  { name = "text/*"
+                            , value = "awk -f ${imports.aercFilter "plaintext"}"
+                            }
+                      ]
+                    }
+                  ]
+                }
+          , binds =
+              let
+                  -- keybinding and command to run
+                  Key =
+                    { ctrl : Bool, key : Text, cmd : Text }
+
+              in  let
+                      -- render a key to config format
+                      renderKey =
+                        λ(k : Key) →
+                          if    k.ctrl
+                          then  { name = "<C-${k.key}>", value = k.cmd }
+                          else  { name = k.key, value = k.cmd }
+
+                  let
+
+                      -- render a list of keys to config format
+                      renderKeys =
+                        λ(keys : List Key) →
+                          List/map Key (NameVal Text) renderKey keys
+
+                  let
+                      -- create a section with a name and a list of keys
+                      sect =
+                        λ(section : Text) →
+                        λ(keys : List Key) →
+                          { name = section, value = renderKeys keys }
+
+                  let
+
+                      -- set key without modifiers
+                      key =
+                        λ(key : Text) → { key }
+
+                  let
+                      -- set special key without modifiers
+                      special =
+                        λ(key : Text) → { key = "<${key}>" }
+
+                  let
+                      -- no modifier
+                      none =
+                        { ctrl = False }
+
+                  let
+                      -- set control key
+                      ctrl =
+                        { ctrl = True }
+
+                  let
+                      -- set a command to execute
+                      cmd =
+                        λ(cmd : Text) → { cmd = ":${cmd}<Enter>" }
+
+                  let
+                      -- set a command, but stay on the prompt
+                      prompt =
+                        λ(cmd : Text) → { cmd = ":${cmd}<Space>" }
+
+                  let config =
+                        { globalSection =
+                            renderKeys
+                              [ ctrl ∧ key "p" ∧ cmd "prev-tab"
+                              , ctrl ∧ key "n" ∧ cmd "next-tab"
+                              , ctrl ∧ key "t" ∧ cmd "term"
+                              ]
+                        , sections =
+                          [ sect
+                              "messages"
+                              [ ctrl ∧ key "q" ∧ cmd "quit"
+                              , none ∧ special "Up" ∧ cmd "prev"
+                              , none ∧ special "Down" ∧ cmd "next"
+                              , none ∧ special "PgUp" ∧ cmd "prev 100%"
+                              , none ∧ special "PgDn" ∧ cmd "next 100%"
+                              , none ∧ key "g" ∧ cmd "select 0"
+                              , none ∧ key "G" ∧ cmd "select -1"
+                              , ctrl ∧ key "Up" ∧ cmd "prev-folder"
+                              , ctrl ∧ key "Down" ∧ cmd "next-folder"
+                              , none ∧ key "v" ∧ cmd "mark -t"
+                              , none ∧ key "V" ∧ cmd "mark -v"
+                              , none ∧ special "Enter" ∧ cmd "view"
+                              , none ∧ key "c" ∧ cmd "compose"
+                              , none ∧ key "|" ∧ prompt "pipe"
+                              , none ∧ key "t" ∧ prompt "term"
+                              , none ∧ key "/" ∧ prompt "search"
+                              , none ∧ key "n" ∧ cmd "next-result"
+                              , none ∧ key "N" ∧ cmd "prev-result"
+                              , none ∧ special "Esc" ∧ cmd "clear"
+                              ]
+                          , sect "view" [ none ∧ key "q" ∧ cmd "close" ]
+                          ]
+                        }
+
+                  in  imports.toIni config
+          }
diff --git a/users/Profpatsch/aerc.nix b/users/Profpatsch/aerc.nix
new file mode 100644
index 000000000000..569f045a00dc
--- /dev/null
+++ b/users/Profpatsch/aerc.nix
@@ -0,0 +1,51 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  aerc-patched = pkgs.aerc.overrideAttrs (old: {
+    patches = old.patches or [ ] ++ [
+      ./aerc-no-config-perms.patch
+    ];
+  });
+
+  bins = depot.nix.getBins aerc-patched [ "aerc" ];
+
+  config =
+    depot.users.Profpatsch.importDhall.importDhall
+      {
+        root = ./.;
+        files = [
+          "aerc.dhall"
+        ];
+        main = "aerc.dhall";
+        deps = [ ];
+      }
+      {
+        aercFilter = name: "${aerc-patched}/share/aerc/filters/${name}";
+        toIni = depot.users.Profpatsch.toINI { };
+      };
+
+  aerc-config = pkgs.linkFarm "aerc-config" [
+    {
+      name = "aerc/accounts.conf";
+      path = pkgs.writeText "accounts.conf" config.accounts;
+    }
+    {
+      name = "aerc/aerc.conf";
+      path = pkgs.writeText "aerc.conf" config.aerc;
+    }
+    {
+      name = "aerc/binds.conf";
+      path = pkgs.writeText "binds.conf" config.binds;
+    }
+  ];
+
+  aerc = depot.nix.writeExecline "aerc" { } [
+    "export"
+    "XDG_CONFIG_HOME"
+    aerc-config
+    bins.aerc
+    "$@"
+  ];
+
+in
+aerc
diff --git a/users/Profpatsch/alacritty.dhall b/users/Profpatsch/alacritty.dhall
new file mode 100644
index 000000000000..b4d99c8294b2
--- /dev/null
+++ b/users/Profpatsch/alacritty.dhall
@@ -0,0 +1,48 @@
+let sol = (./solarized.dhall).hex
+
+let black = "#000000"
+
+let white = "#ffffff"
+
+let
+    -- todo: this doesn't look too good yet
+    solarized-dark =
+      { --Colors (Solarized Dark)
+        colors =
+        { -- Default colors
+          primary =
+          { background = black, foreground = white }
+        , -- Cursor colors
+          cursor =
+          { text = sol.base03, cursor = sol.base0 }
+        , -- Normal colors
+          normal =
+          { black = sol.base02
+          , red = sol.red
+          , green = sol.green
+          , yellow = sol.yellow
+          , blue = sol.blue
+          , magenta = sol.magenta
+          , cyan = sol.cyan
+          , white = sol.base2
+          }
+        , -- Bright colors
+          bright =
+          { black = sol.base03
+          , red = sol.orange
+          , green = sol.base01
+          , yellow = sol.base00
+          , blue = sol.base0
+          , magenta = sol.violet
+          , cyan = sol.base1
+          , white = sol.base3
+          }
+        }
+      }
+
+in  { alacritty-config = { font.size = 18, scrolling.history = 100000 }
+    ,   -- This disables the dpi-sensitive scaling (cause otherwise the font will be humongous on my laptop screen)
+        alacritty-env
+      . WINIT_X11_SCALE_FACTOR
+      = 1
+    }
diff --git a/users/Profpatsch/alacritty.nix b/users/Profpatsch/alacritty.nix
new file mode 100644
index 000000000000..d2cb8de2fc86
--- /dev/null
+++ b/users/Profpatsch/alacritty.nix
@@ -0,0 +1,34 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.alacritty [ "alacritty" ];
+
+  config =
+    depot.users.Profpatsch.importDhall.importDhall {
+      root = ./.;
+      files = [
+        "alacritty.dhall"
+        "solarized.dhall"
+      ];
+      main = "alacritty.dhall";
+      deps = [ ];
+    };
+
+  config-file = lib.pipe config.alacritty-config [
+    (lib.generators.toYAML { })
+    (pkgs.writeText "alacritty.conf")
+  ];
+
+
+  alacritty = depot.nix.writeExecline "alacritty" { } (
+    (lib.concatLists (lib.mapAttrsToList (k: v: [ "export" k (toString v) ]) config.alacritty-env))
+    ++ [
+      bins.alacritty
+      "--config-file"
+      config-file
+      "$@"
+    ]
+  );
+
+in
+alacritty
diff --git a/users/Profpatsch/aliases.nix b/users/Profpatsch/aliases.nix
new file mode 100644
index 000000000000..6a1c2c1a63c3
--- /dev/null
+++ b/users/Profpatsch/aliases.nix
@@ -0,0 +1,75 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.findutils [ "find" ];
+
+in
+depot.nix.readTree.drvTargets {
+
+  findia = depot.nix.writeExecline "findia"
+    {
+      readNArgs = 1;
+      # TODO: commented out, thanks to sterni blocking the runExecline change
+      # meta.description = ''
+      #   Find case-insensitive anywhere (globbing)
+
+      #   Usage: findia <pattern> <more find(1) arguments>
+      # '';
+    } [
+    bins.find
+    "-iname"
+    "*\${1}*"
+    "$@"
+  ];
+
+  findial = depot.nix.writeExecline "findial"
+    {
+      readNArgs = 1;
+      # TODO: commented out, thanks to sterni blocking the runExecline change
+      # meta.description = ''
+      #   Find case-insensitive anywhere (globbing), follow symlinks";
+
+      #   Usage: findial <pattern> <more find(1) arguments>
+      # '';
+    } [
+    bins.find
+    "-L"
+    "-iname"
+    "*\${1}*"
+    "$@"
+  ];
+
+  findian = depot.nix.writeExecline "findian"
+    {
+      readNArgs = 2;
+      # TODO: commented out, thanks to sterni blocking the runExecline change
+      # meta.description = ''
+      #   Find case-insensitive anywhere (globbing) in directory
+
+      #   Usage: findian <directory> <pattern> <more find(1) arguments>
+      # '';
+    } [
+    bins.find
+    "$1"
+    "-iname"
+    "*\${2}*"
+    "$@"
+  ];
+
+  findiap = depot.nix.writeExecline "findiap"
+    {
+      readNArgs = 2;
+      # TODO: commented out, thanks to sterni blocking the runExecline change
+      # meta.description = ''
+      #   Find case-insensitive anywhere (globbing) in directory, the pattern allows for paths.
+
+      #   Usage: findiap <directory> <pattern> <more find(1) arguments>
+      # '';
+    } [
+    bins.find
+    "$1"
+    "-ipath"
+    "*\${2}*"
+    "$@"
+  ];
+}
diff --git a/users/Profpatsch/arglib/netencode.nix b/users/Profpatsch/arglib/netencode.nix
new file mode 100644
index 000000000000..3f1d121e5132
--- /dev/null
+++ b/users/Profpatsch/arglib/netencode.nix
@@ -0,0 +1,42 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  netencode = {
+    rust = depot.nix.writers.rustSimpleLib
+      {
+        name = "arglib-netencode";
+        dependencies = [
+          depot.users.Profpatsch.execline.exec-helpers
+          depot.users.Profpatsch.netencode.netencode-rs
+        ];
+      } ''
+      extern crate netencode;
+      extern crate exec_helpers;
+
+      use netencode::{T};
+      use std::os::unix::ffi::OsStrExt;
+
+      pub fn arglib_netencode(prog_name: &str, env: Option<&std::ffi::OsStr>) -> T {
+          let env = match env {
+              None => std::ffi::OsStr::from_bytes("ARGLIB_NETENCODE".as_bytes()),
+              Some(a) => a
+          };
+          let t = match std::env::var_os(env) {
+              None => exec_helpers::die_user_error(prog_name, format!("could not read args, envvar {} not set", env.to_string_lossy())),
+              // TODO: good error handling for the different parser errors
+              Some(soup) => match netencode::parse::t_t(soup.as_bytes()) {
+                  Ok((remainder, t)) => match remainder.is_empty() {
+                      true => t,
+                      false => exec_helpers::die_environment_problem(prog_name, format!("arglib: there was some unparsed bytes remaining: {:?}", remainder))
+                  },
+                  Err(err) => exec_helpers::die_environment_problem(prog_name, format!("arglib parsing error: {:?}", err))
+              }
+          };
+          std::env::remove_var(env);
+          t
+      }
+    '';
+  };
+
+in
+depot.nix.readTree.drvTargets netencode
diff --git a/users/Profpatsch/atomically-write.nix b/users/Profpatsch/atomically-write.nix
new file mode 100644
index 000000000000..c4d07cfbb1fa
--- /dev/null
+++ b/users/Profpatsch/atomically-write.nix
@@ -0,0 +1,29 @@
+{ depot, pkgs, ... }:
+# Atomically write a file (just `>` redirection in bash
+# empties a file even if the command crashes).
+#
+# Maybe there is an existing tool for that?
+# But it’s easy enough to implement.
+#
+# Example:
+#   atomically-write
+#     ./to
+#     echo "foo"
+#
+# will atomically write the string "foo" into ./to
+let
+  atomically-write = pkgs.writers.writeDash "atomically-write" ''
+    set -e
+    to=$1
+    shift
+    # assumes that the tempfile is on the same file system, (or in memory)
+    # for the `mv` at the end to be more-or-less atomic.
+    tmp=$(${pkgs.coreutils}/bin/mktemp -d)
+    trap 'rm -r "$tmp"' EXIT
+    "$@" \
+      > "$tmp/out"
+    mv "$tmp/out" "$to"
+  '';
+
+in
+atomically-write
diff --git a/users/Profpatsch/blog/default.nix b/users/Profpatsch/blog/default.nix
new file mode 100644
index 000000000000..9848d83c5627
--- /dev/null
+++ b/users/Profpatsch/blog/default.nix
@@ -0,0 +1,472 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.lowdown [ "lowdown" ]
+    // depot.nix.getBins pkgs.cdb [ "cdbget" "cdbmake" "cdbdump" ]
+    // depot.nix.getBins pkgs.coreutils [ "mv" "cat" "printf" "test" ]
+    // depot.nix.getBins pkgs.s6-networking [ "s6-tcpserver" ]
+    // depot.nix.getBins pkgs.time [ "time" ]
+  ;
+
+  # /
+  # TODO: use
+  toplevel = [
+    {
+      route = [ "notes" ];
+      name = "Notes";
+      page = { cssFile }: router cssFile;
+    }
+    {
+      route = [ "projects" ];
+      name = "Projects";
+      # page = projects;
+    }
+  ];
+
+  # /notes/*
+  notes = [
+    {
+      route = [ "notes" "an-idealized-conflang" ];
+      name = "An Idealized Configuration Language";
+      page = { cssFile }: markdownToHtml {
+        name = "an-idealized-conflang";
+        markdown = ./notes/an-idealized-conflang.md;
+        inherit cssFile;
+      };
+    }
+    {
+      route = [ "notes" "rust-string-conversions" ];
+      name = "Converting between different String types in Rust";
+      page = { cssFile }: markdownToHtml {
+        name = "rust-string-conversions";
+        markdown = ./notes/rust-string-conversions.md;
+        inherit cssFile;
+      };
+    }
+    {
+      route = [ "notes" "preventing-oom" ];
+      name = "Preventing out-of-memory (OOM) errors on Linux";
+      page = { cssFile }: markdownToHtml {
+        name = "preventing-oom";
+        markdown = ./notes/preventing-oom.md;
+        inherit cssFile;
+      };
+    }
+  ];
+
+  projects = [
+    {
+      name = "lorri";
+      description = "<code>nix-shell</code> replacement for projects";
+      link = "https://github.com/nix-community/lorri";
+    }
+    {
+      name = "netencode";
+      description = ''A human-readable nested data exchange format inspired by <a href="https://en.wikipedia.org/wiki/Netstring">netstrings</a> and <a href="https://en.wikipedia.org/wiki/Bencode">bencode</a>.'';
+      link = depotCgitLink { relativePath = "users/Profpatsch/netencode/README.md"; };
+    }
+    {
+      name = "yarn2nix";
+      description = ''nix dependency generator for the <a href="https://yarnpkg.com/"><code>yarn</code> Javascript package manager</a>'';
+      link = "https://github.com/Profpatsch/yarn2nix";
+    }
+  ];
+
+  posts = [
+    {
+      date = "2017-05-04";
+      title = "Ligature Emulation in Emacs";
+      subtitle = "It’s not pretty, but the results are";
+      description = "How to set up ligatures using <code>prettify-symbols-mode</code> and the Hasklig/FiraCode fonts.";
+      page = { cssFile }: markdownToHtml {
+        name = "2017-05-04-ligature-emluation-in-emacs";
+        markdown = ./posts/2017-05-04-ligature-emulation-in-emacs.md;
+        inherit cssFile;
+      };
+      route = [ "posts" "2017-05-04-ligature-emluation-in-emacs" ];
+      tags = [ "emacs" ];
+    }
+  ];
+
+  # convert a markdown file to html via lowdown
+  markdownToHtml =
+    { name
+    , # the file to convert
+      markdown
+    , # css file to add to the final result, as { route }
+      cssFile
+    }:
+    depot.nix.runExecline "${name}.html" { } ([
+      "importas"
+      "out"
+      "out"
+      (depot.users.Profpatsch.lib.debugExec "")
+      bins.lowdown
+      "-s"
+      "-Thtml"
+    ] ++
+    (lib.optional (cssFile != null) ([ "-M" "css=${mkRoute cssFile.route}" ]))
+    ++ [
+      "-o"
+      "$out"
+      markdown
+    ]);
+
+  # takes a { route … } attrset and converts the route lists to an absolute path
+  fullRoute = attrs: lib.pipe attrs [
+    (map (x@{ route, ... }: x // { route = mkRoute route; }))
+  ];
+
+  # a cdb from route to a netencoded version of data for each route
+  router = cssFile: lib.pipe (notes ++ posts) [
+    (map (r: with depot.users.Profpatsch.lens;
+    lib.pipe r [
+      (over (field "route") mkRoute)
+      (over (field "page") (_ { inherit cssFile; }))
+    ]))
+    (map (x: {
+      name = x.route;
+      value = depot.users.Profpatsch.netencode.gen.dwim x;
+    }))
+    lib.listToAttrs
+    (cdbMake "router")
+  ];
+
+  # Create a link to the given source file/directory, given the relative path in the depot repo.
+  # Checks that the file exists at evaluation time.
+  depotCgitLink =
+    {
+      # relative path from the depot root (without leading /).
+      relativePath
+    }:
+      assert
+      (lib.assertMsg
+        (builtins.pathExists (depot.path.origSrc + "/${relativePath}"))
+        "depotCgitLink: path /${relativePath} does not exist in depot, and depot.path was ${toString depot.path}");
+      "https://code.tvl.fyi/tree/${relativePath}";
+
+  # look up a route by path ($1)
+  router-lookup = cssFile: depot.nix.writeExecline "router-lookup" { readNArgs = 1; } [
+    cdbLookup
+    (router cssFile)
+    "$1"
+  ];
+
+  runExeclineStdout = name: args: cmd: depot.nix.runExecline name args ([
+    "importas"
+    "-ui"
+    "out"
+    "out"
+    "redirfd"
+    "-w"
+    "1"
+    "$out"
+  ] ++ cmd);
+
+  notes-index-html =
+    let o = fullRoute notes;
+    in ''
+      <ul>
+      ${scope o (o: ''
+        <li><a href="${str o.route}">${esc o.name}</a></li>
+      '')}
+      </ul>
+    '';
+
+  notes-index = pkgs.writeText "notes-index.html" notes-index-html;
+
+  # A simple mustache-inspired string interpolation combinator
+  # that takes an object and a template (a function from o to string)
+  # and returns a string.
+  scope = o: tpl:
+    if builtins.typeOf o == "list" then
+      lib.concatMapStringsSep "\n" tpl o
+    else if builtins.typeOf o == "set" then
+      tpl o
+    else throw "${lib.generators.toPretty {} o} not allowed in template";
+
+  # string-escape html (TODO)
+  str = s: s;
+  # html-escape (TODO)
+  esc = s: s;
+  html = s: s;
+
+  projects-index-html =
+    let o = projects;
+    in ''
+      <dl>
+      ${scope o (o: ''
+        <dt><a href="${str o.link}">${esc o.name}</a></dt>
+        <dd>${html o.description}</dd>
+      '')}
+      </dl>
+    '';
+
+  projects-index = pkgs.writeText "projects-index.html" projects-index-html;
+
+  posts-index-html =
+    let o = fullRoute posts;
+    in ''
+      <dl>
+      ${scope o (o: ''
+        <dt>${str o.date} <a href="${str o.route}">${esc o.title}</a></dt>
+        <dd>${html o.description}</dd>
+      '')}
+      </dl>
+    '';
+
+  posts-index = pkgs.writeText "posts-index.html" posts-index-html;
+
+  arglibNetencode = val: depot.nix.writeExecline "arglib-netencode" { } [
+    "export"
+    "ARGLIB_NETENCODE"
+    (depot.users.Profpatsch.netencode.gen.dwim val)
+    "$@"
+  ];
+
+  # A simple http server that serves the site. Yes, it’s horrible.
+  site-server = { cssFile, port }: depot.nix.writeExecline "blog-server" { } [
+    (depot.users.Profpatsch.lib.runInEmptyEnv [ "PATH" ])
+    bins.s6-tcpserver
+    "127.0.0.1"
+    port
+    bins.time
+    "--format=time: %es"
+    "--"
+    runOr
+    return400
+    "pipeline"
+    [
+      (arglibNetencode {
+        what = "request";
+      })
+      depot.users.Profpatsch.read-http
+    ]
+    depot.users.Profpatsch.netencode.record-splice-env
+    runOr
+    return500
+    "importas"
+    "-i"
+    "path"
+    "path"
+    "if"
+    [ depot.tools.eprintf "GET \${path}\n" ]
+    runOr
+    return404
+    "backtick"
+    "-ni"
+    "TEMPLATE_DATA"
+    [
+      # TODO: factor this out of here, this is routing not serving
+      "ifelse"
+      [ bins.test "$path" "=" "/notes" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        notes-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      "ifelse"
+      [ bins.test "$path" "=" "/projects" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        projects-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      "ifelse"
+      [ bins.test "$path" "=" "/posts" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        posts-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      # TODO: ignore potential query arguments. See 404 message
+      "pipeline"
+      [ (router-lookup cssFile) "$path" ]
+      depot.users.Profpatsch.netencode.record-splice-env
+      "importas"
+      "-ui"
+      "page"
+      "page"
+      "export"
+      "content-type"
+      "text/html"
+      "export"
+      "serve-file"
+      "$page"
+      depot.users.Profpatsch.netencode.env-splice-record
+    ]
+    runOr
+    return500
+    "if"
+    [
+      "pipeline"
+      [
+        bins.printf
+        ''
+          HTTP/1.1 200 OK
+          Content-Type: {{{content-type}}}; charset=UTF-8
+          Connection: close
+
+        ''
+      ]
+      depot.users.Profpatsch.netencode.netencode-mustache
+    ]
+    "pipeline"
+    [ "importas" "t" "TEMPLATE_DATA" bins.printf "%s" "$t" ]
+    depot.users.Profpatsch.netencode.record-splice-env
+    "importas"
+    "-ui"
+    "serve-file"
+    "serve-file"
+    bins.cat
+    "$serve-file"
+  ];
+
+  # run argv or $1 if argv returns a failure status code.
+  runOr = depot.nix.writeExecline "run-or" { readNArgs = 1; } [
+    "foreground"
+    [ "$@" ]
+    "importas"
+    "?"
+    "?"
+    "ifelse"
+    [ bins.test "$?" "-eq" "0" ]
+    [ ]
+    "if"
+    [ depot.tools.eprintf "runOr: exited \${?}, running \${1}\n" ]
+    "$1"
+  ];
+
+  return400 = depot.nix.writeExecline "return400" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 400 Bad Request
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+    ''
+  ];
+
+  return404 = depot.nix.writeExecline "return404" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 404 Not Found
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+      This page doesn’t exist! Query arguments are not handled at the moment.
+    ''
+  ];
+
+  return500 = depot.nix.writeExecline "return500" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 500 Internal Server Error
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+      Encountered an internal server error. Please try again.
+    ''
+  ];
+
+  capture-stdin = depot.nix.writers.rustSimple
+    {
+      name = "capture-stdin";
+      dependencies = [ depot.users.Profpatsch.execline.exec-helpers ];
+    } ''
+    extern crate exec_helpers;
+    use std::io::Read;
+    fn main() {
+      let (args, prog) = exec_helpers::args_for_exec("capture-stdin", 1);
+      let valname = &args[1];
+      let mut v : Vec<u8> = vec![];
+      std::io::stdin().lock().read_to_end(&mut v).unwrap();
+      exec_helpers::exec_into_args("capture-stdin", prog, vec![(valname, v)]);
+    }
+  '';
+
+  # go from a list of path elements to an absolute route string
+  mkRoute = route: "/" + lib.concatMapStringsSep "/" urlencodeAscii route;
+
+  # urlencodes, but only ASCII characters
+  # https://en.wikipedia.org/wiki/Percent-encoding
+  urlencodeAscii = urlPiece:
+    let
+      raw = [ "!" "#" "$" "%" "&" "'" "(" ")" "*" "+" "," "/" ":" ";" "=" "?" "@" "[" "]" ];
+      enc = [ "%21" "%23" "%24" "%25" "%26" "%27" "%28" "%29" "%2A" "%2B" "%2C" "%2F" "%3A" "%3B" "%3D" "%3F" "%40" "%5B" "%5D" ];
+      rest = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
+    in
+    assert lib.assertMsg (lib.all (c: builtins.elem c (raw ++ rest)) (lib.stringToCharacters urlPiece))
+      "urlencodeAscii: the urlPiece must only contain valid url ASCII characters, was: ${urlPiece}";
+    builtins.replaceStrings raw enc urlPiece;
+
+
+  # create a cdb record entry, as required by the cdbmake tool
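+  # (e.g. key "foo" and value "bar" become "+3,3:foo->bar\n")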
+  cdbRecord = key: val:
+    "+${toString (builtins.stringLength key)},${toString (builtins.stringLength val)}:"
+    + "${key}->${val}\n";
+
+  # create a full cdbmake input from an attribute set of keys to values (strings)
+  cdbRecords =
+    with depot.nix.yants;
+    defun [ (attrs (either drv string)) string ]
+      (attrs:
+        (lib.concatStrings (lib.mapAttrsToList cdbRecord attrs)) + "\n");
+
+  # run cdbmake on a list of key/value pairs (strings)
+  cdbMake = name: attrs: depot.nix.runExecline "${name}.cdb"
+    {
+      stdin = cdbRecords attrs;
+    } [
+    "importas"
+    "out"
+    "out"
+    depot.users.Profpatsch.lib.eprint-stdin
+    "if"
+    [ bins.cdbmake "db" "tmp" ]
+    bins.mv
+    "db"
+    "$out"
+  ];
+
+  # look up a key ($2) in the given cdb ($1)
+  cdbLookup = depot.nix.writeExecline "cdb-lookup" { readNArgs = 2; } [
+    # cdb ($1) on stdin
+    "redirfd"
+    "-r"
+    "0"
+    "$1"
+    # key ($2) lookup
+    bins.cdbget
+    "$2"
+  ];
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    router
+    depotCgitLink
+    site-server
+    notes-index
+    notes-index-html
+    projects-index
+    projects-index-html
+    posts-index-html
+    ;
+
+}
diff --git a/users/Profpatsch/blog/notes/an-idealized-conflang.md b/users/Profpatsch/blog/notes/an-idealized-conflang.md
new file mode 100644
index 000000000000..5c6b39f6e81b
--- /dev/null
+++ b/users/Profpatsch/blog/notes/an-idealized-conflang.md
@@ -0,0 +1,298 @@
+tags: netencode, json
+date: 2022-03-31
+certainty: likely
+status: initial
+title: An idealized Configuration Language
+
+# An Idealized Configuration Language
+
+JSON brought us one step closer to what an idealized configuration language is,
+which I define as “data, stripped of all externalities of the system it is working in”.
+
+Specifically, JSON is very close to what I consider the minimal properties to represent structured data.
+
+## A short history, according to me
+
+In the beginning, Lisp defined s-expressions as a stand-in for an actual syntax.
+Then, people figured out that it’s also a way to represent structured data.
+It has scalars, which can be nested into lists, recursively.
+
+```
+(this is (a (list) (of lists)))
+```
+
+This provides the first three rules of our idealized language:
+
+1. A **scalar** is a primitive value that is domain-specific.
+   We can assume a bunch of bytes here, or a text or an integer.
+   
+2. A **list** gives an ordering to `0..n` (or `1..n`) values
+   
+3. Both a scalar and a list are the *same kind* of “thing” (from here on called **value**),
+   lists can be created from arbitrary values *recursively*
+   (for example scalars, or lists of scalars and other lists)
+
+
+Later, ASN.1 came and had the important insight that the same idealized data structure
+can be represented in different fashions,
+for example as a binary-efficient version and a human-readable format.
+
+Then, XML “graced” the world for a decade or two, and the main lesson from it was
+that you don’t want to mix markup languages and configuration languages,
+and that you don’t want a committee to design these things.
+
+---
+
+In the meantime, Brendan Eich designed Javascript. Its prototype-based object system
+arguably stripped down the rituals of existing OO-systems.
+Douglas Crockford later extracted the object format (minus functions) into a syntax, and we got JSON.
+
+```
+{
+  "foo": [
+    { "nested": "attrs" },
+    "some text"
+  ],
+  "bar": 42
+}
+```
+
+JSON adds another fundamental idea into the mix:
+
+4. **Records** are unordered collections of `name`/`value` pairs.
+   A `name` is defined to be a unicode string, i.e. a semantic descriptor of the nested `value`.
+
+Unfortunately, the JSON syntax does not actually specify any semantics of records (`objects` in JSON lingo),
+in particular it does not mention what the meaning is if a `name` appears twice in one record.
+
+If records can have multiple entries with the same `name`, suddenly ordering becomes important!
+But wait, remember earlier we defined *lists* to impose ordering on two values.
+So in order to rectify that problem, we say that
+
+5. A `name` can only appear in a record *once*, names must be unique.
+
+This is the current state of the programming community at large,
+where most “modern” configuration languages basically use a version of the JSON model
+as their underlying data structure. (However not all of them use the same version.)
+
+## Improving JSON’s data model
+
+We are not yet at the final “idealized” configuration language, though.
+
+Modern languages like Standard ML define their data types as a mixture of 
+
+* *records* (“structs” in the C lingo)
+* and *sums* (which you can think of as enums that can hold more `value`s inside them)
+
+This allows expressing the common pattern where some fields in a record are only meaningful
+if another field—the so-called `tag`-field—is set to a specific value.
+
+An easy example: if a request can fail with an error message or succeed with a result.
+
+You could model that as 
+
+```
+{
+  "was_error": true,
+  "error_message": "there was an error"
+}
+```
+
+or
+
+```
+{
+  "was_error": false,
+  "result": 42
+}
+```
+
+in your JSON representation.
+
+But in a ML-like language (like, for example, Rust), you would instead model it as
+
+```
+type RequestResult 
+  = Error { error_message: String }
+  | Success { result: i64 }
+```
+
+where the distinction in `Error` or `Success` makes it clear that `error_message` and `result`
+only exist in one of these cases, not the other.
+
+We *can* encode exactly that idea into JSON in multiple ways, but there is no single “blessed” way.
+
+For example, another way to encode the above would be
+
+```
+{ 
+  "Error": { 
+    "error_message": "there was an error"
+  }
+}
+```
+
+and
+
+```
+{ 
+  "Success": { 
+    "result": 42
+  }
+}
+```
+
+Particularly notice the difference between the language representation, where the type is “closed” (only `Success` or `Error` can happen),
+and the data representation, where the type is “open”: more cases could potentially exist.
+
+This is an important distinction from a type system:
+our idealized configuration language just gives more structure to a bag of data;
+it does not restrict which value can appear where.
+Think of a value in a unityped language, like Python.
+
+
+So far we have the notion of 
+
+1. a scalar (a primitive)
+2. a list (ordering on values)
+3. a record (unordered collection of named values)
+
+and in order to get the “open” `tag`ged enumeration values, we introduce
+
+4. a `tag`, which gives a name to a value
+
+We can then redefine `record` to mean “an unordered collection of `tag`ged values”,
+which further reduces the amount of concepts needed.
+
+And that’s it, this is the full idealized configuration language.
+
+
+## Some examples of data modelling with tags
+
+This is all well and good, but what does it look like in practice?
+
+For these examples I will be using JSON with a new `< "tag": value >` syntax
+to represent `tag`s.
+
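+For example, the `Success` case of `RequestResult` from above would be
+written as `< "Success": { "result": 42 } >` in this notation.
+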
+From a compatibility standpoint, `tag`s (or sum types) have dual properties to record types.
+
+With a record, when you have a producer that *adds* a field to it, the consumer will still be able to handle the record (provided the semantics of the existing fields is not changed by the new field).
+
+With a tag, *removing* a tag from the producer will mean that the consumer will still be able to handle the tag. It might do one “dead” check on the removed `tag`, but can still handle the remaining ones just fine.
+
+<!-- TODO: some illustration here -->
+    
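+As a small illustration of both directions, here is a sketch in Rust
+(assuming the `serde_json` crate; all field and tag names are made up):
+
+```
+use serde_json::json;
+
+fn main() {
+    // record case: the producer added a field ("newly_added") that
+    // this consumer never asks for, so the consumer is unaffected
+    let record = json!({ "foo": 42, "newly_added": true });
+    println!("foo = {}", record["foo"]);
+
+    // tag case: the consumer still checks for "Error", but the
+    // producer stopped emitting it; the check is simply dead code
+    let tagged = json!({ "Success": { "result": 42 } });
+    if let Some(err) = tagged.get("Error") {
+        eprintln!("error: {}", err);
+    } else if let Some(ok) = tagged.get("Success") {
+        println!("result: {}", ok["result"]);
+    }
+}
+```
+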
+An example of this in practice: in `protobuf3`, fields of a record are *always* optional.
+
+We can model optional fields by wrapping them in `< "Some": value >` or `< "None": {} >` (where the actual value of the `None` is ignored or always an empty record).
+
+So a protobuf with the fields `foo: int` and `bar: string` has to be parsed by the receiver as containing *four* possibilities:
+
+|№|foo|bar|
+|--:|---|---|
+|1|`<"None":{}>`|`<"None":{}>`|
+|2|`<"Some":42>`|`<"None":{}>`|
+|3|`<"None":{}>`|`<"Some":"x">`|
+|4|`<"Some":42>`|`<"Some":"x">`|
+
+Now, iff the receiver actually handles all four possibilities
+(and doesn’t just crash if a field is not set, as customary in million-dollar-mistake languages),
+it’s easy to see how removing a field from the producer is semantically equivalent to always setting it to `<"None":{}>`.
+Since all receivers should be ready to receive `None` for every field, this provides a simple forward-compatibility scheme.
+
+We can abstract this to any kind of tag value:
+If you start with “more” tags, you give yourself space to remove them later without breaking compatibility, typically called “forward compatibility”.
+
+
+## To empty list/record or not to
+
+Something to think about is whether records and fields should be defined
+to always contain at least one element.
+
+As it stands, JSON has multiple ways of expressing the “empty value”:
+
+* `null`
+* `[]`
+* `{}`
+* `""`
+* *leave out the field*
+
+and two of those come from the possibility of having empty structured values.
+
+## Representations of this language
+
+This line of thought originally fell out of me designing [`netencode`](https://code.tvl.fyi/tree/users/Profpatsch/netencode/README.md)
+as a small human-debuggable format for pipeline serialization.
+
+In addition to the concepts mentioned here (especially tags),
+it provides a better set of scalars than JSON (specifically arbitrary bytestrings),
+but it cannot practically be written or modified by hand,
+which might be a good thing depending on how you look at it.
+
+---
+
+The way that is compatible with the rest of the ecosystem is probably to use a subset of JSON
+to represent our idealized language.
+
+There are multiple ways of encoding tags in JSON, each with their own pros and cons.
+
+The most common is probably the “tag field” variant, where the tag is pulled into the nested record:
+
+```
+{
+  "_tag": "Success",
+  "result": 42
+}
+```
+
+This has the advantage that people know how to deal with it and that it’s easy to “just add another field”,
+plus it is backward-compatible when you had a record in the first place.
+
+It has multiple disadvantages however:
+
+* If your value wasn’t a record (e.g. an int) before, you have to put it in a record and assign an arbitrary name to its field
+* People are not forced to “unwrap” the tag first, so they are going to forget to check it
+* The magic “_tag” name cannot be used by any of the record’s fields
+
+
+An in-between version of this with fewer downsides is to always push a JSON record onto the stack:
+
+```
+{
+  "tag": "Success",
+  "value": {
+    "result": 42
+  }
+}
+```
+
+This makes it harder for people to miss checking the `tag`, but still possible of course.
+It also makes it easily possible to inspect the contents of `value` without knowing the
+exhaustive list of `tag`s, which can be useful in practice (though often not sound!).
+It also gets rid of the “_tag” field name clash problem.
+
+Disadvantages:
+
+* Breaks backwards compatibility with an existing record-based approach if you want to introduce `tag`s
+* Verbosity of representation
+* Hard to distinguish a record with the `tag` and `value` fields from a `tag`ged value (though you know the type layout of your data on a higher level, don’t you? ;) )
+
+
+The final, “most pure” representation is the one I gave in the original introduction:
+
+```
+{
+  "Success": {
+    "result": 42
+  }
+}
+```
+
+Now you *have* to match on the `tag` name first, before you can actually access your data,
+and it’s less verbose than the above representation.
+
+Disadvantages:
+
+* You also have to *know* what `tag`s to expect, and it’s harder to query, because you need to extract the keys and values from the dict and then take the first one (see the sketch below).
+* Doing a “tag backwards compat” check is harder,
+  because you can’t just check whether `_tag` or `tag`/`value` are the keys in the dict.
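+
+For completeness, here is what that “extract the keys and take the first one”
+query looks like in practice (a Rust sketch, assuming the `serde_json` crate;
+`untag` is a made-up helper name):
+
+```
+use serde_json::Value;
+
+// Split a value in the “most pure” representation into its tag and
+// inner value; anything but a single-key object is rejected.
+fn untag(v: &Value) -> Option<(&String, &Value)> {
+    match v {
+        Value::Object(m) if m.len() == 1 => m.iter().next(),
+        _ => None,
+    }
+}
+```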
diff --git a/users/Profpatsch/blog/notes/preventing-oom.md b/users/Profpatsch/blog/notes/preventing-oom.md
new file mode 100644
index 000000000000..59ea4f747700
--- /dev/null
+++ b/users/Profpatsch/blog/notes/preventing-oom.md
@@ -0,0 +1,33 @@
+tags: linux
+date: 2020-01-25
+certainty: likely
+status: initial
+title: Preventing out-of-memory (OOM) errors on Linux
+
+# Preventing out-of-memory (OOM) errors on Linux
+
+I’ve been running out of memory more and more often lately. I don’t use any swap space because I am of the opinion that 16GB of memory should be sufficient for most daily and professional tasks. That is generally true, but sometimes a runaway process fills my memory. Emacs is very good at this, for example: it is prone to filling your RAM when you open JSON files with very long lines.
+
+In theory, the kernel OOM killer should come in and save the day, but the Linux OOM killer is notorious for being extremely … conservative. It will try to free every internal structure it can before even thinking about touching any userspace processes. By that point, the desktop has usually been unresponsive for minutes.
+
+Luckily the kernel provides memory statistics for the whole system as well as for single processes, and the [`earlyoom`](https://github.com/rfjakob/earlyoom) tool uses those to keep memory usage under a certain limit. It will start killing processes, “heaviest” first, until the given upper memory limit is satisfied again.
+
+On NixOS, I set:
+
+```nix
+{
+  services.earlyoom = {
+    enable = true;
+    freeMemThreshold = 5; # <%5 free
+  };
+}
+```
+
+and after activation, this simple test shows whether the daemon is working:
+
+```shell
+$ tail /dev/zero
+fish: “tail /dev/zero” terminated by signal SIGTERM (Polite quit request)
+```
+
+`tail /dev/zero` searches for the last line of the file `/dev/zero`, and since it cannot know that there is no next line and no end to the stream of `\0` this file produces, it will fill the RAM as quickly as physically possible. Before it can fill it completely, `earlyoom` recognizes that the limit was breached, singles out the `tail` command as the process using the most memory, and sends it a `SIGTERM`.
diff --git a/users/Profpatsch/blog/notes/rust-string-conversions.md b/users/Profpatsch/blog/notes/rust-string-conversions.md
new file mode 100644
index 000000000000..99071ef9d370
--- /dev/null
+++ b/users/Profpatsch/blog/notes/rust-string-conversions.md
@@ -0,0 +1,53 @@
+# Converting between different String types in Rust
+
+```
+let s: String = ...
+let st: &str = ...
+let u: &[u8] = ...
+let b: [u8; 3] = b"foo"
+let v: Vec<u8> = ...
+let os: OsString = ...
+let ost: OsStr = ...
+
+From       To         Use                                    Comment
+----       --         ---                                    -------
+&str     -> String    String::from(st)
+&str     -> &[u8]     st.as_bytes()
+&str     -> Vec<u8>   st.as_bytes().to_owned()               via &[u8]
+&str     -> &OsStr    OsStr::new(st)
+
+String   -> &str      &s                                     alt. s.as_str()
+String   -> &[u8]     s.as_bytes()
+String   -> Vec<u8>   s.into_bytes()
+String   -> OsString  OsString::from(s)
+
+&[u8]    -> &str      str::from_utf8(u).unwrap()
+&[u8]    -> String    String::from_utf8(u).unwrap()
+&[u8]    -> Vec<u8>   u.to_owned()
+&[u8]    -> &OsStr    OsStr::from_bytes(u)                   use std::os::unix::ffi::OsStrExt;
+
+[u8; 3]  -> &[u8]     &b[..]                                 byte literal
+[u8; 3]  -> &[u8]     "foo".as_bytes()                       alternative via utf8 literal
+
+Vec<u8>  -> &str      str::from_utf8(&v).unwrap()            via &[u8]
+Vec<u8>  -> String    String::from_utf8(v)
+Vec<u8>  -> &[u8]     &v
+Vec<u8>  -> OsString  OsString::from_vec(v)                  use std::os::unix::ffi::OsStringExt;
+
+&OsStr   -> &str      ost.to_str().unwrap()
+&OsStr   -> String    ost.to_os_string().into_string()       via OsString
+                         .unwrap()
+&OsStr   -> Cow<str>  ost.to_string_lossy()                  Unicode replacement characters
+&OsStr   -> OsString  ost.to_os_string()
+&OsStr   -> &[u8]     ost.as_bytes()                         use std::os::unix::ffi::OsStringExt;
+
+OsString -> String    os.into_string().unwrap()              returns original OsString on failure
+OsString -> &str      os.to_str().unwrap()
+OsString -> &OsStr    os.as_os_str()
+OsString -> Vec<u8>   os.into_vec()                          use std::os::unix::ffi::OsStringExt;
+```
+
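+A few of these chained together as a runnable sanity check (a sketch; the
+`unwrap`s assume valid UTF-8, and the `OsStr` rows are Unix-only):
+
+```
+use std::ffi::{OsStr, OsString};
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
+
+fn main() {
+    let st: &str = "foo";
+    let s: String = String::from(st);             // &str     -> String
+    let u: &[u8] = s.as_bytes();                  // String   -> &[u8]
+    let v: Vec<u8> = u.to_owned();                // &[u8]    -> Vec<u8>
+    let os: OsString = OsString::from_vec(v.clone());
+    let ost: &OsStr = os.as_os_str();             // OsString -> &OsStr
+    assert_eq!(ost.as_bytes(), u);                // &OsStr   -> &[u8]
+    assert_eq!(String::from_utf8(v).unwrap(), s); // Vec<u8>  -> String
+}
+```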
+
+## Source
+
+Original source is [this document on Pastebin](https://web.archive.org/web/20190710121935/https://pastebin.com/Mhfc6b9i)
diff --git a/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md b/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md
new file mode 100644
index 000000000000..ba80888badd8
--- /dev/null
+++ b/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md
@@ -0,0 +1,123 @@
+title: Ligature Emulation in Emacs
+date: 2017-05-04
+
+Monday was (yet another)
+[NixOS hackathon][hackathon] at [OpenLab Augsburg][ola].
+[Maximilian][mhuber] was there and to my amazement
+he got working ligatures in his Haskell files in Emacs! Ever since Hasklig
+updated its format to use ligatures and private Unicode code points a while ago,
+the hack I had used in my config stopped working.
+
+Encouraged by that I decided to take a look on Tuesday. Long story short, I was
+able to [get it working in a pretty satisfying way][done].
+
+[hackathon]: https://www.meetup.com/Munich-NixOS-Meetup/events/239077247/
+[mhuber]: https://github.com/maximilianhuber
+[ola]: https://openlab-augsburg.de
+[done]: https://github.com/i-tu/Hasklig/issues/84#issuecomment-298803495
+
+What’s left to do is package it into a module and push it to MELPA.
+
+
+### elisp still sucks, but it’s bearable, sometimes
+
+I’m the kind of person who, when trying to fix something elisp related, normally
+gives up two hours later and three macro calls deep. Yes, homoiconic,
+non-lexically-scoped, self-rewriting code is not exactly my fetish.
+This time the task and the library (`prettify-symbols-mode`) were simple enough
+for that to not happen.
+
+Some interesting technical trivia:
+
+- elisp literal character syntax is `?c`. `?\t` is the tab character
+- You join characters by `(string c1 c2 c3 ...)`
+- [dash.el][dash] is pretty awesome and does what a functional programmer
+  expects. Also, Rainbow Dash.
+- Hasklig and FiraCode multi-column symbols actually [only occupy one column, on
+  the far right of the glyph][glyph]. `my-correct-symbol-bounds` fixes emacs’
+  rendering in that case.
+
+
+[dash]: https://github.com/magnars/dash.el
+[glyph]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239082368
+
+
+## Appendix A
+
+For reference, here’s the complete code as it stands now. Feel free to paste
+into your config; let’s make it [MIT][mit]. Maybe link to this site, in case there are
+updates.
+
+[mit]: https://opensource.org/licenses/MIT
+
+```elisp
+(defun my-correct-symbol-bounds (pretty-alist)
+  "Prepend a TAB character to each symbol in this alist,
+this way compose-region called by prettify-symbols-mode
+will use the correct width of the symbols
+instead of the width measured by char-width."
+  (mapcar (lambda (el)
+            (setcdr el (string ?\t (cdr el)))
+            el)
+          pretty-alist))
+
+(defun my-ligature-list (ligatures codepoint-start)
+  "Create an alist of strings to replace with
+codepoints starting from codepoint-start."
+  (let ((codepoints (-iterate '1+ codepoint-start (length ligatures))))
+    (-zip-pair ligatures codepoints)))
+
+;; list can be found at https://github.com/i-tu/Hasklig/blob/master/GlyphOrderAndAliasDB#L1588
+(setq my-hasklig-ligatures
+  (let* ((ligs '("&&" "***" "*>" "\\\\" "||" "|>" "::"
+                 "==" "===" "==>" "=>" "=<<" "!!" ">>"
+                 ">>=" ">>>" ">>-" ">-" "->" "-<" "-<<"
+                 "<*" "<*>" "<|" "<|>" "<$>" "<>" "<-"
+                 "<<" "<<<" "<+>" ".." "..." "++" "+++"
+                 "/=" ":::" ">=>" "->>" "<=>" "<=<" "<->")))
+    (my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
+
+;; nice glyphs for haskell with hasklig
+(defun my-set-hasklig-ligatures ()
+  "Add hasklig ligatures for use with prettify-symbols-mode."
+  (setq prettify-symbols-alist
+        (append my-hasklig-ligatures prettify-symbols-alist))
+  (prettify-symbols-mode))
+
+(add-hook 'haskell-mode-hook 'my-set-hasklig-ligatures)
+```
+
+## Appendix B (Update 1): FiraCode integration
+
+I also created a mapping for [FiraCode][fira]. You need to grab the [additional
+symbol font][symbol] that adds (most) ligatures to the unicode private use area.
+Consult your system documentation on how to add it to your font cache.
+Next add `"Fira Code"` and `"Fira Code Symbol"` to your font preferences. Symbol
+only contains the additional characters, so you need both.
+
+If you are on NixOS, the font package should be on the main branch shortly, [I
+added a package][symbol-pkg].
+
+[fira]: https://github.com/tonsky/FiraCode/
+[symbol]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239058632
+[symbol-pkg]: https://github.com/NixOS/nixpkgs/pull/25517
+
+Here’s the mapping adjusted for FiraCode:
+
+```elisp
+(setq my-fira-code-ligatures
+  (let* ((ligs '("www" "**" "***" "**/" "*>" "*/" "\\\\" "\\\\\\"
+                 "{-" "[]" "::" ":::" ":=" "!!" "!=" "!==" "-}"
+                 "--" "---" "-->" "->" "->>" "-<" "-<<" "-~"
+                 "#{" "#[" "##" "###" "####" "#(" "#?" "#_" "#_("
+                 ".-" ".=" ".." "..<" "..." "?=" "??" ";;" "/*"
+                 "/**" "/=" "/==" "/>" "//" "///" "&&" "||" "||="
+                 "|=" "|>" "^=" "$>" "++" "+++" "+>" "=:=" "=="
+                 "===" "==>" "=>" "=>>" "<=" "=<<" "=/=" ">-" ">="
+                 ">=>" ">>" ">>-" ">>=" ">>>" "<*" "<*>" "<|" "<|>"
+                 "<$" "<$>" "<!--" "<-" "<--" "<->" "<+" "<+>" "<="
+                 "<==" "<=>" "<=<" "<>" "<<" "<<-" "<<=" "<<<" "<~"
+                 "<~~" "</" "</>" "~@" "~-" "~=" "~>" "~~" "~~>" "%%"
+                 "x" ":" "+" "+" "*")))
+    (my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
+```
diff --git a/users/Profpatsch/cdb.nix b/users/Profpatsch/cdb.nix
new file mode 100644
index 000000000000..86e0a2d58f24
--- /dev/null
+++ b/users/Profpatsch/cdb.nix
@@ -0,0 +1,93 @@
+{ depot, pkgs, ... }:
+
+let
+  cdbListToNetencode = depot.nix.writers.rustSimple
+    {
+      name = "cdb-list-to-netencode";
+      dependencies = [
+        depot.third_party.rust-crates.nom
+        depot.users.Profpatsch.execline.exec-helpers
+        depot.users.Profpatsch.netencode.netencode-rs
+      ];
+    } ''
+    extern crate nom;
+    extern crate exec_helpers;
+    extern crate netencode;
+    use std::collections::HashMap;
+    use std::io::BufRead;
+    use nom::{IResult};
+    use nom::sequence::{tuple};
+    use nom::bytes::complete::{tag, take};
+    use nom::character::complete::{digit1, char};
+    use nom::error::{context, ErrorKind, ParseError};
+    use nom::combinator::{map_res};
+    use netencode::{T, Tag};
+
+    fn usize_t(s: &[u8]) -> IResult<&[u8], usize> {
+        context(
+            "usize",
+            map_res(
+                map_res(digit1, |n| std::str::from_utf8(n)),
+                |s| s.parse::<usize>())
+        )(s)
+    }
+
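+    // cdbdump prints one record per line in the format
+    // `+klen,vlen:key->val`; parse one such line into (key, val)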
+    fn parse_cdb_record(s: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> {
+        let (s, (_, klen, _, vlen, _)) = tuple((
+            char('+'),
+            usize_t,
+            char(','),
+            usize_t,
+            char(':')
+        ))(s)?;
+        let (s, (key, _, val)) = tuple((
+            take(klen),
+            tag("->"),
+            take(vlen),
+        ))(s)?;
+        Ok((s, (key, val)))
+    }
+
+    fn main() {
+        let mut res = vec![];
+        let stdin = std::io::stdin();
+        let mut lines = stdin.lock().split(b'\n');
+        loop {
+            match lines.next() {
+                None => exec_helpers::die_user_error("cdb-list-to-netencode", "stdin ended but we didn’t receive the empty line to signify the end of the cdbdump input!"),
+                Some(Err(err)) => exec_helpers::die_temporary("cdb-list-to-netencode", format!("could not read from stdin: {}", err)),
+                Some(Ok(line)) =>
+                    if &line == b"" {
+                        // the cdbdump input ends after an empty line (double \n)
+                        break;
+                    } else {
+                        match parse_cdb_record(&line) {
+                            Ok((b"", (key, val))) => {
+                                let (key, val) = match
+                                    std::str::from_utf8(key)
+                                    .and_then(|k| std::str::from_utf8(val).map(|v| (k, v))) {
+                                    Ok((key, val)) => (key.to_owned(), val.to_owned()),
+                                    Err(err) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("cannot decode line {:?}, we only support utf8-encoded key/values pairs for now: {}", String::from_utf8_lossy(&line), err)),
+                                };
+                                let _ = res.push((key, val));
+                            },
+                            Ok((rest, _)) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("could not decode record line {:?}, had some trailing bytes", String::from_utf8_lossy(&line))),
+                            Err(err) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("could not decode record line {:?}: {:?}", String::from_utf8_lossy(&line), err)),
+                        }
+                    }
+            }
+        }
+        let list = T::List(res.into_iter().map(
+            |(k, v)| T::Record(vec![(String::from("key"), T::Text(k)), (String::from("val"), T::Text(v))].into_iter().collect())
+        ).collect());
+        netencode::encode(&mut std::io::stdout(), &list.to_u()).expect("encoding to stdout failed");
+    }
+
+  '';
+
+in
+{
+  inherit
+    cdbListToNetencode
+    ;
+}
diff --git a/users/Profpatsch/emacs-tree-sitter-move/default.nix b/users/Profpatsch/emacs-tree-sitter-move/default.nix
new file mode 100644
index 000000000000..a9f259d96d20
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/default.nix
@@ -0,0 +1,3 @@
+# nothing yet (TODO: expose shell & tool)
+{ ... }:
+{ }
diff --git a/users/Profpatsch/emacs-tree-sitter-move/shell.nix b/users/Profpatsch/emacs-tree-sitter-move/shell.nix
new file mode 100644
index 000000000000..f400d5c02161
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/shell.nix
@@ -0,0 +1,17 @@
+{ pkgs ? import ../../../third_party { }, ... }:
+let
+  inherit (pkgs) lib;
+
+  treeSitterGrammars = pkgs.runCommandLocal "grammars" { } ''
+    mkdir -p $out/bin
+    ${lib.concatStringsSep "\n"
+      (lib.mapAttrsToList (name: src: "ln -s ${src}/parser $out/bin/${name}.so") pkgs.tree-sitter.builtGrammars)};
+  '';
+
+in
+pkgs.mkShell {
+  buildInputs = [
+    pkgs.tree-sitter.builtGrammars.python
+  ];
+  TREE_SITTER_GRAMMAR_DIR = treeSitterGrammars;
+}
diff --git a/users/Profpatsch/emacs-tree-sitter-move/test.json b/users/Profpatsch/emacs-tree-sitter-move/test.json
new file mode 100644
index 000000000000..d9f8075976d6
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/test.json
@@ -0,0 +1,14 @@
+{
+    "foo": {
+        "x": [ 1, 2, 3, 4 ],
+        "bar": "test"
+    },
+    "foo": {
+        "x": [ 1, 2, 3, 4 ],
+        "bar": "test"
+    },
+    "foo": {
+        "x": [ 1, 2, 3, 4 ],
+        "bar": "test"
+    }
+}
diff --git a/users/Profpatsch/emacs-tree-sitter-move/test.py b/users/Profpatsch/emacs-tree-sitter-move/test.py
new file mode 100644
index 000000000000..0f57bae035da
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/test.py
@@ -0,0 +1,13 @@
+(4 + 5 + 5)
+
+def foo(a, b, c)
+
+def bar(a, b):
+    4
+    4
+    4
+
+[1, 4, 5, 10]
+
+def foo():
+    pass
diff --git a/users/Profpatsch/emacs-tree-sitter-move/test.sh b/users/Profpatsch/emacs-tree-sitter-move/test.sh
new file mode 100644
index 000000000000..681081f5909d
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/test.sh
@@ -0,0 +1,14 @@
+function foo () {
+    local x=123
+}
+
+function bar () {
+    local x=123
+}
+
+echo abc def \
+     gef gef
+
+printf \
+    "%s\n" \
+    haha
diff --git a/users/Profpatsch/emacs-tree-sitter-move/tmp.el b/users/Profpatsch/emacs-tree-sitter-move/tmp.el
new file mode 100644
index 000000000000..88d13fa45b81
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/tmp.el
@@ -0,0 +1,28 @@
+(defun tree-sitter-load-from-grammar-dir (grammar-dir sym lang-name)
+  (tree-sitter-load
+   sym
+   (format "%s/bin/%s"
+           (getenv grammar-dir)
+           lang-name)))
+
+(defun tree-sitter-init-tmp-langs (alist)
+  (mapcar
+   (lambda (lang)
+     (pcase-let ((`(,name ,sym ,mode) lang))
+       (tree-sitter-load-from-grammar-dir "TREE_SITTER_GRAMMAR_DIR" sym name)
+       (cons mode sym)))
+   alist))
+
+
+(setq tree-sitter-major-mode-language-alist
+      (tree-sitter-init-tmp-langs
+       '(("python" python python-mode)
+         ("json" json js-mode)
+         ("bash" bash sh-mode)
+         )))
+
+(define-key evil-normal-state-map (kbd "C-.") #'tree-sitter-move-reset)
+(define-key evil-normal-state-map (kbd "C-<right>") #'tree-sitter-move-right)
+(define-key evil-normal-state-map (kbd "C-<left>") #'tree-sitter-move-left)
+(define-key evil-normal-state-map (kbd "C-<up>") #'tree-sitter-move-up)
+(define-key evil-normal-state-map (kbd "C-<down>") #'tree-sitter-move-down)
diff --git a/users/Profpatsch/emacs-tree-sitter-move/tree-sitter-move.el b/users/Profpatsch/emacs-tree-sitter-move/tree-sitter-move.el
new file mode 100644
index 000000000000..907e1e4081bc
--- /dev/null
+++ b/users/Profpatsch/emacs-tree-sitter-move/tree-sitter-move.el
@@ -0,0 +1,139 @@
+;; this is not an actual cursor, just a node.
+;; It’s not super efficient, but cursors can’t be *set* to an arbitrary
+;; subnode, because they can’t access the parent otherwise.
+;; We’d need a way to reset the cursor and walk down to the node?!
+(defvar-local tree-sitter-move--cursor nil
+  "the buffer-local cursor used for movement")
+
+(defvar-local tree-sitter-move--debug-overlay nil
+  "an overlay used to visually display the region currently marked by the cursor")
+
+;;;;; TODO: should everything use named nodes? Only some things?
+;;;;; maybe there should be a pair of functions for everything?
+;;;;; For now restrict to named nodes.
+
+(defun tree-sitter-move--setup ()
+  ;; TODO
+  (progn
+    ;; TODO: if tree-sitter-mode fails to load, display a better error
+    (tree-sitter-mode t)
+    (setq tree-sitter-move--cursor (tsc-root-node tree-sitter-tree))
+    (add-variable-watcher
+     'tree-sitter-move--cursor
+     #'tree-sitter-move--debug-overlay-update)))
+
+(defun tree-sitter-move--debug-overlay-update (sym newval &rest _args)
+  "variable-watcher to update the debug overlay when the cursor changes"
+  (let ((start (tsc-node-start-position newval))
+        (end (tsc-node-end-position newval)))
+    (symbol-macrolet ((o tree-sitter-move--debug-overlay))
+      (if o
+          (move-overlay o start end)
+        (setq o (make-overlay start end))
+        (overlay-put o 'face 'highlight)
+        ))))
+
+(defun tree-sitter-move--debug-overlay-teardown ()
+  "Turn of the overlay visibility and delete the overlay object"
+  (when tree-sitter-move--debug-overlay
+    (delete-overlay tree-sitter-move--debug-overlay)
+    (setq tree-sitter-move--debug-overlay nil)))
+
+(defun tree-sitter-move--teardown ()
+  (setq tree-sitter-move--cursor nil)
+  (tree-sitter-move--debug-overlay-teardown)
+  (tree-sitter-mode nil))
+
+;; Get the syntax node the cursor is on.
+(defun tsc-get-named-node-at-point ()
+  (let ((p (point)))
+    (tsc-get-named-descendant-for-position-range
+     (tsc-root-node tree-sitter-tree) p p)))
+
+;; TODO: is this function necessary?
+;; Maybe tree-sitter always guarantees that parents are named?
+(defun tsc-get-named-parent (node)
+  (when-let ((parent (tsc-get-parent node)))
+    (while (and parent (not (tsc-node-named-p parent)))
+      (setq parent (tsc-get-parent parent)))
+    parent))
+
+(defun tsc-get-first-named-node-with-siblings-up (node)
+  "Returns the first 'upwards' node that has siblings. That includes the current
+  node, so if the given node has siblings, it is returned. Returns nil if there
+  is no such node until the root"
+  (when-let ((has-siblings-p
+              (lambda (parent-node)
+                (> (tsc-count-named-children parent-node)
+                   1)))
+             (cur node)
+             (parent (tsc-get-named-parent node)))
+    (while (and parent (not (funcall has-siblings-p parent)))
+      (setq cur parent)
+      (setq parent (tsc-get-named-parent cur)))
+    cur))
+
+(defun tree-sitter-move--set-cursor-to-node (node)
+  (setq tree-sitter-move--cursor node))
+
+(defun tree-sitter-move--set-cursor-to-node-at-point ()
+  (tree-sitter-move--set-cursor-to-node (tsc-get-named-node-at-point)))
+
+(defun tree-sitter-move--move-point-to-node (node)
+  (set-window-point
+    (selected-window)
+    (tsc-node-start-position node)))
+
+
+;; interactive commands (“do what I expect” section)
+
+(defun tree-sitter-move-reset ()
+  (interactive)
+  (tree-sitter-move--set-cursor-to-node-at-point))
+
+(defun tree-sitter-move-right ()
+  (interactive)
+  (tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-next-named-sibling))
+
+(defun tree-sitter-move-left ()
+  (interactive)
+  (tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-prev-named-sibling))
+
+(defun tree-sitter-move-up ()
+  (interactive)
+  (tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-parent))
+
+;; TODO: does not skip siblings yet, because the skip function only goes up (not down)
+(defun tree-sitter-move-down ()
+  (interactive)
+  (tree-sitter-move--move-if-possible (lambda (n) (tsc-get-nth-named-child n 0))))
+
+(defun tree-sitter-move--move-skip-non-sibling-nodes (move-fn)
+  "Moves to the sidewards next sibling. If the current node does not have siblings, go
+  upwards until something has siblings and then move to the side (right or left)."
+  (tree-sitter-move--move-if-possible
+   (lambda (cur)
+     (when-let ((with-siblings
+                 (tsc-get-first-named-node-with-siblings-up cur)))
+       (funcall move-fn with-siblings)))))
+
+(defun tree-sitter-move--move-if-possible (dir-fn)
+  (let ((next (funcall dir-fn tree-sitter-move--cursor)))
+    (when next
+      (tree-sitter-move--set-cursor-to-node next)
+      (tree-sitter-move--move-point-to-node next))))
+
+; mostly stolen from tree-sitter-mode
+;;;###autoload
+(define-minor-mode tree-sitter-move-mode
+  "Minor mode to do cursor movements via tree-sitter"
+  :init-value nil
+  :lighter " tree-sitter-move"
+  (if tree-sitter-move-mode
+      (tree-sitter--error-protect
+          (progn
+            (tree-sitter-move--setup))
+        (setq tree-sitter-move-mode nil)
+        (tree-sitter-move--teardown))
+    (tree-sitter-move--teardown)))
diff --git a/users/Profpatsch/exactSource.nix b/users/Profpatsch/exactSource.nix
new file mode 100644
index 000000000000..5c713b5b1c84
--- /dev/null
+++ b/users/Profpatsch/exactSource.nix
@@ -0,0 +1,90 @@
+{ ... }:
+# SPDX-License-Identifier: MIT
+# Created by Graham Christensen
+# version from https://github.com/grahamc/mayday/blob/c48f7583e622fe2e695a2a929de34679e5818816/exact-source.nix
+
+let
+  # Require that every path specified does exist.
+  #
+  # By default, Nix won't complain if you refer to a missing file
+  # if you don't actually use it:
+  #
+  #     nix-repl> ./bogus
+  #     /home/grahamc/playground/bogus
+  #
+  #     nix-repl> toString ./bogus
+  #     "/home/grahamc/playground/bogus"
+  #
+  # so in order for this interface to be *exact*, we must
+  # specifically require every provided path exists:
+  #
+  #     nix-repl> "${./bogus}"
+  #     error: getting attributes of path
+  #     '/home/grahamc/playground/bogus': No such file or
+  #     directory
+  requireAllPathsExist = paths:
+    let
+      validation = builtins.map (path: "${path}") paths;
+    in
+    builtins.deepSeq validation paths;
+
+  # Break down a given path into a list containing the path itself
+  # and all of its parent directories.
+  #
+  # `builtins.path` / `builtins.filterSource` will ask about
+  # a containing directory, and we must say YES otherwise it will
+  # not include anything below it.
+  #
+  # Concretely, convert: "/foo/baz/tux" into:
+  #     [ "/foo/baz/tux" "/foo/baz" "/foo" ]
+  recursivelyPopDir = path:
+    if path == "/" then [ ]
+    else [ path ] ++ (recursivelyPopDir (builtins.dirOf path));
+
+  # Given a list of strings, dedup the list and return a
+  # list of all unique strings.
+  #
+  # Note: only works on strings ;):
+  #
+  # First convert [ "foo" "foo" "bar" ] into:
+  #     [
+  #       { name = "foo"; value = ""; }
+  #       { name = "foo"; value = ""; }
+  #       { name = "bar"; value = ""; }
+  #     ]
+  # then convert that to { "foo" = ""; "bar" = ""; }
+  # then get the attribute names, "foo" and "bar".
+  dedup = strings:
+    let
+      name_value_pairs = builtins.map
+        (string: { name = string; value = ""; })
+        strings;
+      attrset_of_strings = builtins.listToAttrs name_value_pairs;
+    in
+    builtins.attrNames attrset_of_strings;
+
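+  # Usage sketch (hypothetical paths):
+  #
+  #     exactSource ./. [ ./default.nix ./src/main.rs ]
+  #
+  # copies only those two files (and their parent directories)
+  # into the store.
+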
+  exactSource = source_root: paths:
+    let
+      all_possible_paths =
+        let
+          # Convert all the paths into string paths on disk,
+          # i.e. string_paths will contain [ "/home/grahamc/playground/..." ]
+          # instead of /nix/store paths.
+          string_paths = builtins.map toString
+            (requireAllPathsExist paths);
+
+          all_paths_with_duplicates = builtins.concatMap
+            recursivelyPopDir
+            string_paths;
+        in
+        dedup all_paths_with_duplicates;
+
+      pathIsSpecified = path:
+        builtins.elem path all_possible_paths;
+    in
+    builtins.path {
+      path = source_root;
+      filter = (path: _type: pathIsSpecified path);
+    };
+in
+exactSource
diff --git a/users/Profpatsch/execline/default.nix b/users/Profpatsch/execline/default.nix
new file mode 100644
index 000000000000..752774e6ad0c
--- /dev/null
+++ b/users/Profpatsch/execline/default.nix
@@ -0,0 +1,37 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  exec-helpers = depot.nix.writers.rustSimpleLib
+    {
+      name = "exec-helpers";
+    }
+    (builtins.readFile ./exec_helpers.rs);
+
+  print-one-env = depot.nix.writers.rustSimple
+    {
+      name = "print-one-env";
+      dependencies = [
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    } ''
+    extern crate exec_helpers;
+    use std::os::unix::ffi::OsStrExt;
+    use std::io::Write;
+
+    fn main() {
+      let args = exec_helpers::args("print-one-env", 1);
+      let valname = std::ffi::OsStr::from_bytes(&args[0]);
+      match std::env::var_os(&valname) {
+        None => exec_helpers::die_user_error("print-one-env", format!("Env variable `{:?}` is not set", valname)),
+        Some(val) => std::io::stdout().write_all(&val.as_bytes()).unwrap()
+      }
+    }
+  '';
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    exec-helpers
+    print-one-env
+    ;
+}
diff --git a/users/Profpatsch/execline/exec_helpers.rs b/users/Profpatsch/execline/exec_helpers.rs
new file mode 100644
index 000000000000..a57cbca35391
--- /dev/null
+++ b/users/Profpatsch/execline/exec_helpers.rs
@@ -0,0 +1,149 @@
+use std::ffi::OsStr;
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
+use std::os::unix::process::CommandExt;
+
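+// Conventions shared by these helpers: errors are printed to stderr as
+// `progname: message`, and the exit code encodes the error class
+// (see the `die_*` functions below: 1 expected, 100 usage, 101 panic,
+// 111 temporary, 126 environment, 127 missing executable).
+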
+pub fn no_args(current_prog_name: &str) -> () {
+    let mut args = std::env::args_os();
+    // remove argv[0]
+    let _ = args.nth(0);
+    if args.len() > 0 {
+        die_user_error(
+            current_prog_name,
+            format!("Expected no arguments, got {:?}", args.collect::<Vec<_>>()),
+        )
+    }
+}
+
+pub fn args(current_prog_name: &str, no_of_positional_args: usize) -> Vec<Vec<u8>> {
+    let mut args = std::env::args_os();
+    // remove argv[0]
+    let _ = args.nth(0);
+    if args.len() != no_of_positional_args {
+        die_user_error(
+            current_prog_name,
+            format!(
+                "Expected {} arguments, got {}, namely {:?}",
+                no_of_positional_args,
+                args.len(),
+                args.collect::<Vec<_>>()
+            ),
+        )
+    }
+    args.map(|arg| arg.into_vec()).collect()
+}
+
+pub fn args_for_exec(
+    current_prog_name: &str,
+    no_of_positional_args: usize,
+) -> (Vec<Vec<u8>>, Vec<Vec<u8>>) {
+    let mut args = std::env::args_os();
+    // remove argv[0]
+    let _ = args.nth(0);
+    let mut args = args.map(|arg| arg.into_vec());
+    let mut pos_args = vec![];
+    // get positional args
+    for i in 1..no_of_positional_args + 1 {
+        pos_args.push(args.nth(0).expect(&format!(
+            "{}: expects {} positional args, only got {}",
+            current_prog_name, no_of_positional_args, i
+        )));
+    }
+    // prog... is the rest of the iterator
+    let prog: Vec<Vec<u8>> = args.collect();
+    (pos_args, prog)
+}
+
+pub fn exec_into_args<'a, 'b, Args, Arg, Env, Key, Val>(
+    current_prog_name: &str,
+    args: Args,
+    env_additions: Env,
+) -> !
+where
+    Args: IntoIterator<Item = Arg>,
+    Arg: AsRef<[u8]>,
+    Env: IntoIterator<Item = (Key, Val)>,
+    Key: AsRef<[u8]>,
+    Val: AsRef<[u8]>,
+{
+    // TODO: is this possible without collecting into a Vec first, just leaving it an IntoIterator?
+    let args = args.into_iter().collect::<Vec<Arg>>();
+    let mut args = args.iter().map(|v| OsStr::from_bytes(v.as_ref()));
+    let prog = args.nth(0).expect(&format!(
+        "{}: first argument must be an executable",
+        current_prog_name
+    ));
+    // TODO: same here
+    let env = env_additions.into_iter().collect::<Vec<(Key, Val)>>();
+    let env = env
+        .iter()
+        .map(|(k, v)| (OsStr::from_bytes(k.as_ref()), OsStr::from_bytes(v.as_ref())));
+    let err = std::process::Command::new(prog).args(args).envs(env).exec();
+    die_missing_executable(
+        current_prog_name,
+        format!(
+            "exec failed: {}, while trying to execing into {:?}",
+            err, prog
+        ),
+    );
+}
+
+/// Exit 1 to signify a generic expected error
+/// (e.g. something that sometimes just goes wrong, like a nix build).
+pub fn die_expected_error<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(1, current_prog_name, msg)
+}
+
+/// Exit 100 to signify a user error (“the user is holding it wrong”).
+/// This is a permanent error, if the program is executed the same way
+/// it should crash with 100 again.
+pub fn die_user_error<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(100, current_prog_name, msg)
+}
+
+/// Exit 101 to signify an unexpected crash (failing assertion or panic).
+/// This is the same exit code that `panic!()` emits.
+pub fn die_panic<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(101, current_prog_name, msg)
+}
+
+/// Exit 111 to signify a temporary error (such as resource exhaustion)
+pub fn die_temporary<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(111, current_prog_name, msg)
+}
+
+/// Exit 126 to signify an environment problem
+/// (the user has set up stuff incorrectly so the program cannot work)
+pub fn die_environment_problem<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(126, current_prog_name, msg)
+}
+
+/// Exit 127 to signify a missing executable.
+pub fn die_missing_executable<S>(current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    die_with(127, current_prog_name, msg)
+}
+
+fn die_with<S>(status: i32, current_prog_name: &str, msg: S) -> !
+where
+    S: AsRef<str>,
+{
+    eprintln!("{}: {}", current_prog_name, msg.as_ref());
+    std::process::exit(status)
+}
diff --git a/users/Profpatsch/git-db/default.nix b/users/Profpatsch/git-db/default.nix
new file mode 100644
index 000000000000..ad5d927677bf
--- /dev/null
+++ b/users/Profpatsch/git-db/default.nix
@@ -0,0 +1,10 @@
+{ depot, pkgs, lib, ... }:
+
+depot.nix.writers.rustSimple
+{
+  name = "git-db";
+  dependencies = [
+    depot.third_party.rust-crates.git2
+  ];
+}
+  (builtins.readFile ./git-db.rs)
diff --git a/users/Profpatsch/git-db/git-db.rs b/users/Profpatsch/git-db/git-db.rs
new file mode 100644
index 000000000000..c8019bf03661
--- /dev/null
+++ b/users/Profpatsch/git-db/git-db.rs
@@ -0,0 +1,90 @@
+extern crate git2;
+use std::os::unix::ffi::OsStrExt;
+use std::path::PathBuf;
+
+const DEFAULT_BRANCH: &str = "refs/heads/main";
+
+fn main() {
+    let git_db_dir = std::env::var_os("GIT_DB_DIR").expect("set GIT_DB_DIR");
+    let git_db = PathBuf::from(git_db_dir).join("git");
+
+    std::fs::create_dir_all(&git_db).unwrap();
+
+    let repo = git2::Repository::init_opts(
+        &git_db,
+        git2::RepositoryInitOptions::new()
+            .bare(true)
+            .mkpath(true)
+            .description("git-db database")
+            .initial_head(DEFAULT_BRANCH),
+    )
+    .expect(&format!(
+        "unable to create or open bare git repo at {}",
+        &git_db.display()
+    ));
+
+    let mut index = repo.index().expect("cannot get the git index file");
+    eprintln!("{:#?}", index.version());
+    index.clear().expect("could not clean the index");
+
+    let now = std::time::SystemTime::now()
+        .duration_since(std::time::SystemTime::UNIX_EPOCH)
+        .expect("unable to get system time");
+
+    let now_git_time = git2::IndexTime::new(
+        now.as_secs() as i32, // lol
+        u32::from(now.subsec_nanos()),
+    );
+
+    let data = "hi, it’s me".as_bytes();
+
+    index
+        .add_frombuffer(
+            &git2::IndexEntry {
+            mtime: now_git_time,
+            ctime: now_git_time,
+            // these filesystem fields don’t make sense for an in-memory blob
+            dev: 0,
+            ino: 0,
+            mode: /*libc::S_ISREG*/ 0b1000 << (3+9) | /* read write for owner */ 0o644,
+            uid: 0,
+            gid: 0,
+            file_size: data.len() as u32, // lol again
+            id: git2::Oid::zero(),
+            flags: 0,
+            flags_extended: 0,
+            path: "hi.txt".as_bytes().to_owned(),
+        },
+            data,
+        )
+        .expect("could not add data to index");
+
+    let oid = index.write_tree().expect("could not write index tree");
+
+    let to_add_tree = repo
+        .find_tree(oid)
+        .expect("we just created this tree, where did it go?");
+
+    let parent_commits = match repo.find_reference(DEFAULT_BRANCH) {
+        Ok(ref_) => vec![ref_.peel_to_commit().expect(&format!(
+            "reference {} does not point to a commit",
+            DEFAULT_BRANCH
+        ))],
+        Err(err) => match err.code() {
+            // no commit exists yet
+            git2::ErrorCode::NotFound => vec![],
+            _ => panic!("could not read latest commit from {}", DEFAULT_BRANCH),
+        },
+    };
+    repo.commit(
+        Some(DEFAULT_BRANCH),
+        &git2::Signature::now("Mr. Authorboy", "author@example.com").unwrap(),
+        &git2::Signature::now("Mr. Commiterboy", "committer@example.com").unwrap(),
+        "This is my first commit!\n\
+         \n\
+         I wonder if it supports extended commit descriptions?\n",
+        &to_add_tree,
+        &parent_commits.iter().collect::<Vec<_>>()[..],
+    )
+    .expect("could not commit the index we just wrote");
+}
diff --git a/users/Profpatsch/imap-idle.nix b/users/Profpatsch/imap-idle.nix
new file mode 100644
index 000000000000..84af5d0e54a9
--- /dev/null
+++ b/users/Profpatsch/imap-idle.nix
@@ -0,0 +1,17 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  imap-idle = depot.nix.writers.rustSimple
+    {
+      name = "imap-idle";
+      dependencies = [
+        depot.users.Profpatsch.arglib.netencode.rust
+        depot.third_party.rust-crates.imap
+        depot.third_party.rust-crates.epoll
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    }
+    (builtins.readFile ./imap-idle.rs);
+
+in
+imap-idle
diff --git a/users/Profpatsch/imap-idle.rs b/users/Profpatsch/imap-idle.rs
new file mode 100644
index 000000000000..937847b8798a
--- /dev/null
+++ b/users/Profpatsch/imap-idle.rs
@@ -0,0 +1,140 @@
+extern crate exec_helpers;
+// extern crate arglib_netencode;
+// extern crate netencode;
+extern crate epoll;
+extern crate imap;
+
+// use netencode::dec;
+use imap::extensions::idle::SetReadTimeout;
+use std::convert::TryFrom;
+use std::fs::File;
+use std::io::{Read, Write};
+use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::time::Duration;
+
+/// Implements a UCSPI client that wraps fd 6 & 7
+/// and implements Write and Read with a timeout.
+/// See https://cr.yp.to/proto/ucspi.txt
+#[derive(Debug)]
+struct UcspiClient {
+    read: File,
+    read_epoll_fd: RawFd,
+    read_timeout: Option<Duration>,
+    write: File,
+}
+
+impl UcspiClient {
+    /// Use fd 6 and 7 to connect to the net, as is specified.
+    /// Unsafe because fd 6 and 7 are global resources and we don’t mutex them.
+    pub unsafe fn new_from_6_and_7() -> std::io::Result<Self> {
+        unsafe {
+            let read_epoll_fd = epoll::create(false)?;
+            Ok(UcspiClient {
+                read: File::from_raw_fd(6),
+                read_epoll_fd,
+                read_timeout: None,
+                write: File::from_raw_fd(7),
+            })
+        }
+    }
+}
+
+/// Emulates set_read_timeout() like on a TCP socket with an epoll on read.
+/// The BSD socket API is rather bad, so fd != fd,
+/// and if we cast the `UcspiClient` fds to `TcpStream` instead of `File`,
+/// we’d break any UCSPI client programs that *don’t* connect to TCP.
+/// Instead we use the (linux) `epoll` API in read to wait on the timeout.
+impl SetReadTimeout for UcspiClient {
+    fn set_read_timeout(&mut self, timeout: Option<Duration>) -> imap::Result<()> {
+        self.read_timeout = timeout;
+        Ok(())
+    }
+}
+
+impl Read for UcspiClient {
+    // TODO: test the epoll code with a short timeout
+    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+        const NO_DATA: u64 = 0;
+        // in order to implement the read_timeout,
+        // we use epoll to wait for either data or time out
+        epoll::ctl(
+            self.read_epoll_fd,
+            epoll::ControlOptions::EPOLL_CTL_ADD,
+            self.read.as_raw_fd(),
+            epoll::Event::new(epoll::Events::EPOLLIN, NO_DATA),
+        )?;
+        let UNUSED = epoll::Event::new(epoll::Events::EPOLLIN, NO_DATA);
+        let wait = epoll::wait(
+            self.read_epoll_fd,
+            match self.read_timeout {
+                Some(duration) => {
+                    i32::try_from(duration.as_millis()).expect("duration too big for epoll")
+                }
+                None => -1, // infinite
+            },
+            // event that was generated; but we don’t care
+            &mut vec![UNUSED; 1][..],
+        );
+        // Delete the listen fd from the epoll fd before reacting
+        // (otherwise it fails on the next read with `EPOLL_CTL_ADD`)
+        epoll::ctl(
+            self.read_epoll_fd,
+            epoll::ControlOptions::EPOLL_CTL_DEL,
+            self.read.as_raw_fd(),
+            UNUSED,
+        )?;
+        match wait {
+            // timeout happened (0 events)
+            Ok(0) => Err(std::io::Error::new(
+                std::io::ErrorKind::TimedOut,
+                "ucspi read timeout",
+            )),
+            // it’s ready for reading, we can read
+            Ok(_) => self.read.read(buf),
+            // error
+            err => err,
+        }
+    }
+}
+
+/// Just proxy through the `Write` of the write fd.
+impl Write for UcspiClient {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        self.write.write(buf)
+    }
+    fn flush(&mut self) -> std::io::Result<()> {
+        self.write.flush()
+    }
+}
+
+/// Connect to IMAP account and listen for new mails on the INBOX.
+fn main() {
+    exec_helpers::no_args("imap-idle");
+
+    // TODO: use arglib_netencode
+    let username = std::env::var("IMAP_USERNAME").expect("username");
+    let password = std::env::var("IMAP_PASSWORD").expect("password");
+
+    let net = unsafe { UcspiClient::new_from_6_and_7().expect("no ucspi client for you") };
+    let client = imap::Client::new(net);
+    let mut session = client
+        .login(username, password)
+        .map_err(|(err, _)| err)
+        .expect("unable to login");
+    eprintln!("{:#?}", session);
+    let list = session.list(None, Some("*"));
+    eprintln!("{:#?}", list);
+    let mailbox = session.examine("INBOX");
+    eprintln!("{:#?}", mailbox);
+    fn now() -> String {
+        String::from_utf8_lossy(&std::process::Command::new("date").output().unwrap().stdout)
+            .trim_end()
+            .to_string()
+    }
+    loop {
+        eprintln!("{}: idling on INBOX", now());
+        let mut handle = session.idle().expect("cannot idle on INBOX");
+        let () = handle.wait_keepalive().expect("waiting on idle failed");
+        eprintln!("{}: The mailbox has changed!", now());
+    }
+}
diff --git a/users/Profpatsch/importDhall.nix b/users/Profpatsch/importDhall.nix
new file mode 100644
index 000000000000..1947ad1ce123
--- /dev/null
+++ b/users/Profpatsch/importDhall.nix
@@ -0,0 +1,93 @@
+{ pkgs, depot, lib, ... }:
+let
+
+  # import the dhall file as nix expression via dhall-nix.
+  # Converts the normalized dhall expression to a nix file,
+  # puts it in the store and imports it.
+  # Types are erased, functions are converted to nix functions,
+  # union values are nix functions that take a record of match
+  # functions for their alternatives.
+  # TODO: document better
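+  #
+  # Usage sketch (hypothetical files):
+  #
+  #     importDhall {
+  #       root = ./.;
+  #       files = [ "lib.dhall" ];
+  #       main = "main.dhall";
+  #       deps = [ ];
+  #       type = "Natural";
+  #     }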
+  importDhall =
+    {
+      # Root path of the dhall file tree to import (will be filtered by files)
+      root
+    , # A list of files which should be taken from `root` (relative paths).
+      # This is for minimizing the amount of things that have to be copied to the store.
+      # TODO: can you have directory prefixes?
+      files
+    , # The path of the dhall file which should be evaluated, relative to `root`, has to be in `files`
+      main
+    , # List of dependencies (TODO: what is a dependency?)
+      deps
+    , # dhall type of `main`, or `null` if anything should be possible.
+      type ? null
+    }:
+    let
+      absRoot = path: toString root + "/" + path;
+      src =
+        depot.users.Profpatsch.exactSource
+          root
+          # exactSource wants nix paths, but I think relative paths
+          # as strings are more intuitive.
+          ([ (absRoot main) ] ++ (map absRoot files));
+
+      cache = ".cache";
+      cacheDhall = "${cache}/dhall";
+
+      hadTypeAnnot = type != null;
+      typeAnnot = lib.optionalString hadTypeAnnot ": ${type}";
+
+      convert = pkgs.runCommandLocal "dhall-to-nix" { inherit deps; } ''
+        mkdir -p ${cacheDhall}
+        for dep in $deps; do
+          ${pkgs.xorg.lndir}/bin/lndir -silent $dep/${cacheDhall} ${cacheDhall}
+        done
+
+        export XDG_CACHE_HOME=$(pwd)/${cache}
+        # go into the source directory, so that the type can import files.
+        # TODO: This is a bit of a hack hrm.
+        cd "${src}"
+        printf 'Generating dhall nix code. Run
+        %s --file %s
+        to reproduce
+        ' \
+          ${pkgs.dhall}/bin/dhall \
+          ${absRoot main}
+        ${if hadTypeAnnot then ''
+            printf '%s' ${lib.escapeShellArg "${src}/${main} ${typeAnnot}"} \
+              | ${pkgs.dhall-nix}/bin/dhall-to-nix \
+              > $out
+          ''
+          else ''
+            printf 'No type annotation given, the dhall expression type was:\n'
+            ${pkgs.dhall}/bin/dhall type --file "${src}/${main}"
+            printf '%s' ${lib.escapeShellArg "${src}/${main}"} \
+              | ${pkgs.dhall-nix}/bin/dhall-to-nix \
+              > $out
+          ''}
+
+      '';
+    in
+    import convert;
+
+
+  # read dhall file in as JSON, then import as nix expression.
+  # The dhall file must not try to import from non-local URLs!
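+  #
+  # Usage sketch (hypothetical file):
+  #
+  #     readDhallFileAsJson "Natural" ./version.dhall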
+  readDhallFileAsJson = dhallType: file:
+    let
+      convert = pkgs.runCommandLocal "dhall-to-json" { } ''
+        printf '%s' ${lib.escapeShellArg "${file} : ${dhallType}"} \
+          | ${pkgs.dhall-json}/bin/dhall-to-json \
+          > $out
+      '';
+    in
+    builtins.fromJSON (builtins.readFile convert);
+
+in
+{
+  inherit
+    importDhall
+    readDhallFileAsJson
+    ;
+}
diff --git a/users/Profpatsch/lens.nix b/users/Profpatsch/lens.nix
new file mode 100644
index 000000000000..28f7506bddae
--- /dev/null
+++ b/users/Profpatsch/lens.nix
@@ -0,0 +1,137 @@
+{ ... }:
+let
+  id = x: x;
+
+  const = x: y: x;
+
+  comp = f: g: x: f (g x);
+
+  _ = v: f: f v;
+
+  # Profunctor (p :: Type -> Type -> Type)
+  Profunctor = rec {
+    # dimap :: (a -> b) -> (c -> d) -> p b c -> p a d
+    dimap = f: g: x: lmap f (rmap g x);
+    # lmap :: (a -> b) -> p b c -> p a c
+    lmap = f: dimap f id;
+    # rmap :: (c -> d) -> p b c -> p b d
+    rmap = g: dimap id g;
+  };
+
+  # Profunctor (->)
+  profunctorFun = Profunctor // {
+    # dimap :: (a -> b) -> (c -> d) -> (b -> c) -> a -> d
+    dimap = ab: cd: bc: a: cd (bc (ab a));
+    # lmap :: (a -> b) -> (b -> c) -> (a -> c)
+    lmap = ab: bc: a: bc (ab a);
+    # rmap :: (c -> d) -> (b -> c) -> (b -> d)
+    rmap = cd: bc: b: cd (bc b);
+  };
+
+  tuple = fst: snd: {
+    inherit fst snd;
+  };
+
+  swap = { fst, snd }: {
+    fst = snd;
+    snd = fst;
+  };
+
+  # Profunctor p => Strong (p :: Type -> Type -> Type)
+  Strong = pro: pro // rec {
+    # firstP :: p a b -> p (a, c) (b, c)
+    firstP = pab: pro.dimap swap swap (pro.secondP pab);
+    # secondP :: p a b -> p (c, a) (c, b)
+    secondP = pab: pro.dimap swap swap (pro.firstP pab);
+  };
+
+  # Strong (->)
+  strongFun = Strong profunctorFun // {
+    # firstP :: (a -> b) -> (a, c) -> (b, c)
+    firstP = f: { fst, snd }: { fst = f fst; inherit snd; };
+    # secondP :: (a -> b) -> (c, a) -> (c, b)
+    secondP = f: { snd, fst }: { snd = f snd; inherit fst; };
+  };
+
+  # Iso s t a b :: forall p. Profunctor p -> p a b -> p s t
+
+  # iso :: (s -> a) -> (b -> t) -> Iso s t a b
+  iso = pro: pro.dimap;
+
+  # Lens s t a b :: forall p. Strong p -> p a b -> p s t
+
+  # lens :: (s -> a) -> (s -> b -> t) -> Lens s t a b
+  lens = strong: get: set: pab:
+    lensP
+      strong
+      (s: tuple (get s) (b: set s b))
+      pab;
+
+  # lensP :: (s -> (a, b -> t)) -> Lens s t a b
+  lensP = strong: to: pab:
+    strong.dimap
+      to
+      ({ fst, snd }: snd fst)
+      (strong.firstP pab);
+
+  # first element of a tuple
+  # _1 :: Lens (a, c) (b, c) a b
+  _1 = strong: strong.firstP;
+
+  # second element of a tuple
+  # _2 :: Lens (c, a) (c, b) a b
+  _2 = strong: strong.secondP;
+
+  # access the given field in the record
+  # field :: (f :: String) -> Lens { f :: a; ... } { f :: b; ... } a b
+  field = name: strong:
+    lens
+      strong
+      (attrs: attrs.${name})
+      (attrs: a: attrs // { ${name} = a; });
+
+  # Setter :: (->) a b -> (->) s t
+  # Setter :: (a -> b) -> (s -> t)
+
+
+  # Subclasses of profunctor for (->).
+  # We only have Strong for now, but when we implement Choice we need to add it here.
+  profunctorSubclassesFun = strongFun;
+
+  # over :: Setter s t a b -> (a -> b) -> s -> t
+  over = setter:
+    # A setter needs to be instantiated at the profunctor-subclass instances of (->).
+    (setter profunctorSubclassesFun);
+
+  # set :: Setter s t a b -> b -> s -> t
+  set = setter: b: over setter (const b);
+
+  # combine a bunch of optics, for the subclass instance of profunctor you give it.
+  optic = accessors: profunctorSubclass:
+    builtins.foldl' comp id
+      (map (accessor: accessor profunctorSubclass) accessors);
+
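+  # Usage sketch:
+  #
+  #   over (field "a") (x: x + 1) { a = 1; }
+  #   => { a = 2; }
+  #
+  #   set (optic [ (field "a") _1 ]) 42 { a = tuple 1 "b"; }
+  #   => { a = { fst = 42; snd = "b"; }; }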
+
+in
+{
+  inherit
+    id
+    _
+    const
+    comp
+    Profunctor
+    profunctorFun
+    Strong
+    strongFun
+    iso
+    lens
+    optic
+    _1
+    _2
+    field
+    tuple
+    swap
+    over
+    set
+    ;
+}
diff --git a/users/Profpatsch/lib.nix b/users/Profpatsch/lib.nix
new file mode 100644
index 000000000000..879d87755d56
--- /dev/null
+++ b/users/Profpatsch/lib.nix
@@ -0,0 +1,108 @@
+{ depot, pkgs, ... }:
+let
+  bins = depot.nix.getBins pkgs.coreutils [ "printf" "echo" "cat" "printenv" "tee" ]
+    // depot.nix.getBins pkgs.bash [ "bash" ]
+    // depot.nix.getBins pkgs.fdtools [ "multitee" ]
+  ;
+
+  # Print `msg` and argv to stderr, then execute into argv
+  debugExec = msg: depot.nix.writeExecline "debug-exec" { } [
+    "if"
+    [
+      "fdmove"
+      "-c"
+      "1"
+      "2"
+      "if"
+      [ bins.printf "%s: " msg ]
+      "if"
+      [ bins.echo "$@" ]
+    ]
+    "$@"
+  ];
+
+  # Print stdin to stderr and stdout
+  eprint-stdin = depot.nix.writeExecline "eprint-stdin" { } [
+    "pipeline"
+    [ bins.multitee "0-1,2" ]
+    "$@"
+  ];
+
+  # Assume the input on stdin is netencode, pretty print it to stderr and forward it to stdout
+  eprint-stdin-netencode = depot.nix.writeExecline "eprint-stdin-netencode" { } [
+    "pipeline"
+    [
+      # move stdout to 3
+      "fdmove"
+      "3"
+      "1"
+      # the multitee copies stdin to 1 (the other pipeline end) and 3 (the stdout of the outer pipeline block)
+      "pipeline"
+      [ bins.multitee "0-1,3" ]
+      # redirect pretty’s stdout to stderr, merging it with pretty’s own stderr
+      "fdmove"
+      "-c"
+      "1"
+      "2"
+      depot.users.Profpatsch.netencode.pretty
+    ]
+    "$@"
+  ];
+
+  # print the given environment variable in $1 to stderr, then execute into the rest of argv
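+  # Example: `eprintenv HOME echo hi` prints the value of $HOME to stderr,
+  # then executes `echo hi`.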
+  eprintenv = depot.nix.writeExecline "eprintenv" { readNArgs = 1; } [
+    "ifelse"
+    [ "fdmove" "-c" "1" "2" bins.printenv "$1" ]
+    [ "$@" ]
+    "if"
+    [ depot.tools.eprintf "eprintenv: could not find \"\${1}\" in the environment\n" ]
+    "$@"
+  ];
+
+  # Split stdin into two commands, given by a block and the rest of argv
+  #
+  # Example (execline):
+  #
+  #   pipeline [ echo foo ]
+  #   split-stdin [ fdmove 1 2 foreground [ cat ] echo "bar" ] cat
+  #
+  #   stdout: foo\n
+  #   stderr: foo\nbar\n
+  split-stdin = depot.nix.writeExecline "split-stdin" { argMode = "env"; } [
+    "pipeline"
+    [
+      # this is horrible yes but the quickest way I knew how to implement it
+      "runblock"
+      "1"
+      bins.bash
+      "-c"
+      ''${bins.tee} >("$@")''
+      "bash-split-stdin"
+    ]
+    "runblock"
+    "-r"
+    "1"
+  ];
+
+  # remove everything but a few selected environment variables
+  runInEmptyEnv = keepVars:
+    let
+      importas = pkgs.lib.concatMap (var: [ "importas" "-i" var var ]) keepVars;
+      # we have to explicitly call export here, because PATH is probably empty
+      export = pkgs.lib.concatMap (var: [ "${pkgs.execline}/bin/export" var ''''${${var}}'' ]) keepVars;
+    in
+    depot.nix.writeExecline "empty-env" { }
+      (importas ++ [ "emptyenv" ] ++ export ++ [ "${pkgs.execline}/bin/exec" "$@" ]);
+
+
+in
+{
+  inherit
+    debugExec
+    eprint-stdin
+    eprint-stdin-netencode
+    eprintenv
+    split-stdin
+    runInEmptyEnv
+    ;
+}
diff --git a/users/Profpatsch/netencode/README.md b/users/Profpatsch/netencode/README.md
new file mode 100644
index 000000000000..8dc39f633761
--- /dev/null
+++ b/users/Profpatsch/netencode/README.md
@@ -0,0 +1,115 @@
+# netencode 0.1-unreleased
+
+A [bencode][]- and [netstring][]-inspired pipe format that should be trivial to generate correctly in every context (it only requires a `byte_length()` and a `printf()`), easy to parse (100 lines of code or less), and mostly human-decipherable for easy debugging, while supporting nested record and sum types.
+
+
+## scalars
+
+Scalars have the format `[type prefix][size]:[value],`,
+where `size` is a natural number without leading zeroes.
+
+### unit
+
+The unit (`u`) has only one value.
+
+* The unit is: `u,`
+
+### numbers
+
+Naturals (`n`) and Integers (`i`), with a maximum size in bits.
+
+Bit sizes are specified in 2^n increments, 1 to 9 (`n1`..`n9`, `i1`..`i9`).
+
+* Natural `1234` that fits in 32 bits (2^5): `n5:1234,`
+* Integer `-42` that fits in 8 bits (2^3): `i3:-42,`
+* Integer `23` that fits in 64 bits (2^6): `i6:23,`
+* Integer `-1` that fits in 512 bits (2^9): `i9:-1,`
+* Natural `0` that fits in 1 bit (2^1): `n1:0,`
+
+An implementation can define the biggest numbers it supports, and has to throw an error for anything bigger. It has to support everything smaller, so for example if you support up to i6/n6, you have to support 1–6 as well. An implementation could support up to the current architecture’s wordsize for example.
+
+Floats are not supported, you can implement fixed-size decimals or ratios using integers.
+
+### booleans
+
+A boolean is represented as `n1`.
+
+* `n1:0,`: false
+* `n1:1,`: true
+
+TODO: should we add `f,` and `t,`?
+
+### text
+
+Text (`t`) that *must* be encoded as UTF-8, starting with its length in bytes:
+
+* The string `hello world` (11 bytes): `t11:hello world,`
+* The string `今日は` (9 bytes): `t9:今日は,`
+* The string `:,` (2 bytes): `t2::,,`
+* The empty string `` (0 bytes): `t0:,`
+
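+To illustrate how little a generator needs, here is a minimal sketch of the text encoder (in Rust; an illustration, not the reference implementation):
+
+```rust
+/// Encode a text scalar: `t[size in bytes]:[utf-8 value],`
+fn encode_text(s: &str) -> String {
+    format!("t{}:{},", s.len(), s) // str::len counts bytes, not characters
+}
+
+fn main() {
+    assert_eq!(encode_text("hello world"), "t11:hello world,");
+    assert_eq!(encode_text("今日は"), "t9:今日は,");
+}
+```
+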
+### binary
+
+Arbitrary binary strings (`b`) that can contain any data, starting with their length in bytes.
+
+* The ASCII string `hello world` as binary data (11 bytes): `b11:hello world,`
+* The empty binary string (0 bytes): `b0:,`
+* The bytestring with `^D` (1 byte): `b1:,`
+
+Since the binary strings are length-prefixed, they can contain `\0` and no escaping is required. Care has to be taken in languages with `\0`-terminated bytestrings.
+
+Use text (`t`) if you have utf-8 encoded data.
+
+## tagged values
+
+### tags
+
+A tag (`<`) gives a value a name. The tag name is UTF-8 encoded and prefixed with its length in bytes; the tagged value follows after a `|`.
+
+* The tag `foo` (3 bytes) tagging the text `hello` (5 bytes): `<3:foo|t5:hello,`
+* The tag `` (0 bytes) tagging the 8-bit integer 0: `<0:|i3:0,`
+
+### records (products/records), also maps
+
+A record (`{`) is a concatenation of tags (`<`). It needs to be closed with `}`.
+
+If tag names repeat, the *earlier* ones should be ignored.
+Using the last tag corresponds to the way most languages handle converting a list of tuples to maps: a for-loop with Map.insert that doesn’t check the contents first. Otherwise you’d have to reverse the list first or remember which keys you already inserted.
+
+Ordering of tags in a record does not matter.
+
+Similar to text, records start with the length of their *whole encoded content*, in bytes. This makes it possible to treat their contents as opaque bytestrings.
+
+* There is no empty record. (TODO: make the empty record the unit type, remove `u,`?)
+* A record with one field, `foo` (unit value): `{9:<3:foo|u,}`
+* A record with two fields, `foo` and `x`: `{21:<3:foo|u,<1:x|t3:baz,}`
+* The same record: `{21:<1:x|t3:baz,<3:foo|u,}`
+* The same record (earlier occurrences of repeated fields are ignored): `{28:<1:x|u,<1:x|t3:baz,<3:foo|u,}`
+
+### sums (tagged unions)
+
+Simply a tagged value. The tag marker `<` indicates it is a sum if it appears outside of a record.
+
+## lists
+
+A list (`[`) imposes an ordering on a sequence of values. It needs to be closed with `]`. Values in it are simply concatenated.
+
+Similar to records, lists start with the length of their whole encoded content.
+
+* The empty list: `[0:]`
+* The list with one element, the string `foo`: `[7:t3:foo,]`
+* The list with text `foo` followed by i3 `-42`: `[14:t3:foo,i3:-42,]`
+* The list with `Some` and `None` tags: `[35:<4:Some|t3:foo,<4:None|u,<4:None|u,]`
+
+## motivation
+
+TODO
+
+## guarantees
+
+TODO: do I want unique representation (bijection like bencode?) This would put more restrictions on the generator, like sorting records in lexicographic order, but would make it possible to compare without decoding
+
+
+[bencode]: https://en.wikipedia.org/wiki/Bencode
+[netstring]: https://en.wikipedia.org/wiki/Netstring
diff --git a/users/Profpatsch/netencode/default.nix b/users/Profpatsch/netencode/default.nix
new file mode 100644
index 000000000000..d38925814832
--- /dev/null
+++ b/users/Profpatsch/netencode/default.nix
@@ -0,0 +1,160 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  netencode-rs = depot.nix.writers.rustSimpleLib
+    {
+      name = "netencode";
+      dependencies = [
+        depot.third_party.rust-crates.nom
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    }
+    (builtins.readFile ./netencode.rs);
+
+  gen = import ./gen.nix { inherit lib; };
+
+  pretty-rs = depot.nix.writers.rustSimpleLib
+    {
+      name = "netencode-pretty";
+      dependencies = [
+        netencode-rs
+      ];
+    }
+    (builtins.readFile ./pretty.rs);
+
+  pretty = depot.nix.writers.rustSimple
+    {
+      name = "netencode-pretty";
+      dependencies = [
+        netencode-rs
+        pretty-rs
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    } ''
+    extern crate netencode;
+    extern crate netencode_pretty;
+    extern crate exec_helpers;
+
+    fn main() {
+      let (_, prog) = exec_helpers::args_for_exec("netencode-pretty", 0);
+      let mut buf = vec![];
+      let u = netencode::u_from_stdin_or_die_user_error("netencode-pretty", &mut buf);
+      match netencode_pretty::Pretty::from_u(u).print_multiline(&mut std::io::stdout()) {
+        Ok(()) => {},
+        Err(err) => exec_helpers::die_temporary("netencode-pretty", format!("could not write to stdout: {}", err))
+      }
+    }
+  '';
+
+  netencode-mustache = depot.nix.writers.rustSimple
+    {
+      name = "netencode_mustache";
+      dependencies = [
+        depot.users.Profpatsch.arglib.netencode.rust
+        netencode-rs
+        depot.third_party.rust-crates.mustache
+      ];
+    }
+    (builtins.readFile ./netencode-mustache.rs);
+
+
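+  # Read netencode from stdin and print the (still encoded) value of the
+  # given record field. Usage sketch:
+  #
+  #   printf '{13:<3:foo|i6:42,}' | record-get foo   # prints `i6:42,`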
+  record-get = depot.nix.writers.rustSimple
+    {
+      name = "record-get";
+      dependencies = [
+        netencode-rs
+        depot.users.Profpatsch.execline.exec-helpers
+        depot.users.Profpatsch.arglib.netencode.rust
+      ];
+    } ''
+    extern crate netencode;
+    extern crate arglib_netencode;
+    extern crate exec_helpers;
+    use netencode::{encode, dec};
+    use netencode::dec::{Decoder, DecodeError};
+
+    fn main() {
+        let mut buf = vec![];
+        let args = exec_helpers::args("record-get", 1);
+        let field = match std::str::from_utf8(&args[0]) {
+            Ok(f) => f,
+            Err(_e) => exec_helpers::die_user_error("record-get", format!("The field name needs to be valid unicode"))
+        };
+        let u = netencode::u_from_stdin_or_die_user_error("record-get", &mut buf);
+        match (dec::RecordDot {field, inner: dec::AnyU }).dec(u) {
+            Ok(u) => encode(&mut std::io::stdout(), &u).expect("encoding to stdout failed"),
+            Err(DecodeError(err)) => exec_helpers::die_user_error("record-get", err)
+        }
+    }
+  '';
+
+  record-splice-env = depot.nix.writers.rustSimple
+    {
+      name = "record-splice-env";
+      dependencies = [
+        netencode-rs
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    } ''
+    extern crate netencode;
+    extern crate exec_helpers;
+    use netencode::dec::{Record, Try, ScalarAsBytes, Decoder, DecodeError};
+
+    fn main() {
+        let mut buf = vec![];
+        let u = netencode::u_from_stdin_or_die_user_error("record-splice-env", &mut buf);
+        let (_, prog) = exec_helpers::args_for_exec("record-splice-env", 0);
+        match Record(Try(ScalarAsBytes)).dec(u) {
+            Ok(map) => {
+                exec_helpers::exec_into_args(
+                    "record-splice-env",
+                    prog,
+                    // some elements can’t be decoded as scalars, so just ignore them
+                    map.into_iter().filter_map(|(k, v)| v.map(|v2| (k, v2)))
+                );
+            },
+            Err(DecodeError(err)) => exec_helpers::die_user_error("record-splice-env", err),
+        }
+    }
+  '';
+
+  env-splice-record = depot.nix.writers.rustSimple
+    {
+      name = "env-splice-record";
+      dependencies = [
+        netencode-rs
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    } ''
+    extern crate netencode;
+    extern crate exec_helpers;
+    use netencode::{T};
+    use std::os::unix::ffi::OsStringExt;
+
+    fn main() {
+        exec_helpers::no_args("env-splice-record");
+        let mut res = std::collections::HashMap::new();
+        for (key, val) in std::env::vars_os() {
+          match (String::from_utf8(key.into_vec()), String::from_utf8(val.into_vec())) {
+            (Ok(k), Ok(v)) => { let _ = res.insert(k, T::Text(v)); },
+            // same as in record-splice-env, we ignore non-utf8 variables
+            (_, _) => {},
+          }
+        }
+        netencode::encode(&mut std::io::stdout(), &T::Record(res).to_u()).unwrap()
+    }
+  '';
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    netencode-rs
+    pretty-rs
+    pretty
+    netencode-mustache
+    record-get
+    record-splice-env
+    env-splice-record
+    gen
+    ;
+}
diff --git a/users/Profpatsch/netencode/gen.nix b/users/Profpatsch/netencode/gen.nix
new file mode 100644
index 000000000000..efc9629ca0df
--- /dev/null
+++ b/users/Profpatsch/netencode/gen.nix
@@ -0,0 +1,73 @@
+{ lib }:
+let
+
+  netstring = tag: suffix: s:
+    "${tag}${toString (builtins.stringLength s)}:${s}${suffix}";
+
+  unit = "u,";
+
+  n1 = b: if b then "n1:1," else "n1:0,";
+
+  n = i: n: "n${toString i}:${toString n},";
+  i = i: n: "i${toString i}:${toString n},";
+
+  n3 = n 3;
+  n6 = n 6;
+  n7 = n 7;
+
+  i3 = i 3;
+  i6 = i 6;
+  i7 = i 7;
+
+  text = netstring "t" ",";
+  binary = netstring "b" ",";
+
+  tag = key: val: netstring "<" "|" key + val;
+
+  concatStrings = builtins.concatStringsSep "";
+
+  record = lokv: netstring "{" "}"
+    (concatStrings (map ({ key, val }: tag key val) lokv));
+
+  list = l: netstring "[" "]" (concatStrings l);
+
+  dwim = val:
+    let
+      match = {
+        "bool" = n1;
+        "int" = i6;
+        "string" = text;
+        "set" = attrs:
+          # it could be a derivation, then just return the path
+          if attrs.type or "" == "derivation" then text "${attrs}"
+          else
+            record (lib.mapAttrsToList
+              (k: v: {
+                key = k;
+                val = dwim v;
+              })
+              attrs);
+        "list" = l: list (map dwim l);
+      };
+    in
+    match.${builtins.typeOf val} val;
+
+in
+{
+  inherit
+    unit
+    n1
+    n3
+    n6
+    n7
+    i3
+    i6
+    i7
+    text
+    binary
+    tag
+    record
+    list
+    dwim
+    ;
+}
diff --git a/users/Profpatsch/netencode/netencode-mustache.rs b/users/Profpatsch/netencode/netencode-mustache.rs
new file mode 100644
index 000000000000..73ed5be1ded2
--- /dev/null
+++ b/users/Profpatsch/netencode/netencode-mustache.rs
@@ -0,0 +1,52 @@
+extern crate arglib_netencode;
+extern crate mustache;
+extern crate netencode;
+
+use mustache::Data;
+use netencode::T;
+use std::collections::HashMap;
+use std::io::Read;
+use std::os::unix::ffi::OsStrExt;
+
+fn netencode_to_mustache_data_dwim(t: T) -> Data {
+    match t {
+        // TODO: good idea?
+        T::Unit => Data::Null,
+        T::N1(b) => Data::Bool(b),
+        T::N3(u) => Data::String(u.to_string()),
+        T::N6(u) => Data::String(u.to_string()),
+        T::N7(u) => Data::String(u.to_string()),
+        T::I3(i) => Data::String(i.to_string()),
+        T::I6(i) => Data::String(i.to_string()),
+        T::I7(i) => Data::String(i.to_string()),
+        T::Text(s) => Data::String(s),
+        T::Binary(_b) => unimplemented!(),
+        T::Sum(_tag) => unimplemented!(),
+        T::Record(xs) => Data::Map(
+            xs.into_iter()
+                .map(|(key, val)| (key, netencode_to_mustache_data_dwim(val)))
+                .collect::<HashMap<_, _>>(),
+        ),
+        T::List(xs) => Data::Vec(
+            xs.into_iter()
+                .map(|x| netencode_to_mustache_data_dwim(x))
+                .collect::<Vec<_>>(),
+        ),
+    }
+}
+
+pub fn from_stdin() {
+    let data = netencode_to_mustache_data_dwim(arglib_netencode::arglib_netencode(
+        "netencode-mustache",
+        Some(std::ffi::OsStr::new("TEMPLATE_DATA")),
+    ));
+    let mut stdin = String::new();
+    std::io::stdin().read_to_string(&mut stdin).unwrap();
+    mustache::compile_str(&stdin)
+        .and_then(|templ| templ.render_data(&mut std::io::stdout(), &data))
+        .unwrap()
+}
+
+pub fn main() {
+    from_stdin()
+}
diff --git a/users/Profpatsch/netencode/netencode.rs b/users/Profpatsch/netencode/netencode.rs
new file mode 100644
index 000000000000..bb08dca4aa57
--- /dev/null
+++ b/users/Profpatsch/netencode/netencode.rs
@@ -0,0 +1,891 @@
+extern crate exec_helpers;
+extern crate nom;
+
+use std::collections::HashMap;
+use std::fmt::{Debug, Display};
+use std::io::{Read, Write};
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum T {
+    // Unit
+    Unit,
+    // Boolean
+    N1(bool),
+    // Naturals
+    N3(u8),
+    N6(u64),
+    N7(u128),
+    // Integers
+    I3(i8),
+    I6(i64),
+    I7(i128),
+    // Text
+    // TODO: make into &str
+    Text(String),
+    // TODO: rename to Bytes
+    Binary(Vec<u8>),
+    // Tags
+    // TODO: make into &str
+    // TODO: rename to Tag
+    Sum(Tag<String, T>),
+    // TODO: make into &str
+    Record(HashMap<String, T>),
+    List(Vec<T>),
+}
+
+impl T {
+    pub fn to_u<'a>(&'a self) -> U<'a> {
+        match self {
+            T::Unit => U::Unit,
+            T::N1(b) => U::N1(*b),
+            T::N3(u) => U::N3(*u),
+            T::N6(u) => U::N6(*u),
+            T::N7(u) => U::N7(*u),
+            T::I3(i) => U::I3(*i),
+            T::I6(i) => U::I6(*i),
+            T::I7(i) => U::I7(*i),
+            T::Text(t) => U::Text(t.as_str()),
+            T::Binary(v) => U::Binary(v),
+            T::Sum(Tag { tag, val }) => U::Sum(Tag {
+                tag: tag.as_str(),
+                val: Box::new(val.to_u()),
+            }),
+            T::Record(map) => U::Record(map.iter().map(|(k, v)| (k.as_str(), v.to_u())).collect()),
+            T::List(l) => U::List(l.iter().map(|v| v.to_u()).collect::<Vec<U<'a>>>()),
+        }
+    }
+
+    pub fn encode<'a>(&'a self) -> Vec<u8> {
+        // TODO: don’t go via U, inefficient
+        self.to_u().encode()
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum U<'a> {
+    Unit,
+    // Boolean
+    N1(bool),
+    // Naturals
+    N3(u8),
+    N6(u64),
+    N7(u128),
+    // Integers
+    I3(i8),
+    I6(i64),
+    I7(i128),
+    // Text
+    Text(&'a str),
+    Binary(&'a [u8]),
+    // TODO: the U-recursion we do here means we can’t be breadth-lazy anymore
+    // like we originally planned; maybe we want to go `U<'a>` → `&'a [u8]` again?
+    // Tags
+    // TODO: rename to Tag
+    Sum(Tag<&'a str, U<'a>>),
+    Record(HashMap<&'a str, U<'a>>),
+    List(Vec<U<'a>>),
+}
+
+impl<'a> U<'a> {
+    pub fn encode(&self) -> Vec<u8> {
+        let mut c = std::io::Cursor::new(vec![]);
+        encode(&mut c, self).expect("encoding into an in-memory buffer should not fail");
+        c.into_inner()
+    }
+
+    pub fn to_t(&self) -> T {
+        match self {
+            U::Unit => T::Unit,
+            U::N1(b) => T::N1(*b),
+            U::N3(u) => T::N3(*u),
+            U::N6(u) => T::N6(*u),
+            U::N7(u) => T::N7(*u),
+            U::I3(i) => T::I3(*i),
+            U::I6(i) => T::I6(*i),
+            U::I7(i) => T::I7(*i),
+            U::Text(t) => T::Text((*t).to_owned()),
+            U::Binary(v) => T::Binary((*v).to_owned()),
+            U::Sum(Tag { tag, val }) => T::Sum(Tag {
+                tag: (*tag).to_owned(),
+                val: Box::new(val.to_t()),
+            }),
+            U::Record(map) => T::Record(
+                map.iter()
+                    .map(|(k, v)| ((*k).to_owned(), v.to_t()))
+                    .collect::<HashMap<String, T>>(),
+            ),
+            U::List(l) => T::List(l.iter().map(|v| v.to_t()).collect::<Vec<T>>()),
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct Tag<S, A> {
+    // TODO: make into &str
+    pub tag: S,
+    pub val: Box<A>,
+}
+
+impl<S, A> Tag<S, A> {
+    fn map<F, B>(self, f: F) -> Tag<S, B>
+    where
+        F: Fn(A) -> B,
+    {
+        Tag {
+            tag: self.tag,
+            val: Box::new(f(*self.val)),
+        }
+    }
+}
+
+fn encode_tag<W: Write>(w: &mut W, tag: &str, val: &U) -> std::io::Result<()> {
+    write!(w, "<{}:{}|", tag.len(), tag)?;
+    encode(w, val)?;
+    Ok(())
+}
+
+pub fn encode<W: Write>(w: &mut W, u: &U) -> std::io::Result<()> {
+    match u {
+        U::Unit => write!(w, "u,"),
+        U::N1(b) => {
+            if *b {
+                write!(w, "n1:1,")
+            } else {
+                write!(w, "n1:0,")
+            }
+        }
+        U::N3(n) => write!(w, "n3:{},", n),
+        U::N6(n) => write!(w, "n6:{},", n),
+        U::N7(n) => write!(w, "n7:{},", n),
+        U::I3(i) => write!(w, "i3:{},", i),
+        U::I6(i) => write!(w, "i6:{},", i),
+        U::I7(i) => write!(w, "i7:{},", i),
+        U::Text(s) => {
+            write!(w, "t{}:", s.len())?;
+            w.write_all(s.as_bytes())?;
+            write!(w, ",")
+        }
+        U::Binary(s) => {
+            write!(w, "b{}:", s.len())?;
+            w.write_all(&s)?;
+            write!(w, ",")
+        }
+        U::Sum(Tag { tag, val }) => encode_tag(w, tag, val),
+        U::Record(m) => {
+            let mut c = std::io::Cursor::new(vec![]);
+            for (k, v) in m {
+                encode_tag(&mut c, k, v)?;
+            }
+            write!(w, "{{{}:", c.get_ref().len())?;
+            w.write_all(c.get_ref())?;
+            write!(w, "}}")
+        }
+        U::List(l) => {
+            let mut c = std::io::Cursor::new(vec![]);
+            for u in l {
+                encode(&mut c, u)?;
+            }
+            write!(w, "[{}:", c.get_ref().len())?;
+            w.write_all(c.get_ref())?;
+            write!(w, "]")
+        }
+    }
+}
+
+pub fn text(s: String) -> T {
+    T::Text(s)
+}
+
+pub fn u_from_stdin_or_die_user_error<'a>(prog_name: &'_ str, stdin_buf: &'a mut Vec<u8>) -> U<'a> {
+    match std::io::stdin().lock().read_to_end(stdin_buf) {
+        Ok(_) => {}
+        Err(err) => exec_helpers::die_temporary(
+            prog_name,
+            format!("could not read from stdin: {:?}", err),
+        ),
+    }
+    let u = match parse::u_u(stdin_buf) {
+        Ok((rest, u)) => match rest {
+            b"" => u,
+            _ => exec_helpers::die_user_error(
+                prog_name,
+                format!(
+                    "stdin contained some soup after netencode value: {:?}",
+                    String::from_utf8_lossy(rest)
+                ),
+            ),
+        },
+        Err(err) => exec_helpers::die_user_error(
+            prog_name,
+            format!("unable to parse netencode from stdin: {:?}", err),
+        ),
+    };
+    u
+}
+
+pub mod parse {
+    use super::{Tag, T, U};
+
+    use std::collections::HashMap;
+    use std::ops::Neg;
+    use std::str::FromStr;
+
+    use nom::branch::alt;
+    use nom::bytes::streaming::{tag, take};
+    use nom::character::streaming::{char, digit1};
+    use nom::combinator::{flat_map, map, map_parser, map_res, opt};
+    use nom::error::{context, ErrorKind, ParseError};
+    use nom::sequence::tuple;
+    use nom::IResult;
+
+    fn unit_t(s: &[u8]) -> IResult<&[u8], ()> {
+        let (s, _) = context("unit", tag("u,"))(s)?;
+        Ok((s, ()))
+    }
+
+    fn usize_t(s: &[u8]) -> IResult<&[u8], usize> {
+        context(
+            "usize",
+            map_res(map_res(digit1, |n| std::str::from_utf8(n)), |s| {
+                s.parse::<usize>()
+            }),
+        )(s)
+    }
+
+    fn sized(begin: char, end: char) -> impl Fn(&[u8]) -> IResult<&[u8], &[u8]> {
+        move |s: &[u8]| {
+            // This is the point where we check the discriminator;
+            // if the beginning char does not match, we can immediately return.
+            let (s, _) = char(begin)(s)?;
+            let (s, (len, _)) = tuple((usize_t, char(':')))(s)?;
+            let (s, (res, _)) = tuple((take(len), char(end)))(s)?;
+            Ok((s, res))
+        }
+    }
+
+    fn uint_t<'a, I: FromStr + 'a>(t: &'static str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], I> {
+        move |s: &'a [u8]| {
+            let (s, (_, _, int, _)) = tuple((
+                tag(t.as_bytes()),
+                char(':'),
+                map_res(map_res(digit1, |n: &[u8]| std::str::from_utf8(n)), |s| {
+                    s.parse::<I>()
+                }),
+                char(','),
+            ))(s)?;
+            Ok((s, int))
+        }
+    }
+
+    fn bool_t<'a>() -> impl Fn(&'a [u8]) -> IResult<&'a [u8], bool> {
+        context(
+            "bool",
+            alt((map(tag("n1:0,"), |_| false), map(tag("n1:1,"), |_| true))),
+        )
+    }
+
+    fn int_t<'a, I: FromStr + Neg<Output = I>>(
+        t: &'static str,
+    ) -> impl Fn(&'a [u8]) -> IResult<&[u8], I> {
+        context(t, move |s: &'a [u8]| {
+            let (s, (_, _, neg, int, _)) = tuple((
+                tag(t.as_bytes()),
+                char(':'),
+                opt(char('-')),
+                map_res(map_res(digit1, |n: &[u8]| std::str::from_utf8(n)), |s| {
+                    s.parse::<I>()
+                }),
+                char(','),
+            ))(s)?;
+            let res = match neg {
+                Some(_) => -int,
+                None => int,
+            };
+            Ok((s, res))
+        })
+    }
+
+    fn tag_t(s: &[u8]) -> IResult<&[u8], Tag<String, T>> {
+        // recurses into the main parser
+        map(tag_g(t_t), |Tag { tag, val }| Tag {
+            tag: tag.to_string(),
+            val,
+        })(s)
+    }
+
+    fn tag_g<'a, P, O>(inner: P) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], Tag<&'a str, O>>
+    where
+        P: Fn(&'a [u8]) -> IResult<&'a [u8], O>,
+    {
+        move |s: &[u8]| {
+            let (s, tag) = sized('<', '|')(s)?;
+            let (s, val) = inner(s)?;
+            Ok((
+                s,
+                Tag {
+                    tag: std::str::from_utf8(tag)
+                        .map_err(|_| nom::Err::Failure((s, ErrorKind::Char)))?,
+                    val: Box::new(val),
+                },
+            ))
+        }
+    }
+
+    /// parse text scalar (`t5:hello,`)
+    fn text(s: &[u8]) -> IResult<&[u8], T> {
+        let (s, res) = text_g(s)?;
+        Ok((s, T::Text(res.to_string())))
+    }
+
+    fn text_g(s: &[u8]) -> IResult<&[u8], &str> {
+        let (s, res) = sized('t', ',')(s)?;
+        Ok((
+            s,
+            std::str::from_utf8(res).map_err(|_| nom::Err::Failure((s, ErrorKind::Char)))?,
+        ))
+    }
+
+    fn binary<'a>() -> impl Fn(&'a [u8]) -> IResult<&'a [u8], T> {
+        map(binary_g(), |b| T::Binary(b.to_owned()))
+    }
+
+    fn binary_g() -> impl Fn(&[u8]) -> IResult<&[u8], &[u8]> {
+        sized('b', ',')
+    }
+
+    fn list_t(s: &[u8]) -> IResult<&[u8], Vec<T>> {
+        list_g(t_t)(s)
+    }
+
+    /// Wrap the inner parser of a `many0`/`fold_many0`, so that the parser
+    /// is not called when the `s` is empty already, preventing it from
+    /// returning `Incomplete` on streaming parsing.
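+    /// (Without this wrapper, a streaming inner parser like `tag("u,")`
+    /// would return `Incomplete` on the empty rest of a sized payload,
+    /// instead of letting `many0` finish cleanly.)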
+    fn inner_no_empty_string<'a, P, O>(inner: P) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], O>
+    where
+        O: Clone,
+        P: Fn(&'a [u8]) -> IResult<&'a [u8], O>,
+    {
+        move |s: &'a [u8]| {
+            if s.is_empty() {
+                // This is a bit hacky, `many0` considers the inside done
+                // when a parser returns `Err::Error`, ignoring the actual error content
+                Err(nom::Err::Error((s, nom::error::ErrorKind::Many0)))
+            } else {
+                inner(s)
+            }
+        }
+    }
+
+    fn list_g<'a, P, O>(inner: P) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], Vec<O>>
+    where
+        O: Clone,
+        P: Fn(&'a [u8]) -> IResult<&'a [u8], O>,
+    {
+        map_parser(
+            sized('[', ']'),
+            nom::multi::many0(inner_no_empty_string(inner)),
+        )
+    }
+
+    fn record_t<'a>(s: &'a [u8]) -> IResult<&'a [u8], HashMap<String, T>> {
+        let (s, r) = record_g(t_t)(s)?;
+        Ok((
+            s,
+            r.into_iter()
+                .map(|(k, v)| (k.to_string(), v))
+                .collect::<HashMap<_, _>>(),
+        ))
+    }
+
+    fn record_g<'a, P, O>(inner: P) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], HashMap<&'a str, O>>
+    where
+        O: Clone,
+        P: Fn(&'a [u8]) -> IResult<&'a [u8], O>,
+    {
+        move |s: &'a [u8]| {
+            let (s, map) = map_parser(
+                sized('{', '}'),
+                nom::multi::fold_many0(
+                    inner_no_empty_string(tag_g(&inner)),
+                    HashMap::new(),
+                    |mut acc: HashMap<_, _>, Tag { tag, mut val }| {
+                        // ignore earlier tags with the same name
+                        // according to netencode spec
+                        let _ = acc.insert(tag, *val);
+                        acc
+                    },
+                ),
+            )(s)?;
+            if map.is_empty() {
+                // records must not be empty, according to the spec
+                Err(nom::Err::Failure((s, nom::error::ErrorKind::Many1)))
+            } else {
+                Ok((s, map))
+            }
+        }
+    }
+
+    pub fn u_u(s: &[u8]) -> IResult<&[u8], U> {
+        alt((
+            map(text_g, U::Text),
+            map(binary_g(), U::Binary),
+            map(unit_t, |()| U::Unit),
+            map(tag_g(u_u), |t| U::Sum(t)),
+            map(list_g(u_u), U::List),
+            map(record_g(u_u), U::Record),
+            map(bool_t(), |u| U::N1(u)),
+            map(uint_t("n3"), |u| U::N3(u)),
+            map(uint_t("n6"), |u| U::N6(u)),
+            map(uint_t("n7"), |u| U::N7(u)),
+            map(int_t("i3"), |u| U::I3(u)),
+            map(int_t("i6"), |u| U::I6(u)),
+            map(int_t("i7"), |u| U::I7(u)),
+            // less common
+            map(uint_t("n2"), |u| U::N3(u)),
+            map(uint_t("n4"), |u| U::N6(u)),
+            map(uint_t("n5"), |u| U::N6(u)),
+            map(int_t("i1"), |u| U::I3(u)),
+            map(int_t("i2"), |u| U::I3(u)),
+            map(int_t("i4"), |u| U::I6(u)),
+            map(int_t("i5"), |u| U::I6(u)),
+            // TODO: 8, 9 not supported
+        ))(s)
+    }
+
+    pub fn t_t(s: &[u8]) -> IResult<&[u8], T> {
+        alt((
+            text,
+            binary(),
+            map(unit_t, |_| T::Unit),
+            map(tag_t, |t| T::Sum(t)),
+            map(list_t, |l| T::List(l)),
+            map(record_t, |p| T::Record(p)),
+            map(bool_t(), |u| T::N1(u)),
+            // 8, 64 and 128 bit
+            map(uint_t("n3"), |u| T::N3(u)),
+            map(uint_t("n6"), |u| T::N6(u)),
+            map(uint_t("n7"), |u| T::N7(u)),
+            map(int_t("i3"), |u| T::I3(u)),
+            map(int_t("i6"), |u| T::I6(u)),
+            map(int_t("i7"), |u| T::I7(u)),
+            // less common
+            map(uint_t("n2"), |u| T::N3(u)),
+            map(uint_t("n4"), |u| T::N6(u)),
+            map(uint_t("n5"), |u| T::N6(u)),
+            map(int_t("i1"), |u| T::I3(u)),
+            map(int_t("i2"), |u| T::I3(u)),
+            map(int_t("i4"), |u| T::I6(u)),
+            map(int_t("i5"), |u| T::I6(u)),
+            // TODO: 8, 9 not supported
+        ))(s)
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use super::*;
+
+        #[test]
+        fn test_parse_unit_t() {
+            assert_eq!(unit_t("u,".as_bytes()), Ok(("".as_bytes(), ())));
+        }
+
+        #[test]
+        fn test_parse_bool_t() {
+            assert_eq!(bool_t()("n1:0,".as_bytes()), Ok(("".as_bytes(), false)));
+            assert_eq!(bool_t()("n1:1,".as_bytes()), Ok(("".as_bytes(), true)));
+        }
+
+        #[test]
+        fn test_parse_usize_t() {
+            assert_eq!(usize_t("32foo".as_bytes()), Ok(("foo".as_bytes(), 32)));
+        }
+
+        #[test]
+        fn test_parse_int_t() {
+            assert_eq!(
+                uint_t::<u8>("n3")("n3:42,abc".as_bytes()),
+                Ok(("abc".as_bytes(), 42))
+            );
+            assert_eq!(
+                uint_t::<u8>("n3")("n3:1024,abc".as_bytes()),
+                Err(nom::Err::Error((
+                    "1024,abc".as_bytes(),
+                    nom::error::ErrorKind::MapRes
+                )))
+            );
+            assert_eq!(
+                int_t::<i64>("i6")("i6:-23,abc".as_bytes()),
+                Ok(("abc".as_bytes(), -23))
+            );
+            assert_eq!(
+                int_t::<i128>("i3")("i3:0,:abc".as_bytes()),
+                Ok((":abc".as_bytes(), 0))
+            );
+            assert_eq!(
+                uint_t::<u8>("n7")("n7:09,".as_bytes()),
+                Ok(("".as_bytes(), 9))
+            );
+            // assert_eq!(
+            //     length("c"),
+            //     Err(nom::Err::Error(("c", nom::error::ErrorKind::Digit)))
+            // );
+            // assert_eq!(
+            //     length(":"),
+            //     Err(nom::Err::Error((":", nom::error::ErrorKind::Digit)))
+            // );
+        }
+
+        #[test]
+        fn test_parse_text() {
+            assert_eq!(
+                text("t5:hello,".as_bytes()),
+                Ok(("".as_bytes(), T::Text("hello".to_owned()))),
+                "{}",
+                r"t5:hello,"
+            );
+            assert_eq!(
+                text("t4:fo".as_bytes()),
+                // The content of the text should be 4 long
+                Err(nom::Err::Incomplete(nom::Needed::Size(4))),
+                "{}",
+                r"t4:fo,"
+            );
+            assert_eq!(
+                text("t9:今日は,".as_bytes()),
+                Ok(("".as_bytes(), T::Text("今日は".to_owned()))),
+                "{}",
+                r"t9:今日は,"
+            );
+        }
+
+        #[test]
+        fn test_parse_binary() {
+            assert_eq!(
+                binary()("b5:hello,".as_bytes()),
+                Ok(("".as_bytes(), T::Binary(Vec::from("hello".to_owned())))),
+                "{}",
+                r"b5:hello,"
+            );
+            assert_eq!(
+                binary()("b4:fo".as_bytes()),
+                // The content of the byte should be 4 long
+                Err(nom::Err::Incomplete(nom::Needed::Size(4))),
+                "{}",
+                r"b4:fo,"
+            );
+            assert_eq!(
+                binary()("b4:foob".as_bytes()),
+                // The content is 4 bytes now, but the finishing , is missing
+                Err(nom::Err::Incomplete(nom::Needed::Size(1))),
+                "{}",
+                r"b4:fo,"
+            );
+            assert_eq!(
+                binary()("b9:今日は,".as_bytes()),
+                Ok(("".as_bytes(), T::Binary(Vec::from("今日は".as_bytes())))),
+                "{}",
+                r"b9:今日は,"
+            );
+        }
+
+        #[test]
+        fn test_list() {
+            assert_eq!(
+                list_t("[0:]".as_bytes()),
+                Ok(("".as_bytes(), vec![])),
+                "{}",
+                r"[0:]"
+            );
+            assert_eq!(
+                list_t("[6:u,u,u,]".as_bytes()),
+                Ok(("".as_bytes(), vec![T::Unit, T::Unit, T::Unit,])),
+                "{}",
+                r"[6:u,u,u,]"
+            );
+            assert_eq!(
+                list_t("[15:u,[7:t3:foo,]u,]".as_bytes()),
+                Ok((
+                    "".as_bytes(),
+                    vec![T::Unit, T::List(vec![T::Text("foo".to_owned())]), T::Unit,]
+                )),
+                "{}",
+                r"[15:u,[7:t3:foo,]u,]"
+            );
+        }
+
+        #[test]
+        fn test_record() {
+            assert_eq!(
+                record_t("{21:<1:a|u,<1:b|u,<1:c|u,}".as_bytes()),
+                Ok((
+                    "".as_bytes(),
+                    vec![
+                        ("a".to_owned(), T::Unit),
+                        ("b".to_owned(), T::Unit),
+                        ("c".to_owned(), T::Unit),
+                    ]
+                    .into_iter()
+                    .collect::<HashMap<String, T>>()
+                )),
+                "{}",
+                r"{21:<1:a|u,<1:b|u,<1:c|u,}"
+            );
+            // duplicated keys are ignored (the last value wins)
+            assert_eq!(
+                record_t("{25:<1:a|u,<1:b|u,<1:a|i1:-1,}".as_bytes()),
+                Ok((
+                    "".as_bytes(),
+                    vec![("a".to_owned(), T::I3(-1)), ("b".to_owned(), T::Unit),]
+                        .into_iter()
+                        .collect::<HashMap<_, _>>()
+                )),
+                "{}",
+                r"{25:<1:a|u,<1:b|u,<1:a|i1:-1,}"
+            );
+            // empty records are not allowed
+            assert_eq!(
+                record_t("{0:}".as_bytes()),
+                Err(nom::Err::Failure((
+                    "".as_bytes(),
+                    nom::error::ErrorKind::Many1
+                ))),
+                "{}",
+                r"{0:}"
+            );
+        }
+
+        #[test]
+        fn test_parse() {
+            assert_eq!(
+                t_t("n3:255,".as_bytes()),
+                Ok(("".as_bytes(), T::N3(255))),
+                "{}",
+                r"n3:255,"
+            );
+            assert_eq!(
+                t_t("t6:halloo,".as_bytes()),
+                Ok(("".as_bytes(), T::Text("halloo".to_owned()))),
+                "{}",
+                r"t6:halloo,"
+            );
+            assert_eq!(
+                t_t("<3:foo|t6:halloo,".as_bytes()),
+                Ok((
+                    "".as_bytes(),
+                    T::Sum(Tag {
+                        tag: "foo".to_owned(),
+                        val: Box::new(T::Text("halloo".to_owned()))
+                    })
+                )),
+                "{}",
+                r"<3:foo|t6:halloo,"
+            );
+            // { a: Unit
+            // , foo: List <A: Unit | B: List i3> }
+            assert_eq!(
+                t_t("{52:<1:a|u,<3:foo|[33:<1:A|u,<1:A|n1:1,<1:B|[7:i3:127,]]}".as_bytes()),
+                Ok((
+                    "".as_bytes(),
+                    T::Record(
+                        vec![
+                            ("a".to_owned(), T::Unit),
+                            (
+                                "foo".to_owned(),
+                                T::List(vec![
+                                    T::Sum(Tag {
+                                        tag: "A".to_owned(),
+                                        val: Box::new(T::Unit)
+                                    }),
+                                    T::Sum(Tag {
+                                        tag: "A".to_owned(),
+                                        val: Box::new(T::N1(true))
+                                    }),
+                                    T::Sum(Tag {
+                                        tag: "B".to_owned(),
+                                        val: Box::new(T::List(vec![T::I3(127)]))
+                                    }),
+                                ])
+                            )
+                        ]
+                        .into_iter()
+                        .collect::<HashMap<String, T>>()
+                    )
+                )),
+                "{}",
+                r"{52:<1:a|u,<3:foo|[33:<1:A|u,<1:A|n1:1,<1:B|[7:i3:127,]]}"
+            );
+        }
+    }
+}
+
+pub mod dec {
+    use super::*;
+    use std::collections::HashMap;
+
+    pub struct DecodeError(pub String);
+
+    pub trait Decoder<'a> {
+        type A;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError>;
+    }
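+
+    // Example (sketch): decoders are plain values that compose, e.g.
+    //   `(RecordDot { field: "name", inner: Text }).dec(u)`
+    // yields `Ok` of a `&str` when `u` is a record with a text field
+    // "name" (compare the `record-get` tool in default.nix).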
+
+    /// Any netencode, as `T`.
+    #[derive(Clone, Copy)]
+    pub struct AnyT;
+    /// Any netencode, as `U`.
+    #[derive(Clone, Copy)]
+    pub struct AnyU;
+
+    impl<'a> Decoder<'a> for AnyT {
+        type A = T;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            Ok(u.to_t())
+        }
+    }
+
+    impl<'a> Decoder<'a> for AnyU {
+        type A = U<'a>;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            Ok(u)
+        }
+    }
+
+    /// A text
+    #[derive(Clone, Copy)]
+    pub struct Text;
+
+    /// A bytestring
+    // TODO: rename to Bytes
+    #[derive(Clone, Copy)]
+    pub struct Binary;
+
+    impl<'a> Decoder<'a> for Text {
+        type A = &'a str;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match u {
+                U::Text(t) => Ok(t),
+                other => Err(DecodeError(format!("Cannot decode {:?} into Text", other))),
+            }
+        }
+    }
+
+    impl<'a> Decoder<'a> for Binary {
+        type A = &'a [u8];
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match u {
+                U::Binary(b) => Ok(b),
+                other => Err(DecodeError(format!(
+                    "Cannot decode {:?} into Binary",
+                    other
+                ))),
+            }
+        }
+    }
+
+    /// Any scalar, converted to bytes.
+    #[derive(Clone, Copy)]
+    pub struct ScalarAsBytes;
+
+    impl<'a> Decoder<'a> for ScalarAsBytes {
+        type A = Vec<u8>;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match u {
+                U::N3(u) => Ok(format!("{}", u).into_bytes()),
+                U::N6(u) => Ok(format!("{}", u).into_bytes()),
+                U::N7(u) => Ok(format!("{}", u).into_bytes()),
+                U::I3(i) => Ok(format!("{}", i).into_bytes()),
+                U::I6(i) => Ok(format!("{}", i).into_bytes()),
+                U::I7(i) => Ok(format!("{}", i).into_bytes()),
+                U::Text(t) => Ok(t.as_bytes().to_owned()),
+                U::Binary(b) => Ok(b.to_owned()),
+                o => Err(DecodeError(format!("Cannot decode {:?} into scalar", o))),
+            }
+        }
+    }
+
+    /// A map of Ts (TODO: rename to map)
+    #[derive(Clone, Copy)]
+    pub struct Record<T>(pub T);
+
+    impl<'a, Inner> Decoder<'a> for Record<Inner>
+    where
+        Inner: Decoder<'a>,
+    {
+        type A = HashMap<&'a str, Inner::A>;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match u {
+                U::Record(map) => map
+                    .into_iter()
+                    .map(|(k, v)| self.0.dec(v).map(|v2| (k, v2)))
+                    .collect::<Result<Self::A, _>>(),
+                o => Err(DecodeError(format!("Cannot decode {:?} into record", o))),
+            }
+        }
+    }
+
+    /// Assume a record and project out the field with the given name and type.
+    #[derive(Clone, Copy)]
+    pub struct RecordDot<'a, T> {
+        pub field: &'a str,
+        pub inner: T,
+    }
+
+    impl<'a, Inner> Decoder<'a> for RecordDot<'_, Inner>
+    where
+        Inner: Decoder<'a> + Clone,
+    {
+        type A = Inner::A;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match Record(self.inner.clone()).dec(u) {
+                Ok(mut map) => match map.remove(self.field) {
+                    Some(inner) => Ok(inner),
+                    None => Err(DecodeError(format!(
+                        "Cannot find `{}` in record map",
+                        self.field
+                    ))),
+                },
+                Err(err) => Err(err),
+            }
+        }
+    }
+
+    /// Equals one of the listed `A`s exactly, after decoding.
+    #[derive(Clone)]
+    pub struct OneOf<T, A> {
+        pub inner: T,
+        pub list: Vec<A>,
+    }
+
+    impl<'a, Inner> Decoder<'a> for OneOf<Inner, Inner::A>
+    where
+        Inner: Decoder<'a>,
+        Inner::A: Display + Debug + PartialEq,
+    {
+        type A = Inner::A;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match self.inner.dec(u) {
+                Ok(inner) => match self.list.iter().any(|x| x.eq(&inner)) {
+                    true => Ok(inner),
+                    false => Err(DecodeError(format!(
+                        "{} is not one of {:?}",
+                        inner, self.list
+                    ))),
+                },
+                Err(err) => Err(err),
+            }
+        }
+    }
+
+    /// Try decoding as `T`.
+    #[derive(Clone)]
+    pub struct Try<T>(pub T);
+
+    impl<'a, Inner> Decoder<'a> for Try<Inner>
+    where
+        Inner: Decoder<'a>,
+    {
+        type A = Option<Inner::A>;
+        fn dec(&self, u: U<'a>) -> Result<Self::A, DecodeError> {
+            match self.0.dec(u) {
+                Ok(inner) => Ok(Some(inner)),
+                Err(_err) => Ok(None),
+            }
+        }
+    }
+}
diff --git a/users/Profpatsch/netencode/pretty.rs b/users/Profpatsch/netencode/pretty.rs
new file mode 100644
index 000000000000..935c3d4a8a17
--- /dev/null
+++ b/users/Profpatsch/netencode/pretty.rs
@@ -0,0 +1,163 @@
+extern crate netencode;
+
+use netencode::{Tag, T, U};
+
+pub enum Pretty {
+    Single {
+        r#type: char,
+        length: String,
+        val: String,
+        trailer: char,
+    },
+    Tag {
+        r#type: char,
+        length: String,
+        key: String,
+        inner: char,
+        val: Box<Pretty>,
+    },
+    Multi {
+        r#type: char,
+        length: String,
+        vals: Vec<Pretty>,
+        trailer: char,
+    },
+}
+
+impl Pretty {
+    pub fn from_u<'a>(u: U<'a>) -> Pretty {
+        match u {
+            U::Unit => Self::scalar('u', "", ""),
+            U::N1(b) => Self::scalar('n', "1:", if b { "1" } else { "0" }),
+            U::N3(n) => Self::scalar('n', "3:", n),
+            U::N6(n) => Self::scalar('n', "6:", n),
+            U::N7(n) => Self::scalar('n', "7:", n),
+            U::I3(i) => Self::scalar('i', "3:", i),
+            U::I6(i) => Self::scalar('i', "6:", i),
+            U::I7(i) => Self::scalar('i', "7:", i),
+            U::Text(s) => Pretty::Single {
+                r#type: 't',
+                length: format!("{}:", s.len()),
+                val: s.to_string(),
+                trailer: ',',
+            },
+            U::Binary(s) => Pretty::Single {
+                r#type: 'b',
+                length: format!("{}:", s.len()),
+                // For pretty printing we want the content to be human-readable,
+                // so instead of refusing binary, let’s use lossy conversion.
+                val: String::from_utf8_lossy(s).into_owned(),
+                trailer: ',',
+            },
+            U::Sum(Tag { tag, val }) => Self::pretty_tag(tag, Self::from_u(*val)),
+            U::Record(m) => Pretty::Multi {
+                r#type: '{',
+                // TODO: we are losing the size here, should we recompute it? Keep it?
+                length: String::from(""),
+                vals: m
+                    .into_iter()
+                    .map(|(k, v)| Self::pretty_tag(k, Self::from_u(v)))
+                    .collect(),
+                trailer: '}',
+            },
+            U::List(l) => Pretty::Multi {
+                r#type: '[',
+                // TODO: we are losing the size here, should we recompute it? Keep it?
+                length: String::from(""),
+                vals: l.into_iter().map(|v| Self::from_u(v)).collect(),
+                trailer: ']',
+            },
+        }
+    }
+
+    fn scalar<D>(r#type: char, length: &str, d: D) -> Pretty
+    where
+        D: std::fmt::Display,
+    {
+        Pretty::Single {
+            r#type,
+            length: length.to_string(),
+            val: format!("{}", d),
+            trailer: ',',
+        }
+    }
+
+    fn pretty_tag(tag: &str, val: Pretty) -> Pretty {
+        Pretty::Tag {
+            r#type: '<',
+            length: format!("{}:", tag.len()),
+            key: tag.to_string(),
+            inner: '|',
+            val: Box::new(val),
+        }
+    }
+
+    pub fn print_multiline<W>(&self, mut w: &mut W) -> std::io::Result<()>
+    where
+        W: std::io::Write,
+    {
+        Self::go(&mut w, self, 0, true)?;
+        write!(w, "\n")
+    }
+
+    fn go<W>(mut w: &mut W, p: &Pretty, depth: usize, is_newline: bool) -> std::io::Result<()>
+    where
+        W: std::io::Write,
+    {
+        const FULL: usize = 4;
+        const HALF: usize = 2;
+        // indentation is always ASCII spaces, so no unsafe utf8 trickery is needed
+        let i = " ".repeat(depth * FULL);
+        let iandhalf = " ".repeat(depth * FULL + HALF);
+        if is_newline {
+            write!(&mut w, "{}", i)?;
+        }
+        match p {
+            Pretty::Single {
+                r#type,
+                length,
+                val,
+                trailer,
+            } => write!(&mut w, "{} {}{}", r#type, val, trailer),
+            Pretty::Tag {
+                r#type,
+                length,
+                key,
+                inner,
+                val,
+            } => {
+                write!(&mut w, "{} {} {}", r#type, key, inner)?;
+                Self::go::<W>(&mut w, val, depth, false)
+            }
+            // If there are zero or one elements, we print on one line;
+            // only when there is more than one element do we split the resulting value.
+            // We never break lines at arbitrary column widths, since that is just silly.
+            Pretty::Multi {
+                r#type,
+                length,
+                vals,
+                trailer,
+            } => match vals.len() {
+                0 => write!(&mut w, "{} {}", r#type, trailer),
+                1 => {
+                    write!(&mut w, "{} ", r#type);
+                    Self::go::<W>(&mut w, &vals[0], depth, false)?;
+                    write!(&mut w, "{}", trailer)
+                }
+                more => {
+                    write!(&mut w, "\n{}{} \n", iandhalf, r#type)?;
+                    for v in vals {
+                        Self::go::<W>(&mut w, v, depth + 1, true)?;
+                        write!(&mut w, "\n")?;
+                    }
+                    write!(&mut w, "{}{}", iandhalf, trailer)
+                }
+            },
+        }
+    }
+}
diff --git a/users/Profpatsch/netstring/README.md b/users/Profpatsch/netstring/README.md
new file mode 100644
index 000000000000..b8daea11d158
--- /dev/null
+++ b/users/Profpatsch/netstring/README.md
@@ -0,0 +1,18 @@
+# Netstring
+
+Netstrings are a djb invention. They are intended as a serialization format: instead of using inline control characters like `\n` or `\0` to signal the end of a string, each string is prefixed with its length in bytes, encoded in ASCII.
+
+```
+hello -> 5:hello,
+foo! -> 4:foo!,
+こんにちは -> 15:こんにちは,
+```
+
+They can be used to encode e.g. lists by simply concatenating them and reading them one-by-one, as in the sketch below.
+
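+A minimal sketch of such a reader in Rust (a standalone illustration, not the Python or Rust helper libraries in this directory):
+
+```
+/// Split one netstring off the front of `input`,
+/// returning the payload and the remaining bytes.
+fn read_netstring(input: &[u8]) -> Option<(&[u8], &[u8])> {
+    let colon = input.iter().position(|&b| b == b':')?;
+    let len: usize = std::str::from_utf8(&input[..colon]).ok()?.parse().ok()?;
+    let rest = &input[colon + 1..];
+    if rest.len() < len + 1 || rest[len] != b',' {
+        return None; // incomplete, or the trailing comma is missing
+    }
+    Some((&rest[..len], &rest[len + 1..]))
+}
+
+fn main() {
+    // two concatenated netstrings, read one-by-one
+    let mut rest: &[u8] = b"5:hello,4:foo!,";
+    while let Some((val, r)) = read_netstring(rest) {
+        println!("{}", String::from_utf8_lossy(val));
+        rest = r;
+    }
+}
+```
+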
+If you need a more complex encoding, you could start encoding e.g. tuples as netstrings-in-netstrings, or you could use [`netencode`](../netencode/README.md) instead, which is what-if-json-but-netstrings, and takes the idea of netstrings to its logical conclusion.
+
+Resources:
+
+* Spec: http://cr.yp.to/proto/netstrings.txt
+* Wiki: https://en.wikipedia.org/wiki/Netstring
diff --git a/users/Profpatsch/netstring/default.nix b/users/Profpatsch/netstring/default.nix
new file mode 100644
index 000000000000..e85cf24dd8e6
--- /dev/null
+++ b/users/Profpatsch/netstring/default.nix
@@ -0,0 +1,69 @@
+{ lib, pkgs, depot, ... }:
+let
+  toNetstring = s:
+    "${toString (builtins.stringLength s)}:${s},";
+
+  toNetstringList = xs:
+    lib.concatStrings (map toNetstring xs);
+
+  toNetstringKeyVal = attrs:
+    lib.concatStrings
+      (lib.mapAttrsToList
+        (k: v: toNetstring (toNetstring k + toNetstring v))
+        attrs);
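+  # e.g. `toNetstringKeyVal { foo = "42"; }` evaluates to `"11:3:foo,2:42,,"`,
+  # a netstring whose payload is the two netstrings for key and value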
+
+  python-netstring = depot.users.Profpatsch.writers.python3Lib
+    {
+      name = "netstring";
+    } ''
+    def read_netstring(bytes):
+        (int_length, rest) = bytes.split(sep=b':', maxsplit=1)
+        val = rest[:int(int_length)]
+        # has to end on a ,
+        assert(rest[len(val)] == ord(','))
+        return (val, rest[len(val) + 1:])
+
+    def read_netstring_key_val(bytes):
+        (keyvalnet, rest) = read_netstring(bytes)
+        (key, valnet) = read_netstring(keyvalnet)
+        (val, nothing) = read_netstring(valnet)
+        assert(nothing == b"")
+        return (key, val, rest)
+
+    def read_netstring_key_val_list(bytes):
+        rest = bytes
+        res = {}
+        while rest != b"":
+            (key, val, r) = read_netstring_key_val(rest)
+            rest = r
+            res[key] = val
+        return res
+  '';
+
+  rust-netstring = depot.nix.writers.rustSimpleLib
+    {
+      name = "netstring";
+    } ''
+    pub fn to_netstring(s: &[u8]) -> Vec<u8> {
+        let len = s.len();
+        // length of the integer as ascii
+        let i_len = ((len as f64).log10() as usize) + 1;
+        let ns_len = i_len + 1 + len + 1;
+        let mut res = Vec::with_capacity(ns_len);
+        res.extend_from_slice(format!("{}:", len).as_bytes());
+        res.extend_from_slice(s);
+        res.push(b',');
+        res
+    }
+  '';
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    toNetstring
+    toNetstringList
+    toNetstringKeyVal
+    python-netstring
+    rust-netstring
+    ;
+}
diff --git a/users/Profpatsch/netstring/tests/default.nix b/users/Profpatsch/netstring/tests/default.nix
new file mode 100644
index 000000000000..6a1062988f1e
--- /dev/null
+++ b/users/Profpatsch/netstring/tests/default.nix
@@ -0,0 +1,64 @@
+{ depot, lib, pkgs, ... }:
+
+let
+
+  python-netstring-test = depot.users.Profpatsch.writers.python3
+    {
+      name = "python-netstring-test";
+      libraries = p: [
+        depot.users.Profpatsch.netstring.python-netstring
+      ];
+    } ''
+    import netstring
+
+    def assEq(left, right):
+      assert left == right, "{} /= {}".format(str(left), str(right))
+
+    assEq(
+      netstring.read_netstring(b"""${depot.nix.netstring.fromString "hi!"}"""),
+      (b"hi!", b"")
+    )
+
+    assEq(
+      netstring.read_netstring_key_val(
+        b"""${depot.nix.netstring.attrsToKeyValList { foo = "42"; }}"""
+      ),
+      (b'foo', b'42', b"")
+    )
+
+    assEq(
+      netstring.read_netstring_key_val_list(
+        b"""${depot.nix.netstring.attrsToKeyValList { foo = "42"; bar = "hi"; }}"""
+      ),
+      { b'foo': b'42', b'bar': b'hi' }
+    )
+  '';
+
+  rust-netstring-test = depot.nix.writers.rustSimple
+    {
+      name = "rust-netstring-test";
+      dependencies = [
+        depot.users.Profpatsch.netstring.rust-netstring
+      ];
+    } ''
+    extern crate netstring;
+
+    fn main() {
+      assert_eq!(
+        std::str::from_utf8(&netstring::to_netstring(b"hello")).unwrap(),
+        r##"${depot.nix.netstring.fromString "hello"}"##
+      );
+      assert_eq!(
+        std::str::from_utf8(&netstring::to_netstring("こんにちは".as_bytes())).unwrap(),
+        r##"${depot.nix.netstring.fromString "こんにちは"}"##
+      );
+    }
+  '';
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    python-netstring-test
+    rust-netstring-test
+    ;
+}
diff --git a/users/Profpatsch/nix-home/default.nix b/users/Profpatsch/nix-home/default.nix
new file mode 100644
index 000000000000..3f0b7c9c39c5
--- /dev/null
+++ b/users/Profpatsch/nix-home/default.nix
@@ -0,0 +1,203 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.stow [ "stow" ]
+    // depot.nix.getBins pkgs.coreutils [ "mkdir" "ln" "printenv" "rm" ]
+    // depot.nix.getBins pkgs.xe [ "xe" ]
+    // depot.nix.getBins pkgs.lr [ "lr" ]
+    // depot.nix.getBins pkgs.nix [ "nix-store" ]
+  ;
+
+  # run stow to populate the target directory with the given stow package, read from stowDir.
+  # Bear in mind that `stowDirOriginPath` should always be semantically bound to the given `stowDir`, otherwise stow might become rather confused.
+  runStow =
+    {
+      # “stow package” to stow (see manpage)
+      # TODO: allow this function to un-stow multiple packages!
+      stowPackage
+    , # “target directory” to stow in (see manpage)
+      targetDir
+    , # The “stow directory” (see manpage), containing “stow packages” (see manpage)
+      stowDir
+    , # representative directory for the stowDir in the file system, against which stow will create relative links.
+      # ATTN: this is always overwritten with the contents of `stowDir`! You shouldn’t re-use the same `stowDirOriginPath` for different `stowDir`s, otherwise there might be surprises.
+      stowDirOriginPath
+    ,
+    }: depot.nix.writeExecline "stow-${stowPackage}" { } [
+      # first, create a temporary stow directory to use as source
+      # (stow will use it to determine the origin of files)
+      "if"
+      [ bins.mkdir "-p" stowDirOriginPath ]
+      # remove old symlinks
+      "if"
+      [
+        "pipeline"
+        [
+          bins.lr
+          "-0"
+          "-t"
+          "depth == 1 && type == l"
+          stowDirOriginPath
+        ]
+        bins.xe
+        "-0"
+        bins.rm
+      ]
+      # create an indirect GC root so our config is not garbage-collected out from under us
+      "if"
+      [
+        bins.nix-store
+        "--realise"
+        "--indirect"
+        "--add-root"
+        "${stowDirOriginPath}/.nix-stowdir-gc-root"
+        stowDir
+      ]
+      # populate with new stow targets
+      "if"
+      [
+        "elglob"
+        "-w0"
+        "stowPackages"
+        "${stowDir}/*"
+        bins.ln
+        "--force"
+        "-st"
+        stowDirOriginPath
+        "$stowPackages"
+      ]
+      # stow always looks for $HOME/.stowrc to read more arguments
+      "export"
+      "HOME"
+      "/homeless-shelter"
+      bins.stow
+      # always run restow for now; this does more stat calls but will remove stale links
+      "--restow"
+      "--dir"
+      stowDirOriginPath
+      "--target"
+      targetDir
+      stowPackage
+    ];
+
+  # create a stow dir from a list of drv paths and a stow package name.
+  makeStowDir =
+    (with depot.nix.yants;
+    defun
+      [
+        (list (struct {
+          originalDir = drv;
+          stowPackage = string;
+        }))
+        drv
+      ])
+      (dirs:
+        depot.nix.runExecline "make-stow-dir"
+          {
+            stdin = lib.pipe dirs [
+              (map depot.users.Profpatsch.netencode.gen.dwim)
+              depot.users.Profpatsch.netstring.toNetstringList
+            ];
+          } [
+          "importas"
+          "out"
+          "out"
+          "if"
+          [ bins.mkdir "-p" "$out" ]
+          "forstdin"
+          "-d"
+          ""
+          "-o"
+          "0"
+          "line"
+          "pipeline"
+          [
+            depot.users.Profpatsch.execline.print-one-env
+            "line"
+          ]
+          depot.users.Profpatsch.netencode.record-splice-env
+          "importas"
+          "-ui"
+          "originalDir"
+          "originalDir"
+          "importas"
+          "-ui"
+          "stowPackage"
+          "stowPackage"
+          bins.ln
+          "-sT"
+          "$originalDir"
+          "\${out}/\${stowPackage}"
+        ]);
+
+  # this is a dumb way of generating a pure list of packages from a depot namespace.
+  readTreeNamespaceDrvs = namespace:
+    lib.pipe namespace [
+      (lib.filterAttrs (_: v: lib.isDerivation v))
+      (lib.mapAttrsToList (k: v: {
+        name = k;
+        drv = v;
+      }))
+    ];
+
+  scriptsStow =
+    lib.pipe { } [
+      (_: makeStowDir [{
+        stowPackage = "scripts";
+        originalDir = pkgs.linkFarm "scripts-farm"
+          ([
+            {
+              name = "scripts/ytextr";
+              path = depot.users.Profpatsch.ytextr;
+            }
+          ]
+          ++
+          (lib.pipe depot.users.Profpatsch.aliases [
+            readTreeNamespaceDrvs
+            (map ({ name, drv }: {
+              name = "scripts/${name}";
+              path = drv;
+            }))
+          ]));
+      }])
+      (d: runStow {
+        stowDir = d;
+        stowPackage = "scripts";
+        targetDir = "/home/philip";
+        stowDirOriginPath = "/home/philip/.local/share/nix-home/stow-origin";
+      })
+    ];
+
+
+
+  terminalEmulatorStow =
+    lib.pipe { } [
+      (_: makeStowDir [{
+        stowPackage = "terminal-emulator";
+        originalDir = pkgs.linkFarm "terminal-emulator-farm"
+          ([
+            {
+              name = "bin/terminal-emulator";
+              path = depot.users.Profpatsch.alacritty;
+            }
+          ]);
+
+      }])
+      (d: runStow {
+        stowDir = d;
+        stowPackage = "terminal-emulator";
+        targetDir = "/home/philip";
+        # TODO: this should only be done once, in a single runStow instead of multiple
+        stowDirOriginPath = "/home/philip/.local/share/nix-home/stow-origin-terminal-emulator";
+      })
+    ];
+
+in
+
+# TODO: run multiple stows with runStow?
+# TODO: temp setup
+depot.nix.writeExecline "nix-home" { } [
+  "if"
+  [ scriptsStow ]
+  terminalEmulatorStow
+]
diff --git a/users/Profpatsch/nixpkgs-rewriter/MetaStdenvLib.hs b/users/Profpatsch/nixpkgs-rewriter/MetaStdenvLib.hs
new file mode 100644
index 000000000000..3ed96a7b6eac
--- /dev/null
+++ b/users/Profpatsch/nixpkgs-rewriter/MetaStdenvLib.hs
@@ -0,0 +1,80 @@
+{-# LANGUAGE PartialTypeSignatures #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE NamedFieldPuns #-}
+import Nix.Parser
+import Nix.Expr.Types
+import Nix.Expr.Types.Annotated
+import System.Environment (getArgs)
+import System.Exit (die)
+import Data.Fix (Fix(..))
+import qualified Data.Text as Text
+import qualified Data.ByteString.Lazy.Char8 as BL
+import qualified Data.Aeson as A
+import qualified Data.Aeson.Encoding as A
+import Data.Function ((&))
+import qualified System.IO as IO
+import qualified Text.Megaparsec.Pos as MP
+
+main = do
+  (nixFile:_) <- getArgs
+  (parseNixFileLoc nixFile :: IO _) >>= \case
+    Failure err -> do
+      ePutStrLn $ show err
+      die "oh no"
+    Success expr -> do
+      case snd $ match expr of
+        NoArguments -> do
+          ePutStrLn $ "NoArguments in " <> nixFile
+          printPairs mempty
+        YesLib vars -> do
+          ePutStrLn $ "lib in " <> show vars <> " in " <> nixFile
+          printPairs mempty
+        NoLib vars srcSpan -> do
+          ePutStrLn $ nixFile <> " needs lib added"
+          printPairs
+            $ "fileName" A..= nixFile
+            <> "fromLine" A..= (srcSpan & spanBegin & sourceLine)
+            <> "fromColumn" A..= (srcSpan & spanBegin & sourceColumn)
+            <> "toLine" A..= (srcSpan & spanEnd & sourceLine)
+            <> "toColumn" A..= (srcSpan & spanEnd & sourceColumn)
+
+printPairs pairs = BL.putStrLn $ A.encodingToLazyByteString $ A.pairs pairs
+
+ePutStrLn = IO.hPutStrLn IO.stderr
+
+data Descend = YesDesc | NoDesc
+  deriving Show
+data Matched =  NoArguments | NoLib [VarName] SrcSpan | YesLib [VarName]
+  deriving Show
+
+match :: Fix (Compose (Ann SrcSpan) NExprF) -> (Descend, Matched)
+match = \case
+  (AnnE outerSpan (NAbs (ParamSet params _ _) (AnnE innerSpan _))) -> (NoDesc,
+    let vars = map fst params in
+    case (any (== "lib") vars) of
+      True -> YesLib vars
+      False ->
+          -- The span of the arglist is from the beginning of the match
+          -- to the beginning of the inner expression
+          let varSpan = SrcSpan
+                { spanBegin = outerSpan & spanBegin
+                -- -1 to prevent the spans from overlapping
+                , spanEnd = sourcePosMinus1 (innerSpan & spanBegin) }
+          in NoLib vars varSpan)
+  _ -> (NoDesc, NoArguments)
+
+-- | Remove one from a source position.
+--
+-- That means if the current position is at the very beginning of a line,
+-- jump to the previous line.
+sourcePosMinus1 :: SourcePos -> SourcePos
+sourcePosMinus1 src@(SourcePos { sourceLine, sourceColumn }) =
+  let
+    col = MP.mkPos $ max (MP.unPos sourceColumn - 1) 1
+    line = MP.mkPos $ case MP.unPos sourceColumn of
+      1 -> max (MP.unPos sourceLine - 1) 1
+      _ -> MP.unPos sourceLine
+  in src
+    { sourceLine = line
+    , sourceColumn = col }
diff --git a/users/Profpatsch/nixpkgs-rewriter/default.nix b/users/Profpatsch/nixpkgs-rewriter/default.nix
new file mode 100644
index 000000000000..0740a870aa4a
--- /dev/null
+++ b/users/Profpatsch/nixpkgs-rewriter/default.nix
@@ -0,0 +1,148 @@
+{ depot, pkgs, ... }:
+let
+  inherit (depot.nix)
+    writeExecline
+    ;
+  inherit (depot.users.Profpatsch.lib)
+    debugExec
+    ;
+
+  bins = depot.nix.getBins pkgs.coreutils [ "head" "shuf" ]
+    // depot.nix.getBins pkgs.jq [ "jq" ]
+    // depot.nix.getBins pkgs.findutils [ "xargs" ]
+    // depot.nix.getBins pkgs.gnused [ "sed" ]
+  ;
+
+  export-json-object = pkgs.writers.writePython3 "export-json-object" { } ''
+    import json
+    import sys
+    import os
+
+    d = json.load(sys.stdin)
+
+    if d == {}:
+        sys.exit(0)
+
+    for k, v in d.items():
+        os.environ[k] = str(v)
+
+    os.execvp(sys.argv[1], sys.argv[1:])
+  '';
+
+  meta-stdenv-lib = pkgs.writers.writeHaskell "meta-stdenv-lib"
+    {
+      libraries = [
+        pkgs.haskellPackages.hnix
+        pkgs.haskellPackages.aeson
+      ];
+    } ./MetaStdenvLib.hs;
+
+  replace-between-lines = writeExecline "replace-between-lines" { readNArgs = 1; } [
+    "importas"
+    "-ui"
+    "file"
+    "fileName"
+    "importas"
+    "-ui"
+    "from"
+    "fromLine"
+    "importas"
+    "-ui"
+    "to"
+    "toLine"
+    "if"
+    [ depot.tools.eprintf "%s-%s\n" "$from" "$to" ]
+    (debugExec "adding lib")
+    bins.sed
+    "-e"
+    "\${from},\${to} \${1}"
+    "-i"
+    "$file"
+  ];
+
+  add-lib-if-necessary = writeExecline "add-lib-if-necessary" { readNArgs = 1; } [
+    "pipeline"
+    [ meta-stdenv-lib "$1" ]
+    export-json-object
+    # first replace any stdenv.lib mentions in the argument header;
+    # if this is not done, the replacement below would clobber them.
+    # Since we ultimately want them replaced anyway, do it here.
+    "if"
+    [ replace-between-lines "s/stdenv\.lib/lib/" ]
+    # then add the lib argument
+    # (has to be before stdenv, otherwise default arguments might be in the way)
+    replace-between-lines
+    "s/stdenv/lib, stdenv/"
+  ];
+
+  metaString = ''meta = with stdenv.lib; {'';
+
+  replace-stdenv-lib = pkgs.writers.writeBash "replace-stdenv-lib" ''
+    set -euo pipefail
+    sourceDir="$1"
+    for file in $(
+      ${pkgs.ripgrep}/bin/rg \
+        --files-with-matches \
+        --fixed-strings \
+        -e '${metaString}' \
+        "$sourceDir"
+    )
+    do
+      echo "replacing stdenv.lib meta in $file" >&2
+      ${bins.sed} -e '/${metaString}/ s/stdenv.lib/lib/' \
+          -i "$file"
+      ${add-lib-if-necessary} "$file"
+    done
+  '';
+
+  instantiate-nixpkgs-randomly = writeExecline "instantiate-nixpkgs-randomly" { readNArgs = 1; } [
+    "export"
+    "NIXPKGS_ALLOW_BROKEN"
+    "1"
+    "export"
+    "NIXPKGS_ALLOW_UNFREE"
+    "1"
+    "export"
+    "NIXPKGS_ALLOW_INSECURE"
+    "1"
+    "export"
+    "NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM"
+    "1"
+    "pipeline"
+    [
+      "nix"
+      "eval"
+      "--raw"
+      ''(
+          let pkgs = import ''${1} {};
+          in builtins.toJSON (builtins.attrNames pkgs)
+        )''
+    ]
+    "pipeline"
+    [ bins.jq "-r" ".[]" ]
+    "pipeline"
+    [ bins.shuf ]
+    "pipeline"
+    [ bins.head "-n" "1000" ]
+    bins.xargs
+    "-I"
+    "{}"
+    "-n1"
+    "if"
+    [ depot.tools.eprintf "instantiating %s\n" "{}" ]
+    "nix-instantiate"
+    "$1"
+    "-A"
+    "{}"
+  ];
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    instantiate-nixpkgs-randomly
+    # requires hnix, which we don’t want in tvl for now
+    # uncomment manually if you want to use it.
+    #   meta-stdenv-lib
+    #   replace-stdenv-lib
+    ;
+}
diff --git a/users/Profpatsch/read-http.nix b/users/Profpatsch/read-http.nix
new file mode 100644
index 000000000000..d9ad6fc30d94
--- /dev/null
+++ b/users/Profpatsch/read-http.nix
@@ -0,0 +1,19 @@
+{ depot, pkgs, ... }:
+
+let
+
+  read-http = depot.nix.writers.rustSimple
+    {
+      name = "read-http";
+      dependencies = [
+        depot.third_party.rust-crates.ascii
+        depot.third_party.rust-crates.httparse
+        depot.users.Profpatsch.netencode.netencode-rs
+        depot.users.Profpatsch.arglib.netencode.rust
+        depot.users.Profpatsch.execline.exec-helpers
+      ];
+    }
+    (builtins.readFile ./read-http.rs);
+
+in
+read-http
diff --git a/users/Profpatsch/read-http.rs b/users/Profpatsch/read-http.rs
new file mode 100644
index 000000000000..efaded87e6cd
--- /dev/null
+++ b/users/Profpatsch/read-http.rs
@@ -0,0 +1,249 @@
+extern crate arglib_netencode;
+extern crate ascii;
+extern crate exec_helpers;
+extern crate httparse;
+extern crate netencode;
+
+use exec_helpers::{die_expected_error, die_temporary, die_user_error};
+use std::collections::HashMap;
+use std::io::{Read, Write};
+use std::os::unix::io::FromRawFd;
+
+use netencode::dec::Decoder;
+use netencode::{dec, T, U};
+
+enum What {
+    Request,
+    Response,
+}
+
+// reads an HTTP request (or response) from stdin, and writes all headers to stdout as a netencoded record.
+// The keys are text, but can be lists of text iff headers appear multiple times, so beware.
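+//
+// For example (sketch, lengths computed per the encoder in netencode.rs): a request
+// containing the header `Host: example.com` yields a record with the entry
+// `<4:host|t11:example.com,` (header names are lowercased).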
+fn main() -> std::io::Result<()> {
+    exec_helpers::no_args("read-http");
+
+    let args = dec::RecordDot {
+        field: "what",
+        inner: dec::OneOf {
+            list: vec!["request", "response"],
+            inner: dec::Text,
+        },
+    };
+    let what: What = match args.dec(arglib_netencode::arglib_netencode("read-http", None).to_u()) {
+        Ok("request") => What::Request,
+        Ok("response") => What::Response,
+        Ok(v) => panic!("shouldn’t happen! value was: {}", v),
+        Err(dec::DecodeError(err)) => die_user_error("read-http", err),
+    };
+
+    fn read_stdin_to_complete<F>(mut parse: F) -> ()
+    where
+        F: FnMut(&[u8]) -> httparse::Result<usize>,
+    {
+        let mut res = httparse::Status::Partial;
+        loop {
+            if let httparse::Status::Complete(_) = res {
+                return;
+            }
+            let mut buf = [0; 2048];
+            match std::io::stdin().read(&mut buf[..]) {
+                Ok(size) => {
+                    if size == 0 {
+                        break;
+                    }
+                }
+                Err(err) => {
+                    die_temporary("read-http", format!("could not read from stdin, {:?}", err))
+                }
+            }
+            match parse(&buf) {
+                Ok(status) => {
+                    res = status;
+                }
+                Err(err) => {
+                    die_temporary("read-http", format!("httparse parsing failed: {:#?}", err))
+                }
+            }
+        }
+    }
+
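+    // Collects headers into a map, lowercasing their names; a repeated header
+    // turns its entry into a list, e.g. two `Set-Cookie` headers "a" and "b"
+    // become "set-cookie" -> U::List([U::Text("a"), U::Text("b")]).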
+    fn normalize_headers<'a>(headers: &'a [httparse::Header]) -> HashMap<String, U<'a>> {
+        let mut res = HashMap::new();
+        for httparse::Header { name, value } in headers {
+            let val = ascii::AsciiStr::from_ascii(*value)
+                .expect(&format!(
+                    "read-http: we require header values to be ASCII, but the header {} was {:?}",
+                    name, value
+                ))
+                .as_str();
+            // lowercase the header names, since the standard doesn’t care
+            // and we want unique strings to match against
+            let name_lower = name.to_lowercase();
+            match res.insert(name_lower, U::Text(val)) {
+                None => (),
+                Some(U::Text(t)) => {
+                    let name_lower = name.to_lowercase();
+                    let _ = res.insert(name_lower, U::List(vec![U::Text(t), U::Text(val)]));
+                    ()
+                }
+                Some(U::List(mut l)) => {
+                    let name_lower = name.to_lowercase();
+                    l.push(U::Text(val));
+                    let _ = res.insert(name_lower, U::List(l));
+                    ()
+                }
+                Some(o) => panic!("read-http: header not text nor list: {:?}", o),
+            }
+        }
+        res
+    }
+
+    // tries to read until the end of the http header (delimited by the double CRLF "\r\n\r\n")
+    fn read_till_end_of_header<R: Read>(buf: &mut Vec<u8>, reader: R) -> Option<()> {
+        let mut chonker = Chunkyboi::new(reader, 4096);
+        loop {
+            // TODO: attacker can send looooong input, set upper maximum
+            match chonker.next() {
+                Some(Ok(chunk)) => {
+                    buf.extend_from_slice(&chunk);
+                    // search the accumulated buffer, not just the current chunk,
+                    // so a delimiter that spans two chunks is still found
+                    if buf.windows(4).any(|c| c == b"\r\n\r\n") {
+                        return Some(());
+                    }
+                }
+                Some(Err(err)) => {
+                    die_temporary("read-http", format!("error reading from stdin: {:?}", err))
+                }
+                None => return None,
+            }
+        }
+    }
+
+    // max header size chosen arbitrarily
+    let mut headers = [httparse::EMPTY_HEADER; 128];
+    let stdin = std::io::stdin();
+
+    match what {
+        What::Request => {
+            let mut req = httparse::Request::new(&mut headers);
+            let mut buf: Vec<u8> = vec![];
+            match read_till_end_of_header(&mut buf, stdin.lock()) {
+                Some(()) => match req.parse(&buf) {
+                    Ok(httparse::Status::Complete(_body_start)) => {}
+                    Ok(httparse::Status::Partial) => {
+                        die_expected_error("read-http", "httparse should have gotten a full header")
+                    }
+                    Err(err) => die_expected_error(
+                        "read-http",
+                        format!("httparse response parsing failed: {:#?}", err),
+                    ),
+                },
+                None => die_expected_error(
+                    "read-http",
+                    format!("httparse end of stdin reached before able to parse request headers"),
+                ),
+            }
+            let method = req.method.expect("method must be filled on complete parse");
+            let path = req.path.expect("path must be filled on complete parse");
+            write_dict_req(method, path, &normalize_headers(req.headers))
+        }
+        What::Response => {
+            let mut resp = httparse::Response::new(&mut headers);
+            let mut buf: Vec<u8> = vec![];
+            match read_till_end_of_header(&mut buf, stdin.lock()) {
+                Some(()) => match resp.parse(&buf) {
+                    Ok(httparse::Status::Complete(_body_start)) => {}
+                    Ok(httparse::Status::Partial) => {
+                        die_expected_error("read-http", "httparse should have gotten a full header")
+                    }
+                    Err(err) => die_expected_error(
+                        "read-http",
+                        format!("httparse response parsing failed: {:#?}", err),
+                    ),
+                },
+                None => die_expected_error(
+                    "read-http",
+                    format!("httparse end of stdin reached before able to parse response headers"),
+                ),
+            }
+            let code = resp.code.expect("code must be filled on complete parse");
+            let reason = resp
+                .reason
+                .expect("reason must be filled on complete parse");
+            write_dict_resp(code, reason, &normalize_headers(resp.headers))
+        }
+    }
+}
+
+fn write_dict_req<'a, 'buf>(
+    method: &'buf str,
+    path: &'buf str,
+    headers: &'a HashMap<String, U<'a>>,
+) -> std::io::Result<()> {
+    let mut http = vec![("method", U::Text(method)), ("path", U::Text(path))]
+        .into_iter()
+        .collect();
+    write_dict(http, headers)
+}
+
+fn write_dict_resp<'a, 'buf>(
+    code: u16,
+    reason: &'buf str,
+    headers: &'a HashMap<String, U<'a>>,
+) -> std::io::Result<()> {
+    let http = vec![
+        ("status", U::N6(code as u64)),
+        ("status-text", U::Text(reason)),
+    ]
+    .into_iter()
+    .collect();
+    write_dict(http, headers)
+}
+
+fn write_dict<'buf, 'a>(
+    mut http: HashMap<&str, U<'a>>,
+    headers: &'a HashMap<String, U<'a>>,
+) -> std::io::Result<()> {
+    match http.insert(
+        "headers",
+        U::Record(
+            headers
+                .iter()
+                .map(|(k, v)| (k.as_str(), v.clone()))
+                .collect(),
+        ),
+    ) {
+        None => (),
+        Some(_) => panic!("read-http: headers already in dict"),
+    };
+    netencode::encode(&mut std::io::stdout(), &U::Record(http))?;
+    Ok(())
+}
+
+// iterator helper: yields owned byte chunks read from the inner reader until EOF
+
+struct Chunkyboi<T> {
+    inner: T,
+    buf: Vec<u8>,
+}
+
+impl<R: Read> Chunkyboi<R> {
+    fn new(inner: R, chunksize: usize) -> Self {
+        let buf = vec![0; chunksize];
+        Chunkyboi { inner, buf }
+    }
+}
+
+impl<R: Read> Iterator for Chunkyboi<R> {
+    type Item = std::io::Result<Vec<u8>>;
+
+    fn next(&mut self) -> Option<std::io::Result<Vec<u8>>> {
+        match self.inner.read(&mut self.buf) {
+            Ok(0) => None,
+            Ok(read) => {
+                // clone a new buffer so we can reuse the internal one
+                Some(Ok(self.buf[..read].to_owned()))
+            }
+            Err(err) => Some(Err(err)),
+        }
+    }
+}
diff --git a/users/Profpatsch/reverse-haskell-deps.hs b/users/Profpatsch/reverse-haskell-deps.hs
new file mode 100644
index 000000000000..6b644df9ecc6
--- /dev/null
+++ b/users/Profpatsch/reverse-haskell-deps.hs
@@ -0,0 +1,72 @@
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE MultiWayIf #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+import qualified Text.HTML.TagSoup as Tag
+import qualified Data.Text as Text
+import Data.Text (Text)
+import qualified Data.List as List
+import Data.Maybe
+import Text.Nicify
+import qualified Text.Read as Read
+import Numeric.Natural
+import Data.Either
+import qualified Data.ByteString as ByteString
+import qualified Data.Text.Encoding
+
+parseNat :: Text.Text -> Maybe Natural
+parseNat = Read.readMaybe . Text.unpack
+
+printNice :: Show a => a -> IO ()
+printNice = putStrLn . nicify . show
+
+type Tag = Tag.Tag Text.Text
+
+main = do
+  reverseHtml <- readStdinUtf8
+  printNice $ List.sortOn snd $ packagesAndReverseDeps reverseHtml
+
+  where
+    readStdinUtf8 = Data.Text.Encoding.decodeUtf8 <$> ByteString.getContents
+
+-- | Reads the table provided by https://packdeps.haskellers.com/reverse,
+-- finds all sections (each starts with the link to a package name),
+-- then extracts the name of the package and the first column,
+-- which is the number of reverse dependencies of that package.
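+--
+-- Result sketch (illustrative numbers):
+--   [("aeson", 1342), ("base", 14023)]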
+packagesAndReverseDeps reverseHtml = do
+  let tags = Tag.parseTags reverseHtml
+  let sections =  Tag.partitions (isJust . reverseLink) tags
+  let sectionNames = map (fromJust . reverseLink . head) sections
+  mapMaybe
+    (\(name :: Text.Text, sect) -> do
+        reverseDeps <- firstNaturalNumber sect
+        pure (sectionPackageName name sect, reverseDeps) :: Maybe (Text.Text, Natural))
+    $ zip sectionNames sections
+
+
+  where
+    reverseLink = \case
+      Tag.TagOpen "a" attrs -> mapFind attrReverseLink attrs
+      _ -> Nothing
+
+    attrReverseLink = \case
+      ("href", lnk) -> if
+          | "packdeps.haskellers.com/reverse/" `Text.isInfixOf` lnk -> Just lnk
+          | otherwise -> Nothing
+      _ -> Nothing
+
+    sectionPackageName :: Text -> [Tag] -> Text
+    sectionPackageName sectionName = \case
+      (_: Tag.TagText name : _) -> name
+      _ -> sectionName
+
+
+    firstNaturalNumber :: [Tag] -> Maybe Natural
+    firstNaturalNumber =
+      mapFind (\case
+        Tag.TagText t -> parseNat t
+        _ -> Nothing)
+
+    mapFind :: (a -> Maybe b) -> [a] -> Maybe b
+    mapFind f xs = fromJust . f <$> List.find (isJust . f) xs
diff --git a/users/Profpatsch/reverse-haskell-deps.nix b/users/Profpatsch/reverse-haskell-deps.nix
new file mode 100644
index 000000000000..6df7bc6329cd
--- /dev/null
+++ b/users/Profpatsch/reverse-haskell-deps.nix
@@ -0,0 +1,31 @@
+{ depot, pkgs, ... }:
+
+# Parses https://packdeps.haskellers.com/reverse
+# and outputs the number of reverse dependencies of each hackage package.
+
+let
+
+  rev = depot.nix.writeExecline "reverse-haskell-deps" { } [
+    "pipeline"
+    [
+      "${pkgs.curl}/bin/curl"
+      "-L"
+      "https://packdeps.haskellers.com/reverse"
+    ]
+    rev-hs
+  ];
+
+  rev-hs = pkgs.writers.writeHaskell "reverse-haskell-deps-hs"
+    {
+      libraries = [
+        pkgs.haskellPackages.nicify-lib
+        pkgs.haskellPackages.tagsoup
+      ];
+
+    }
+    ./reverse-haskell-deps.hs;
+
+in
+rev
diff --git a/users/Profpatsch/solarized.dhall b/users/Profpatsch/solarized.dhall
new file mode 100644
index 000000000000..01e14d64f4ba
--- /dev/null
+++ b/users/Profpatsch/solarized.dhall
@@ -0,0 +1,39 @@
+-- SOLARIZED HEX     16/8 TERMCOL  XTERM/HEX   L*A*B      RGB         HSB
+-- --------- ------- ---- -------  ----------- ---------- ----------- -----------
+-- base03    #002b36  8/4 brblack  234 #1c1c1c 15 -12 -12   0  43  54 193 100  21
+-- base02    #073642  0/4 black    235 #262626 20 -12 -12   7  54  66 192  90  26
+-- base01    #586e75 10/7 brgreen  240 #585858 45 -07 -07  88 110 117 194  25  46
+-- base00    #657b83 11/7 bryellow 241 #626262 50 -07 -07 101 123 131 195  23  51
+-- base0     #839496 12/6 brblue   244 #808080 60 -06 -03 131 148 150 186  13  59
+-- base1     #93a1a1 14/4 brcyan   245 #8a8a8a 65 -05 -02 147 161 161 180   9  63
+-- base2     #eee8d5  7/7 white    254 #e4e4e4 92 -00  10 238 232 213  44  11  93
+-- base3     #fdf6e3 15/7 brwhite  230 #ffffd7 97  00  10 253 246 227  44  10  99
+-- yellow    #b58900  3/3 yellow   136 #af8700 60  10  65 181 137   0  45 100  71
+-- orange    #cb4b16  9/3 brred    166 #d75f00 50  50  55 203  75  22  18  89  80
+-- red       #dc322f  1/1 red      160 #d70000 50  65  45 220  50  47   1  79  86
+-- magenta   #d33682  5/5 magenta  125 #af005f 50  65 -05 211  54 130 331  74  83
+-- violet    #6c71c4 13/5 brmagenta 61 #5f5faf 50  15 -45 108 113 196 237  45  77
+-- blue      #268bd2  4/4 blue      33 #0087ff 55 -10 -45  38 139 210 205  82  82
+-- cyan      #2aa198  6/6 cyan      37 #00afaf 60 -35 -05  42 161 152 175  74  63
+-- green     #859900  2/2 green     64 #5f8700 60 -20  65 133 153   0  68 100  60
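+--
+-- Usage sketch: `(./solarized.dhall).hex.blue` evaluates to "#268bd2".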
+{
+  hex  =
+    {
+      base03 = "#002b36",
+      base02 = "#073642",
+      base01 = "#586e75",
+      base00 = "#657b83",
+      base0 = "#839496",
+      base1 = "#93a1a1",
+      base2 = "#eee8d5",
+      base3 = "#fdf6e3",
+      yellow = "#b58900",
+      orange = "#cb4b16",
+      red = "#dc322f",
+      magenta = "#d33682",
+      violet = "#6c71c4",
+      blue = "#268bd2",
+      cyan = "#2aa198",
+      green = "#859900",
+    }
+}
diff --git a/users/Profpatsch/struct-edit/default.nix b/users/Profpatsch/struct-edit/default.nix
new file mode 100644
index 000000000000..11a7200ce427
--- /dev/null
+++ b/users/Profpatsch/struct-edit/default.nix
@@ -0,0 +1,13 @@
+{ depot, ... }:
+depot.nix.buildGo.program {
+  name = "struct-edit";
+  srcs = [
+    ./main.go
+  ];
+  deps = [
+    depot.third_party.gopkgs."github.com".charmbracelet.bubbletea
+    depot.third_party.gopkgs."github.com".charmbracelet.lipgloss
+    depot.third_party.gopkgs."github.com".muesli.termenv
+    depot.third_party.gopkgs."github.com".mattn.go-isatty
+  ];
+}
diff --git a/users/Profpatsch/struct-edit/main.go b/users/Profpatsch/struct-edit/main.go
new file mode 100644
index 000000000000..c1a701338534
--- /dev/null
+++ b/users/Profpatsch/struct-edit/main.go
@@ -0,0 +1,431 @@
+package main
+
+import (
+	json "encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"sort"
+	"strings"
+
+	tea "github.com/charmbracelet/bubbletea"
+	lipgloss "github.com/charmbracelet/lipgloss"
+	// termenv "github.com/muesli/termenv"
+	// isatty "github.com/mattn/go-isatty"
+)
+
+// Keeps the full data structure and a path that indexes our current position into it.
+type model struct {
+	path []index
+	data val
+}
+
+// an index into a value, uint for lists and string for maps.
+// nil for any scalar value.
+// TODO: use an actual interface for these
+type index interface{}
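+
+// Example (illustrative): for the JSON value {"a": [10, "x"]},
+// the path []index{"a", uint(1)} points at the scalar "x".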
+
+// recursive value that we can represent.
+type val struct {
+	// the “type” of value; see the tag consts below
+	tag tag
+	// last known position of our cursor
+	last_index index
+	// documentation (TODO)
+	doc string
+	// the actual value;
+	// it is behind a pointer so we can replace the struct in place.
+	// the concrete type is determined by the tag:
+	// tagString -> *string
+	// tagFloat -> *float64
+	// tagList -> *[]val
+	// tagMap -> *map[string]val
+	val interface{}
+}
+
+type tag string
+
+const (
+	tagString tag = "string"
+	tagFloat  tag = "float"
+	tagList   tag = "list"
+	tagMap    tag = "map"
+)
+
+// print a value, flat
+func (v val) Render() string {
+	s := ""
+	switch v.tag {
+	case tagString:
+		s += *v.val.(*string)
+	case tagFloat:
+		s += fmt.Sprint(*v.val.(*float64))
+	case tagList:
+		s += "[ "
+		vs := []string{}
+		for _, enum := range v.enumerate() {
+			vs = append(vs, enum.v.Render())
+		}
+		s += strings.Join(vs, ", ")
+		s += " ]"
+	case tagMap:
+		s += "{ "
+		vs := []string{}
+		for _, enum := range v.enumerate() {
+			vs = append(vs, fmt.Sprintf("%s: %s", enum.i.(string), enum.v.Render()))
+		}
+		s += strings.Join(vs, ", ")
+		s += " }"
+	default:
+		s += fmt.Sprintf("<unknown: %v>", v)
+	}
+	return s
+}
+
+// render an index, depending on the type
+func renderIndex(i index) (s string) {
+	switch i := i.(type) {
+	case nil:
+		s = ""
+	// list index
+	case uint:
+		s = "*"
+	// map index
+	case string:
+		s = i + ":"
+	}
+	return
+}
+
+// take an arbitrary (within restrictions) go value and construct a val from it
+func makeVal(i interface{}) val {
+	var v val
+	switch i := i.(type) {
+	case string:
+		v = val{
+			tag:        tagString,
+			last_index: index(nil),
+			doc:        "",
+			val:        &i,
+		}
+	case float64:
+		v = val{
+			tag:        tagFloat,
+			last_index: index(nil),
+			doc:        "",
+			val:        &i,
+		}
+	case []interface{}:
+		ls := []val{}
+		for _, i := range i {
+			ls = append(ls, makeVal(i))
+		}
+		v = val{
+			tag:        tagList,
+			last_index: pos1Inner(tagList, &ls),
+			doc:        "",
+			val:        &ls,
+		}
+	case map[string]interface{}:
+		ls := map[string]val{}
+		for k, i := range i {
+			ls[k] = makeVal(i)
+		}
+		v = val{
+			tag:        tagMap,
+			last_index: pos1Inner(tagMap, &ls),
+			doc:        "",
+			val:        &ls,
+		}
+	default:
+		log.Fatalf("makeVal: cannot read json of type %T", i)
+	}
+	return v
+}
+
+// return an index that points at the first entry in val
+func (v val) pos1() index {
+	return v.enumerate()[0].i
+}
+
+func pos1Inner(tag tag, v interface{}) index {
+	return enumerateInner(tag, v)[0].i
+}
+
+type enumerate struct {
+	i index
+	v val
+}
+
+// enumerate gives us a stable ordering of elements in this val.
+// for scalars it’s just a nil index & the val itself.
+// Guaranteed to always return at least one element.
+func (v val) enumerate() (e []enumerate) {
+	e = enumerateInner(v.tag, v.val)
+	if e == nil {
+		e = append(e, enumerate{
+			i: nil,
+			v: v,
+		})
+	}
+	return
+}
+
+// like enumerate, but returns an empty slice for scalars without inner vals.
+func enumerateInner(tag tag, v interface{}) (e []enumerate) {
+	switch tag {
+	case tagString:
+		fallthrough
+	case tagFloat:
+		e = nil
+	case tagList:
+		for i, v := range *v.(*[]val) {
+			e = append(e, enumerate{i: index(uint(i)), v: v})
+		}
+	case tagMap:
+		// map iteration order is not stable (Go actively randomizes it),
+		// so let’s sort the keys
+		keys := []string{}
+		m := *v.(*map[string]val)
+		for k := range m {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			e = append(e, enumerate{i: index(k), v: m[k]})
+		}
+	default:
+		log.Fatalf("unknown val tag %s, %v", tag, v)
+	}
+	return
+}
+
+func (m model) PathString() string {
+	s := "/ "
+	var is []string
+	for _, v := range m.path {
+		is = append(is, fmt.Sprintf("%v", v))
+	}
+	s += strings.Join(is, " / ")
+	return s
+}
+
+// walk the given path down in data, to get the value at that point.
+// Assumes that all path indexes are valid indexes into data.
+// Returns a pointer to the value at point, in order to be able to change it.
+func walk(data *val, path []index) (*val, bool, error) {
+	res := data
+	atPath := func(index int) string {
+		return fmt.Sprintf("at path %v", path[:index+1])
+	}
+	errf := func(ty string, val interface{}, index int) error {
+		return fmt.Errorf("walk: can’t walk into %s %v %s", ty, val, atPath(index))
+	}
+	for i, p := range path {
+		switch res.tag {
+		case tagString:
+			return nil, true, nil
+		case tagFloat:
+			return nil, true, nil
+		case tagList:
+			switch p := p.(type) {
+			case uint:
+				list := *res.val.(*[]val)
+				// p is a uint, so it cannot be negative
+				if int(p) >= len(list) {
+					return nil, false, fmt.Errorf("index out of bounds %s", atPath(i))
+				}
+				res = &list[p]
+			default:
+				return nil, false, fmt.Errorf("not a list index %s", atPath(i))
+			}
+		case tagMap:
+			switch p := p.(type) {
+			case string:
+				m := *res.val.(*map[string]val)
+				if a, ok := m[p]; ok {
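+					// NOTE: map elements are not addressable in Go, so `a` is a
+					// copy and this pointer cannot be used to mutate the map entry.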
+					res = &a
+				} else {
+					return nil, false, fmt.Errorf("index %s not in map %s", p, atPath(i))
+				}
+			default:
+				return nil, false, fmt.Errorf("not a map index %v %s", p, atPath(i))
+			}
+
+		default:
+			return nil, false, errf(string(res.tag), res.val, i)
+		}
+	}
+	return res, false, nil
+}
+
+// descend into the selected index. Assumes that the index is valid.
+// Will not descend into scalars.
+func (m model) descend() (model, error) {
+	// TODO: two walks?!
+	this, _, err := walk(&m.data, m.path)
+	if err != nil {
+		return m, err
+	}
+	newPath := append(m.path, this.last_index)
+	_, bounce, err := walk(&m.data, newPath)
+	if err != nil {
+		return m, err
+	}
+	// only descend if we *can*
+	if !bounce {
+		m.path = newPath
+	}
+	return m, nil
+}
+
+// ascend to one level up. stops at the root.
+func (m model) ascend() (model, error) {
+	if len(m.path) > 0 {
+		m.path = m.path[:len(m.path)-1]
+		_, _, err := walk(&m.data, m.path)
+		return m, err
+	}
+	return m, nil
+}
+
+// go to the next item, or wrap around
+func (min model) next() (m model, err error) {
+	m = min
+	this, _, err := walk(&m.data, m.path)
+	if err != nil {
+		return
+	}
+	enumL := this.enumerate()
+	setNext := false
+	for _, enum := range enumL {
+		if setNext {
+			this.last_index = enum.i
+			setNext = false
+			break
+		}
+		if enum.i == this.last_index {
+			setNext = true
+		}
+	}
+	// wraparound
+	if setNext {
+		this.last_index = enumL[0].i
+	}
+	return
+}
+
+// go to the previous item, or wrap around
+func (min model) prev() (m model, err error) {
+	m = min
+	this, _, err := walk(&m.data, m.path)
+	if err != nil {
+		return
+	}
+	enumL := this.enumerate()
+	// last element, wraparound
+	prevIndex := enumL[len(enumL)-1].i
+	for _, enum := range enumL {
+		if enum.i == this.last_index {
+			this.last_index = prevIndex
+			break
+		}
+		prevIndex = enum.i
+	}
+	return
+}
+
+// bubbletea implementations
+
+func (m model) Init() tea.Cmd {
+	return nil
+}
+
+func initialModel(v interface{}) model {
+	val := makeVal(v)
+	return model{
+		path: []index{},
+		data: val,
+	}
+}
+
+func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	var err error
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		switch msg.String() {
+		case "ctrl+c", "q":
+			return m, tea.Quit
+
+		case "up":
+			m, err = m.prev()
+
+		case "down":
+			m, err = m.next()
+
+		case "right":
+			m, err = m.descend()
+
+		case "left":
+			m, err = m.ascend()
+
+			// 	case "enter":
+			// 		_, ok := m.selected[m.cursor]
+			// 		if ok {
+			// 			delete(m.selected, m.cursor)
+			// 		} else {
+			// 			m.selected[m.cursor] = struct{}{}
+			// 		}
+		}
+
+	}
+	if err != nil {
+		log.Fatal(err)
+	}
+	return m, nil
+}
+
+var pathColor = lipgloss.NewStyle().
+	// light blue
+	Foreground(lipgloss.Color("12"))
+
+var selectedColor = lipgloss.NewStyle().
+	Bold(true)
+
+func (m model) View() string {
+	s := pathColor.Render(m.PathString())
+	cur, _, err := walk(&m.data, m.path)
+	if err != nil {
+		log.Fatal(err)
+	}
+	s += cur.doc + "\n"
+	s += "\n"
+	for _, enum := range cur.enumerate() {
+		is := renderIndex(enum.i)
+		if is != "" {
+			s += is + " "
+		}
+		if enum.i == cur.last_index {
+			s += selectedColor.Render(enum.v.Render())
+		} else {
+			s += enum.v.Render()
+		}
+		s += "\n"
+	}
+
+	// s += fmt.Sprintf("%v\n", m)
+	// s += fmt.Sprintf("%v\n", cur)
+
+	return s
+}
+
+func main() {
+	var input interface{}
+	err := json.NewDecoder(os.Stdin).Decode(&input)
+	if err != nil {
+		log.Fatal("json from stdin: ", err)
+	}
+	p := tea.NewProgram(initialModel(input))
+	if err := p.Start(); err != nil {
+		log.Fatal("bubbletea TUI error: ", err)
+	}
+}
diff --git a/users/Profpatsch/toINI.nix b/users/Profpatsch/toINI.nix
new file mode 100644
index 000000000000..537505d30bbc
--- /dev/null
+++ b/users/Profpatsch/toINI.nix
@@ -0,0 +1,79 @@
+{ lib, ... }:
+let
+  /* Generate an INI-style config file from an attrset
+   * specifying the global section (no header), and a
+   * list of sections which contain name/value pairs.
+   *
+   * generators.toINI {} {
+   *   globalSection = [
+   *     { name = "someGlobalKey"; value = "hi"; }
+   *   ];
+   *   sections = [
+   *     { name = "foo"; value = [
+   *         { name = "hi"; value = "${pkgs.hello}"; }
+   *         { name = "ciao"; value = "bar"; }
+   *       ];
+   *     }
+   *     { name = "baz";
+   *       value = [ { name = "also, integers"; value = 42; } ];
+   *     }
+   *   ];
+   * }
+   *
+   *> someGlobalKey=hi
+   *>
+   *> [foo]
+   *> hi=/nix/store/y93qql1p5ggfnaqjjqhxcw0vqw95rlz0-hello-2.10
+   *> ciao=bar
+   *>
+   *> [baz]
+   *> also, integers=42
+   *>
+   *
+   * The mk* configuration attributes can generically change
+   * the way sections and key-value strings are generated.
+   *
+   * Order of the sections and of keys is preserved,
+   * duplicate keys are allowed.
+   */
+  toINI =
+    {
+      # apply transformations (e.g. escapes) to section names
+      mkSectionName ? (name: lib.strings.escape [ "[" "]" ] name)
+    , # format a setting line from key and value
+      mkKeyValue ? lib.generators.mkKeyValueDefault { } "="
+    ,
+    }: { globalSection, sections }:
+    let
+      mkSection = sectName: sectValues: ''
+        [${mkSectionName sectName}]
+      '' + toKeyValue { inherit mkKeyValue; } sectValues;
+      # map input to ini sections
+      mkSections = lib.strings.concatMapStringsSep "\n"
+        ({ name, value }: mkSection name value)
+        sections;
+      mkGlobalSection =
+        if globalSection == [ ]
+        then ""
+        else toKeyValue { inherit mkKeyValue; } globalSection
+          + "\n";
+    in
+    mkGlobalSection
+    + mkSections;
+
+  /* Generate a name-value-style config file from a list.
+   *
+   * mkKeyValue is the same as in toINI.
+   */
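+  /* Example:
+   *
+   * toKeyValue {} [
+   *   { name = "foo"; value = 1; }
+   *   { name = "foo"; value = 2; }
+   * ]
+   *
+   *> foo=1
+   *> foo=2
+   */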
+  toKeyValue =
+    { mkKeyValue ? lib.generators.mkKeyValueDefault { } "="
+    ,
+    }:
+    let
+      mkLine = k: v: mkKeyValue k v + "\n";
+      mkLines = k: v: [ (mkLine k v) ];
+    in
+    nameValues: lib.strings.concatStrings (lib.concatLists (map ({ name, value }: mkLines name value) nameValues));
+
+in
+toINI
diff --git a/users/Profpatsch/tree-sitter.nix b/users/Profpatsch/tree-sitter.nix
new file mode 100644
index 000000000000..2224da2a3b8c
--- /dev/null
+++ b/users/Profpatsch/tree-sitter.nix
@@ -0,0 +1,209 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.coreutils [ "head" "printf" "cat" ]
+    // depot.nix.getBins pkgs.ncurses [ "tput" ]
+    // depot.nix.getBins pkgs.bc [ "bc" ]
+    // depot.nix.getBins pkgs.ocamlPackages.sexp [ "sexp" ];
+
+  print-ast = depot.nix.writers.rustSimple
+    {
+      name = "print-ast";
+      dependencies = with depot.third_party.rust-crates; [
+        libloading
+        tree-sitter
+      ];
+    } ''
+    extern crate libloading;
+    extern crate tree_sitter;
+    use std::mem;
+    use std::io::{Read};
+    use libloading::{Library, Symbol};
+    use tree_sitter::{Language, Parser};
+
+    /// Load the shared lib FILE and return the language under SYMBOL-NAME.
+    /// Inspired by the rust source of emacs-tree-sitter.
+    fn _load_language(file: String, symbol_name: String) -> Result<Language, libloading::Error> {
+        let lib = Library::new(file)?;
+        let tree_sitter_lang: Symbol<'_, unsafe extern "C" fn() -> _> =
+            unsafe { lib.get(symbol_name.as_bytes())? };
+        let language: Language = unsafe { tree_sitter_lang() };
+        // Avoid segmentation fault by not unloading the lib, as language is a static piece of data.
+        // TODO: Attach an Rc<Library> to Language instead.
+        mem::forget(lib);
+        Ok(language)
+    }
+
+    fn main() {
+      let mut args = std::env::args();
+      let so = args.nth(1).unwrap();
+      let symbol_name = args.nth(0).unwrap();
+      let file = args.nth(0).unwrap();
+      let mut parser = Parser::new();
+      let lang = _load_language(so, symbol_name).unwrap();
+      parser.set_language(lang).unwrap();
+      let bytes = std::fs::read(&file).unwrap();
+      print!("{}", parser.parse(&bytes, None).unwrap().root_node().to_sexp());
+    }
+
+
+  '';
+
+  tree-sitter-nix = buildTreeSitterGrammar {
+    language = "tree-sitter-nix";
+    source = pkgs.fetchFromGitHub {
+      owner = "cstrahan";
+      repo = "tree-sitter-nix";
+      rev = "791b5ff0e4f0da358cbb941788b78d436a2ca621";
+      sha256 = "1y5b3wh3fcmbgq8r2i97likzfp1zp02m58zacw5a1cjqs5raqz66";
+    };
+  };
+
+  watch-file-modified = depot.nix.writers.rustSimple
+    {
+      name = "watch-file-modified";
+      dependencies = [
+        depot.third_party.rust-crates.inotify
+        depot.users.Profpatsch.netstring.rust-netstring
+      ];
+    } ''
+    extern crate inotify;
+    extern crate netstring;
+    use inotify::{EventMask, WatchMask, Inotify};
+    use std::io::Write;
+
+    fn main() {
+        let mut inotify = Inotify::init()
+            .expect("Failed to initialize inotify");
+
+        let file = std::env::args().nth(1).unwrap();
+
+        let file_watch = inotify
+            .add_watch(
+                &file,
+                WatchMask::MODIFY
+            )
+            .expect("Failed to add inotify watch");
+
+        let mut buffer = [0u8; 4096];
+        loop {
+            let events = inotify
+                .read_events_blocking(&mut buffer)
+                .expect("Failed to read inotify events");
+
+            for event in events {
+                if event.wd == file_watch {
+                  // write_all handles partial writes; fail loudly instead of
+                  // silently dropping output
+                  std::io::stdout().write_all(&netstring::to_netstring(file.as_bytes()))
+                      .expect("could not write to stdout");
+                  std::io::stdout().flush().expect("could not flush stdout");
+                }
+            }
+        }
+    }
+
+  '';
+
+  # clear screen and set LINES and COLUMNS to terminal height & width
+  clear-screen = depot.nix.writeExecline "clear-screen" { } [
+    "if"
+    [ bins.tput "clear" ]
+    "backtick"
+    "-in"
+    "LINES"
+    [ bins.tput "lines" ]
+    "backtick"
+    "-in"
+    "COLUMNS"
+    [ bins.tput "cols" ]
+    "$@"
+  ];
+
+  print-nix-file = depot.nix.writeExecline "print-nix-file" { readNArgs = 1; } [
+    "pipeline"
+    [ print-ast "${tree-sitter-nix}/parser" "tree_sitter_nix" "$1" ]
+    "pipeline"
+    [ bins.sexp "print" ]
+    clear-screen
+    "importas"
+    "-ui"
+    "lines"
+    "LINES"
+    "backtick"
+    "-in"
+    "ls"
+    [
+      "pipeline"
+      # when you pull out bc to decrement an integer it’s time to switch to python lol
+      [ bins.printf "x=%s; --x\n" "$lines" ]
+      bins.bc
+    ]
+    "importas"
+    "-ui"
+    "l"
+    "ls"
+    bins.head
+    "-n\${l}"
+  ];
+
+  print-nix-file-on-update = depot.nix.writeExecline "print-nix-file-on-update" { readNArgs = 1; } [
+    "if"
+    [ print-nix-file "$1" ]
+    "pipeline"
+    [ watch-file-modified "$1" ]
+    "forstdin"
+    "-d"
+    ""
+    "file"
+    "importas"
+    "file"
+    "file"
+    print-nix-file
+    "$file"
+  ];
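+
+  # Usage sketch:
+  #   print-nix-file-on-update ./default.nix
+  # prints the parsed syntax tree once, then re-renders it every time
+  # the file is written to.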
+
+  # copied from nixpkgs
+  buildTreeSitterGrammar =
+    {
+      # language name
+      language
+      # source for the language grammar
+    , source
+    }:
+
+    pkgs.stdenv.mkDerivation {
+
+      pname = "${language}-grammar";
+      inherit (pkgs.tree-sitter) version;
+
+      src = source;
+
+      buildInputs = [ pkgs.tree-sitter ];
+
+      dontUnpack = true;
+      configurePhase = ":";
+      buildPhase = ''
+        runHook preBuild
+        scanner_cc="$src/src/scanner.cc"
+        if [ ! -f "$scanner_cc" ]; then
+          scanner_cc=""
+        fi
+        $CXX -I$src/src/ -c $scanner_cc
+        $CC -I$src/src/ -shared -o parser -Os  scanner.o $src/src/parser.c -lstdc++
+        runHook postBuild
+      '';
+      installPhase = ''
+        runHook preInstall
+        mkdir $out
+        mv parser $out/
+        runHook postInstall
+      '';
+    };
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    print-ast
+    tree-sitter-nix
+    print-nix-file-on-update
+    watch-file-modified
+    ;
+}
diff --git a/users/Profpatsch/writers/default.nix b/users/Profpatsch/writers/default.nix
new file mode 100644
index 000000000000..02f39da02dbe
--- /dev/null
+++ b/users/Profpatsch/writers/default.nix
@@ -0,0 +1,97 @@
+{ depot, pkgs, lib, ... }:
+let
+  bins = depot.nix.getBins pkgs.s6-portable-utils [ "s6-mkdir" "s6-cat" "s6-ln" "s6-ls" "s6-touch" ]
+    // depot.nix.getBins pkgs.coreutils [ "printf" ];
+
+  inherit (depot.nix.yants) defun struct restrict attrs list string drv any;
+
+  inherit (depot.nix) drvSeqL;
+
+  FlakeError =
+    restrict
+      "flake error"
+      (s: lib.any (prefix: (builtins.substring 0 1 s) == prefix)
+        [ "E" "W" ])
+      string;
+  Libraries = defun [ (attrs any) (list drv) ];
+
+  python3 =
+    { name
+    , libraries ? (_: [ ])
+    , flakeIgnore ? [ ]
+    }: pkgs.writers.writePython3 name {
+      libraries = Libraries libraries pkgs.python3Packages;
+      flakeIgnore =
+        let
+          ignoreTheseErrors = [
+            # whitespace after {
+            "E201"
+            # whitespace before }
+            "E202"
+            # fuck 4-space indentation
+            "E121"
+            "E111"
+            # who cares about blank lines …
+            # … at end of files
+            "W391"
+            # … between functions
+            "E302"
+            "E305"
+          ];
+        in
+        list FlakeError (ignoreTheseErrors ++ flakeIgnore);
+    };
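+
+  # Example: build a one-off script drv,
+  #   python3 { name = "say-hi"; } ''
+  #     print("hi")
+  #   ''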
+
+  # TODO: add the same flake check as the python3 writer
+  python3Lib = { name, libraries ? (_: [ ]) }: moduleString:
+    let
+      srcTree = depot.nix.runExecline.local name { stdin = moduleString; } [
+        "importas"
+        "out"
+        "out"
+        "if"
+        [ bins.s6-mkdir "-p" "\${out}/${name}" ]
+        "if"
+        [
+          "redirfd"
+          "-w"
+          "1"
+          "\${out}/setup.py"
+          bins.printf
+          ''
+            from distutils.core import setup
+
+            setup(
+              name='%s',
+              packages=['%s']
+            )
+          ''
+          name
+          name
+        ]
+        "if"
+        [
+          # redirect stdin to the init py
+          "redirfd"
+          "-w"
+          "1"
+          "\${out}/${name}/__init__.py"
+          bins.s6-cat
+        ]
+      ];
+    in
+    pkgs.python3Packages.buildPythonPackage {
+      inherit name;
+      src = srcTree;
+      propagatedBuildInputs = libraries pkgs.python3Packages;
+      doCheck = false;
+    };
+
+
+in
+{
+  inherit
+    python3
+    python3Lib
+    ;
+}
diff --git a/users/Profpatsch/writers/tests/default.nix b/users/Profpatsch/writers/tests/default.nix
new file mode 100644
index 000000000000..d0d62d3b0e1b
--- /dev/null
+++ b/users/Profpatsch/writers/tests/default.nix
@@ -0,0 +1,56 @@
+{ depot, pkgs, ... }:
+
+let
+  inherit (depot.users.Profpatsch.writers)
+    python3Lib
+    python3
+    ;
+
+  inherit (pkgs)
+    coreutils
+    ;
+
+  run = drv: depot.nix.runExecline.local "run-${drv.name}" { } [
+    "if"
+    [ drv ]
+    "importas"
+    "out"
+    "out"
+    "${coreutils}/bin/touch"
+    "$out"
+  ];
+
+  pythonTransitiveLib = python3Lib
+    {
+      name = "transitive";
+    } ''
+    def transitive(s):
+      return s + " 1 2 3"
+  '';
+
+  pythonTestLib = python3Lib
+    {
+      name = "test_lib";
+      libraries = _: [ pythonTransitiveLib ];
+    } ''
+    import transitive
+    def test():
+      return transitive.transitive("test")
+  '';
+
+  pythonWithLib = run (python3
+    {
+      name = "python-with-lib";
+      libraries = _: [ pythonTestLib ];
+    } ''
+    import test_lib
+
+    assert(test_lib.test() == "test 1 2 3")
+  '');
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    pythonWithLib
+    ;
+}
diff --git a/users/Profpatsch/ytextr/create-symlink-farm.nix b/users/Profpatsch/ytextr/create-symlink-farm.nix
new file mode 100644
index 000000000000..7b3a45b91681
--- /dev/null
+++ b/users/Profpatsch/ytextr/create-symlink-farm.nix
@@ -0,0 +1,19 @@
+{
+  # list of package attribute names to get at run time
+  packageNamesAtRuntimeJsonPath
+,
+}:
+let
+  pkgs = import <nixpkgs> { };
+
+  getPkg = pkgName: pkgs.${pkgName};
+
+  packageNamesAtRuntime = builtins.fromJSON (builtins.readFile packageNamesAtRuntimeJsonPath);
+
+  runtime = map getPkg packageNamesAtRuntime;
+
+in
+pkgs.symlinkJoin {
+  name = "symlink-farm";
+  paths = runtime;
+}
diff --git a/users/Profpatsch/ytextr/default.nix b/users/Profpatsch/ytextr/default.nix
new file mode 100644
index 000000000000..ac630603b90c
--- /dev/null
+++ b/users/Profpatsch/ytextr/default.nix
@@ -0,0 +1,82 @@
+{ depot, pkgs, lib, ... }:
+
+# ytextr is a wrapper around yt-dlp (previously youtube-dl)
+# that extracts a single video according to my preferred settings.
+#
+# It will be sandboxed to the current directory, since I don’t particularly
+# trust the massive codebase of that tool (with hundreds of contributors).
+#
+# Since downloading videos usually goes against the wishes
+# of proprietary vendors, and a video is many megabytes anyway,
+# yt-dlp is fetched from the most recent nixpkgs unstable channel before running.
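+#
+# Usage: ytextr <url>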
+
+let
+  bins = depot.nix.getBins pkgs.nix [ "nix-build" ]
+    // depot.nix.getBins pkgs.bubblewrap [ "bwrap" ];
+
+  # Run an executable with the given packages in PATH; `packageNamesAtRuntime` is fetched from the given nix `channel` when the wrapper starts.
+  nix-run-with-channel =
+    {
+      # The channel to get `packageNamesAtRuntime` from
+      channel
+    , # executable to run with `packageNamesAtRuntime` in PATH
+      # and the argv
+      executable
+    , # A list of nixpkgs package attribute names that should be put into PATH when running `command`.
+      packageNamesAtRuntime
+    ,
+    }: depot.nix.writeExecline "nix-run-with-channel-${channel}" { } [
+      # TODO: prevent race condition by writing a temporary gc root
+      "backtick"
+      "-iE"
+      "storepath"
+      [
+        bins.nix-build
+        "-I"
+        "nixpkgs=channel:${channel}"
+        "--arg"
+        "packageNamesAtRuntimeJsonPath"
+        (pkgs.writeText "packageNamesAtRuntime.json" (builtins.toJSON packageNamesAtRuntime))
+        ./create-symlink-farm.nix
+      ]
+      "importas"
+      "-ui"
+      "PATH"
+      "PATH"
+      "export"
+      "PATH"
+      "\${storepath}/bin:\${PATH}"
+      executable
+      "$@"
+    ];
+
+in
+nix-run-with-channel {
+  channel = "nixos-unstable";
+  packageNamesAtRuntime = [ "yt-dlp" ];
+  executable = depot.nix.writeExecline "ytextr" { readNArgs = 1; } [
+    "getcwd"
+    "-E"
+    "cwd"
+    bins.bwrap
+    "--ro-bind"
+    "/nix/store"
+    "/nix/store"
+    "--ro-bind"
+    "/etc"
+    "/etc"
+    "--bind"
+    "$cwd"
+    "$cwd"
+    "yt-dlp"
+    "--no-playlist"
+    "--write-sub"
+    "--all-subs"
+    "--embed-subs"
+    "--merge-output-format"
+    "mkv"
+    "-f"
+    "bestvideo[height<=?1080]+bestaudio/best"
+    "$1"
+  ];
+}