Diffstat (limited to 'users/Profpatsch/blog')
-rw-r--r--  users/Profpatsch/blog/README.md                                          7
-rw-r--r--  users/Profpatsch/blog/default.nix                                      481
-rw-r--r--  users/Profpatsch/blog/notes/an-idealized-conflang.md                   298
-rw-r--r--  users/Profpatsch/blog/notes/preventing-oom.md                           33
-rw-r--r--  users/Profpatsch/blog/notes/private-trackers-are-markets.md             46
-rw-r--r--  users/Profpatsch/blog/notes/rust-string-conversions.md                  53
-rw-r--r--  users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md  123
7 files changed, 1041 insertions, 0 deletions
diff --git a/users/Profpatsch/blog/README.md b/users/Profpatsch/blog/README.md
new file mode 100644
index 000000000000..0753ebdea542
--- /dev/null
+++ b/users/Profpatsch/blog/README.md
@@ -0,0 +1,7 @@
+# (Parts of) my website
+
+This is a part of https://profpatsch.de/, notably the blog posts.
+
+The other parts can be found in [vuizvui](https://github.com/openlab-aux/vuizvui/tree/master/pkgs/profpatsch/profpatsch.de). It’s a mess.
+
+And yes, this implements a webserver & routing engine with nix, execline & s6 utils. “Bis einer weint” (“until someone cries”), as we say in German.
diff --git a/users/Profpatsch/blog/default.nix b/users/Profpatsch/blog/default.nix
new file mode 100644
index 000000000000..f233eda9bbdc
--- /dev/null
+++ b/users/Profpatsch/blog/default.nix
@@ -0,0 +1,481 @@
+{ depot, pkgs, lib, ... }:
+
+let
+  bins = depot.nix.getBins pkgs.lowdown [ "lowdown" ]
+    // depot.nix.getBins pkgs.cdb [ "cdbget" "cdbmake" "cdbdump" ]
+    // depot.nix.getBins pkgs.coreutils [ "mv" "cat" "printf" "test" ]
+    // depot.nix.getBins pkgs.s6-networking [ "s6-tcpserver" ]
+    // depot.nix.getBins pkgs.time [ "time" ]
+  ;
+
+  # /
+  # TODO: use
+  toplevel = [
+    {
+      route = [ "notes" ];
+      name = "Notes";
+      page = { cssFile }: router cssFile;
+    }
+    {
+      route = [ "projects" ];
+      name = "Projects";
+      # page = projects;
+    }
+  ];
+
+  # /notes/*
+  notes = [
+    {
+      route = [ "notes" "private-trackers-are-markets" ];
+      name = "Private bittorrent trackers are markets";
+      page = { cssFile }: markdownToHtml {
+        name = "private-trackers-are-markets";
+        markdown = ./notes/private-trackers-are-markets.md;
+        inherit cssFile;
+      };
+    }
+    {
+      route = [ "notes" "an-idealized-conflang" ];
+      name = "An Idealized Configuration Language";
+      page = { cssFile }: markdownToHtml {
+        name = "an-idealized-conflang";
+        markdown = ./notes/an-idealized-conflang.md;
+        inherit cssFile;
+      };
+    }
+    {
+      route = [ "notes" "rust-string-conversions" ];
+      name = "Converting between different String types in Rust";
+      page = { cssFile }: markdownToHtml {
+        name = "rust-string-conversions";
+        markdown = ./notes/rust-string-conversions.md;
+        inherit cssFile;
+      };
+    }
+    {
+      route = [ "notes" "preventing-oom" ];
+      name = "Preventing out-of-memory (OOM) errors on Linux";
+      page = { cssFile }: markdownToHtml {
+        name = "preventing-oom";
+        markdown = ./notes/preventing-oom.md;
+        inherit cssFile;
+      };
+    }
+  ];
+
+  projects = [
+    {
+      name = "lorri";
+      description = "<code>nix-shell</code> replacement for projects";
+      link = "https://github.com/nix-community/lorri";
+    }
+    {
+      name = "netencode";
+      description = ''A human-readable nested data exchange format inspired by <a href="https://en.wikipedia.org/wiki/Netstring">netstrings</a> and <a href="https://en.wikipedia.org/wiki/Bencode">bencode</a>.'';
+      link = depotCgitLink { relativePath = "users/Profpatsch/netencode/README.md"; };
+    }
+    {
+      name = "yarn2nix";
+      description = ''nix dependency generator for the <a href="https://yarnpkg.com/"><code>yarn</code> Javascript package manager</a>'';
+      link = "https://github.com/Profpatsch/yarn2nix";
+    }
+  ];
+
+  posts = [
+    {
+      date = "2017-05-04";
+      title = "Ligature Emulation in Emacs";
+      subtitle = "It’s not pretty, but the results are";
+      description = "How to set up ligatures using <code>prettify-symbols-mode</code> and the Hasklig/FiraCode fonts.";
+      page = { cssFile }: markdownToHtml {
+        name = "2017-05-04-ligature-emulation-in-emacs";
+        markdown = ./posts/2017-05-04-ligature-emulation-in-emacs.md;
+        inherit cssFile;
+      };
+      route = [ "posts" "2017-05-04-ligature-emulation-in-emacs" ];
+      tags = [ "emacs" ];
+    }
+  ];
+
+  # convert a markdown file to html via lowdown
+  markdownToHtml =
+    { name
+    , # the file to convert
+      markdown
+    , # css file to add to the final result, as { route }
+      cssFile
+    }:
+    depot.nix.runExecline "${name}.html" { } ([
+      "importas"
+      "out"
+      "out"
+      (depot.users.Profpatsch.lib.debugExec "")
+      bins.lowdown
+      "-s"
+      "-Thtml"
+    ] ++
+    (lib.optional (cssFile != null) ([ "-M" "css=${mkRoute cssFile.route}" ]))
+    ++ [
+      "-o"
+      "$out"
+      markdown
+    ]);
+
+  # takes a list of { route … } attrsets and converts each route list to an absolute path
+  fullRoute = attrs: lib.pipe attrs [
+    (map (x@{ route, ... }: x // { route = mkRoute route; }))
+  ];
+
+  # a cdb from route to a netencoded version of data for each route
+  router = cssFile: lib.pipe (notes ++ posts) [
+    (map (r: with depot.users.Profpatsch.lens;
+    lib.pipe r [
+      (over (field "route") mkRoute)
+      (over (field "page") (_ { inherit cssFile; }))
+    ]))
+    (map (x: {
+      name = x.route;
+      value = depot.users.Profpatsch.netencode.gen.dwim x;
+    }))
+    lib.listToAttrs
+    (cdbMake "router")
+  ];
+
+  # Create a link to the given source file/directory, given the relative path in the depot repo.
+  # Checks that the file exists at evaluation time.
+  depotCgitLink =
+    {
+      # relative path from the depot root (without leading /).
+      relativePath
+    }:
+      assert
+      (lib.assertMsg
+        (builtins.pathExists (depot.path.origSrc + ("/" + relativePath)))
+        "depotCgitLink: path /${relativePath} does not exist in depot, and depot.path was ${toString depot.path}");
+      "https://code.tvl.fyi/tree/${relativePath}";
+
+  # look up a route by path ($1)
+  router-lookup = cssFile: depot.nix.writeExecline "router-lookup" { readNArgs = 1; } [
+    cdbLookup
+    (router cssFile)
+    "$1"
+  ];
+
+  runExeclineStdout = name: args: cmd: depot.nix.runExecline name args ([
+    "importas"
+    "-ui"
+    "out"
+    "out"
+    "redirfd"
+    "-w"
+    "1"
+    "$out"
+  ] ++ cmd);
+
+  notes-index-html =
+    let o = fullRoute notes;
+    in ''
+      <ul>
+      ${scope o (o: ''
+        <li><a href="${str o.route}">${esc o.name}</a></li>
+      '')}
+      </ul>
+    '';
+
+  notes-index = pkgs.writeText "notes-index.html" notes-index-html;
+
+  # A simple mustache-inspired string interpolation combinator
+  # that takes an object and a template (a function from o to string)
+  # and returns a string.
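+  # e.g. scope [ { name = "x"; } ] (o: o.name) evaluates to "x".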
+  scope = o: tpl:
+    if builtins.typeOf o == "list" then
+      lib.concatMapStringsSep "\n" tpl o
+    else if builtins.typeOf o == "set" then
+      tpl o
+    else throw "${lib.generators.toPretty {} o} not allowed in template";
+
+  # string-escape html (TODO)
+  str = s: s;
+  # html-escape (TODO)
+  esc = s: s;
+  html = s: s;
+
+  projects-index-html =
+    let o = projects;
+    in ''
+      <dl>
+      ${scope o (o: ''
+        <dt><a href="${str o.link}">${esc o.name}</a></dt>
+        <dd>${html o.description}</dd>
+      '')}
+      </dl>
+    '';
+
+  projects-index = pkgs.writeText "projects-index.html" projects-index-html;
+
+  posts-index-html =
+    let o = fullRoute posts;
+    in ''
+      <dl>
+      ${scope o (o: ''
+        <dt>${str o.date} <a href="${str o.route}">${esc o.title}</a></dt>
+        <dd>${html o.description}</dd>
+      '')}
+      </dl>
+    '';
+
+  posts-index = pkgs.writeText "posts-index.html" posts-index-html;
+
+  arglibNetencode = val: depot.nix.writeExecline "arglib-netencode" { } [
+    "export"
+    "ARGLIB_NETENCODE"
+    (depot.users.Profpatsch.netencode.gen.dwim val)
+    "$@"
+  ];
+
+  # A simple http server that serves the site. Yes, it’s horrible.
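+  # The rough request pipeline: read-http parses the request and
+  # record-splice-env puts its fields into the environment; the path is then
+  # matched against the static indexes or looked up in the route cdb; finally
+  # the headers are rendered via netencode-mustache and the page file is
+  # streamed to the client.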
+  site-server = { cssFile, port }: depot.nix.writeExecline "blog-server" { } [
+    (depot.users.Profpatsch.lib.runInEmptyEnv [ "PATH" ])
+    bins.s6-tcpserver
+    "127.0.0.1"
+    port
+    bins.time
+    "--format=time: %es"
+    "--"
+    runOr
+    return400
+    "pipeline"
+    [
+      (arglibNetencode {
+        what = "request";
+      })
+      depot.users.Profpatsch.read-http
+    ]
+    depot.users.Profpatsch.netencode.record-splice-env
+    runOr
+    return500
+    "importas"
+    "-i"
+    "path"
+    "path"
+    "if"
+    [ depot.tools.eprintf "GET \${path}\n" ]
+    runOr
+    return404
+    "backtick"
+    "-ni"
+    "TEMPLATE_DATA"
+    [
+      # TODO: factor this out of here, this is routing not serving
+      "ifelse"
+      [ bins.test "$path" "=" "/notes" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        notes-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      "ifelse"
+      [ bins.test "$path" "=" "/projects" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        projects-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      "ifelse"
+      [ bins.test "$path" "=" "/posts" ]
+      [
+        "export"
+        "content-type"
+        "text/html"
+        "export"
+        "serve-file"
+        posts-index
+        depot.users.Profpatsch.netencode.env-splice-record
+      ]
+      # TODO: ignore potential query arguments. See 404 message
+      "pipeline"
+      [ (router-lookup cssFile) "$path" ]
+      depot.users.Profpatsch.netencode.record-splice-env
+      "importas"
+      "-ui"
+      "page"
+      "page"
+      "export"
+      "content-type"
+      "text/html"
+      "export"
+      "serve-file"
+      "$page"
+      depot.users.Profpatsch.netencode.env-splice-record
+    ]
+    runOr
+    return500
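+    # render the response: print the headers (content-type filled in via mustache), then stream the body file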
+    "if"
+    [
+      "pipeline"
+      [
+        bins.printf
+        ''
+          HTTP/1.1 200 OK
+          Content-Type: {{{content-type}}}; charset=UTF-8
+          Connection: close
+
+        ''
+      ]
+      depot.users.Profpatsch.netencode.netencode-mustache
+    ]
+    "pipeline"
+    [ "importas" "t" "TEMPLATE_DATA" bins.printf "%s" "$t" ]
+    depot.users.Profpatsch.netencode.record-splice-env
+    "importas"
+    "-ui"
+    "serve-file"
+    "serve-file"
+    bins.cat
+    "$serve-file"
+  ];
+
+  # run argv or $1 if argv returns a failure status code.
+  runOr = depot.nix.writeExecline "run-or" { readNArgs = 1; } [
+    "foreground"
+    [ "$@" ]
+    "importas"
+    "?"
+    "?"
+    "ifelse"
+    [ bins.test "$?" "-eq" "0" ]
+    [ ]
+    "if"
+    [ depot.tools.eprintf "runOr: exited \${?}, running \${1}\n" ]
+    "$1"
+  ];
+
+  return400 = depot.nix.writeExecline "return400" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 400 Bad Request
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+    ''
+  ];
+
+  return404 = depot.nix.writeExecline "return404" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 404 Not Found
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+      This page doesn’t exist! Query arguments are not handled at the moment.
+    ''
+  ];
+
+  return500 = depot.nix.writeExecline "return500" { } [
+    bins.printf
+    "%s"
+    ''
+      HTTP/1.1 500 Internal Server Error
+      Content-Type: text/plain; charset=UTF-8
+      Connection: close
+
+      Encountered an internal server error. Please try again.
+    ''
+  ];
+
+  capture-stdin = depot.nix.writers.rustSimple
+    {
+      name = "capture-stdin";
+      dependencies = [ depot.users.Profpatsch.execline.exec-helpers ];
+    } ''
+    extern crate exec_helpers;
+    use std::io::Read;
+    fn main() {
+      let (args, prog) = exec_helpers::args_for_exec("capture-stdin", 1);
+      let valname = &args[1];
+      let mut v : Vec<u8> = vec![];
+      std::io::stdin().lock().read_to_end(&mut v).unwrap();
+      exec_helpers::exec_into_args("capture-stdin", prog, vec![(valname, v)]);
+    }
+  '';
+
+  # go from a list of path elements to an absolute route string
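+  # e.g. mkRoute [ "notes" "preventing-oom" ] evaluates to "/notes/preventing-oom".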
+  mkRoute = route: "/" + lib.concatMapStringsSep "/" urlencodeAscii route;
+
+  # urlencodes, but only ASCII characters
+  # https://en.wikipedia.org/wiki/Percent-encoding
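+  # e.g. urlencodeAscii "foo/bar" evaluates to "foo%2Fbar".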
+  urlencodeAscii = urlPiece:
+    let
+      raw = [ "!" "#" "$" "%" "&" "'" "(" ")" "*" "+" "," "/" ":" ";" "=" "?" "@" "[" "]" ];
+      enc = [ "%21" "%23" "%24" "%25" "%26" "%27" "%28" "%29" "%2A" "%2B" "%2C" "%2F" "%3A" "%3B" "%3D" "%3F" "%40" "%5B" "%5D" ];
+      rest = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
+    in
+    assert lib.assertMsg (lib.all (c: builtins.elem c (raw ++ rest)) (lib.stringToCharacters urlPiece))
+      "urlencodeAscii: the urlPiece must only contain valid url ASCII characters, was: ${urlPiece}";
+    builtins.replaceStrings raw enc urlPiece;
+
+
+  # create a cdb record entry, as required by the cdbmake tool
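+  # e.g. cdbRecord "foo" "bar" evaluates to "+3,3:foo->bar" plus a trailing newline.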
+  cdbRecord = key: val:
+    "+${toString (builtins.stringLength key)},${toString (builtins.stringLength val)}:"
+    + "${key}->${val}\n";
+
+  # create a full cdbmake input from an attribute set of keys to values (strings)
+  cdbRecords =
+    with depot.nix.yants;
+    defun [ (attrs (either drv string)) string ]
+      (attrs:
+        (lib.concatStrings (lib.mapAttrsToList cdbRecord attrs)) + "\n");
+
+  # run cdbmake on an attribute set of key/value pairs (strings)
+  cdbMake = name: attrs: depot.nix.runExecline "${name}.cdb"
+    {
+      stdin = cdbRecords attrs;
+    } [
+    "importas"
+    "out"
+    "out"
+    depot.users.Profpatsch.lib.eprint-stdin
+    "if"
+    [ bins.cdbmake "db" "tmp" ]
+    bins.mv
+    "db"
+    "$out"
+  ];
+
+  # look up a key ($2) in the given cdb ($1)
+  cdbLookup = depot.nix.writeExecline "cdb-lookup" { readNArgs = 2; } [
+    # cdb ($1) on stdin
+    "redirfd"
+    "-r"
+    "0"
+    "$1"
+    # key ($2) lookup
+    bins.cdbget
+    "$2"
+  ];
+
+in
+depot.nix.readTree.drvTargets {
+  inherit
+    router
+    depotCgitLink
+    site-server
+    notes-index
+    notes-index-html
+    projects-index
+    projects-index-html
+    posts-index-html
+    ;
+
+}
diff --git a/users/Profpatsch/blog/notes/an-idealized-conflang.md b/users/Profpatsch/blog/notes/an-idealized-conflang.md
new file mode 100644
index 000000000000..5c6b39f6e81b
--- /dev/null
+++ b/users/Profpatsch/blog/notes/an-idealized-conflang.md
@@ -0,0 +1,298 @@
+tags: netencode, json
+date: 2022-03-31
+certainty: likely
+status: initial
+title: An Idealized Configuration Language
+
+# An Idealized Configuration Language
+
+JSON brought us one step closer to what an idealized configuration language is,
+which I define as “data, stripped of all externalities of the system it is working in”.
+
+Specifically, JSON is very close to what I consider the minimal properties to represent structured data.
+
+## A short history, according to me
+
+In the beginning, Lisp defined s-expressions as a stand-in for an actual syntax.
+Then, people figured out that they are also a way to represent structured data.
+S-expressions have scalars, which can be nested into lists, recursively.
+
+```
+(this is (a (list) (of lists)))
+```
+
+This provides the first three rules of our idealized language:
+
+1. A **scalar** is a primitive value that is domain-specific.
+   We can assume a bunch of bytes here, or text, or an integer.
+
+2. A **list** gives an ordering to `0..n` (or `1..n`) values.
+
+3. Both a scalar and a list are the *same kind* of “thing” (from here on called **value**);
+   lists can be created from arbitrary values *recursively*
+   (for example scalars, or lists of scalars and other lists).
+
+
+Later, ASN.1 came along and had the important insight that the same idealized data structure
+can be represented in different fashions,
+for example as a binary-efficient version and a human-readable format.
+
+Then, XML “graced” the world for a decade or two, and the main lesson from it was
+that you don’t want to mix markup languages and configuration languages,
+and that you don’t want a committee to design these things.
+
+---
+
+In the meantime, Brendan Eich designed Javascript. Its prototype-based object system
+arguably stripped down the rituals of existing OO-systems.
+Douglas Crockford later extracted the object format (minus functions) into a syntax, and we got JSON.
+
+```
+{
+  "foo": [
+    { "nested": "attrs" },
+    "some text"
+  ],
+  "bar": 42
+}
+```
+
+JSON adds another fundamental idea into the mix:
+
+4. **Records** are unordered collections of `name`/`value` pairs.
+   A `name` is defined to be a unicode string, i.e. a semantic descriptor of the nested `value`.
+
+Unfortunately, the JSON syntax does not actually specify any semantics of records (`objects` in JSON lingo);
+in particular, it does not mention what the meaning is if a `name` appears twice in one record.
+
+If records can have multiple entries with the same `name`, suddenly ordering becomes important!
+But wait, remember that earlier we defined *lists* to be the construct that imposes an ordering on values.
+So in order to rectify that problem, we say that
+
+5. A `name` can only appear in a record *once*; names must be unique.
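+
+For example, the following is syntactically valid JSON, but parsers disagree about its meaning (some take the last value, some the first, some reject it outright):
+
+```
+{ "foo": 1, "foo": 2 }
+```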
+
+This is the current state of the programming community at large,
+where most “modern” configuration languages basically use a version of the JSON model
+as their underlying data structure. (However not all of them use the same version.)
+
+## Improving JSON’s data model
+
+We are not yet at the final “idealized” configuration language, though.
+
+Modern languages like Standard ML define their data types as a mixture of 
+
+* *records* (“structs” in the C lingo)
+* and *sums* (which you can think of as enums that can hold more `value`s inside them)
+
+This makes it possible to express the common pattern where some fields in a record are only meaningful
+if another field—the so-called `tag`-field—is set to a specific value.
+
+An easy example: a request that can fail with an error message or succeed with a result.
+
+You could model that as 
+
+```
+{
+  "was_error": true,
+  "error_message": "there was an error"
+}
+```
+
+or
+
+```
+{
+  "was_error": false,
+  "result": 42
+}
+```
+
+in your JSON representation.
+
+But in an ML-like language (here in Rust syntax), you would instead model it as
+
+```
+enum RequestResult {
+    Error { error_message: String },
+    Success { result: i64 },
+}
+```
+
+where the distinction in `Error` or `Success` makes it clear that `error_message` and `result`
+only exist in one of these cases, not the other.
+
+We *can* encode exactly that idea into JSON in multiple ways, but there is no single “blessed” way.
+
+For example, another way to encode the above would be
+
+```
+{ 
+  "Error": { 
+    "error_message": "there was an error"
+  }
+}
+```
+
+and
+
+```
+{ 
+  "Success": { 
+    "result": 42
+  }
+}
+```
+
+Particularly notice the difference between the language representation, where the type is “closed” (only `Success` or `Error` can happen),
+and the data representation, where the type is “open” (more cases could potentially exist).
+
+This is an important distinction from a type system:
+our idealized configuration language just gives more structure to a bag of data;
+it does not restrict which value can be where.
+Think of a value in an unityped language, like Python.
+
+
+So far we have the notion of 
+
+1. a scalar (a primitive)
+2. a list (ordering on values)
+3. a record (unordered collection of named values)
+
+and in order to get the “open” `tag`ged enumeration values, we introduce
+
+4. a `tag`, which gives a name to a value
+
+We can then redefine `record` to mean “an unordered collection of `tag`ged values”,
+which further reduces the amount of concepts needed.
+
+And that’s it, this is the full idealized configuration language.
+
+
+## Some examples of data modelling with tags
+
+This is all well and good, but what does it look like in practice?
+
+For these examples I will be using JSON with a new `< "tag": value >` syntax
+to represent `tag`s.
+
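+In this syntax, the `RequestResult` example from above looks like
+
+```
+< "Success": { "result": 42 } >
+```
+
+or
+
+```
+< "Error": { "error_message": "there was an error" } >
+```
+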
+From a compatibility standpoint, `tag`s (or sum types) have dual properties to record types.
+
+With a record, when you have a producer that *adds* a field to it, the consumer will still be able to handle the record (provided the semantics of the existing fields are not changed by the new field).
+
+With a tag, *removing* a tag from the producer means that the consumer will still be able to handle the remaining tags. It might do one “dead” check on the removed `tag`, but can still handle the remaining ones just fine.
+
+<!-- TODO: some illustration here -->
+    
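+One way to picture this duality (my own sketch, using the tag syntax from above):
+
+```
+records: the producer upgrades from  { "foo": 1 }  to  { "foo": 1, "bar": 2 }
+         → old consumers keep working; they simply ignore "bar"
+
+tags:    the producer stops sending  < "B": {} >  and only ever sends  < "A": {} >
+         → old consumers keep working; their check for "B" simply becomes dead code
+```
+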
+An example of how this is applied in practice: in `protobuf3`, the fields of a record are *always* optional fields.
+
+We can model optional fields by wrapping them in `< "Some": value >` or `< "None": {} >` (where the actual value of the `None` is ignored or always an empty record).
+
+So a protobuf with the fields `foo: int` and `bar: string` has to be parsed by the receiver as containing *four* possibilities:
+
+|№|foo|bar|
+|--:|---|---|
+|1|`<"None":{}>`|`<"None":{}>`|
+|2|`<"Some":42>`|`<"None":{}>`|
+|3|`<"None":{}>`|`<"Some":"x">`|
+|4|`<"Some":42>`|`<"Some":"x">`|
+
+Now, if the receiver actually handles all four possibilities
+(and doesn’t just crash if a field is not set, as is customary in million-dollar-mistake languages),
+it’s easy to see how removing a field from the producer is semantically equal to always setting it to `<"None":{}>`.
+Since all receivers should be ready to receive `None` for every field, this provides a simple forward-compatibility scheme.
+
+We can abstract this to any kind of tag value:
+if you start with “more” tags, you give yourself space to remove them later without breaking consumers, which is typically called “forward compatibility”.
+
+
+## To empty list/record or not to
+
+Something to think about is whether records and lists should be defined
+to always contain at least one element.
+
+As it stands, JSON has multiple ways of expressing the “empty value”:
+
+* `null`
+* `[]`
+* `{}`
+* `""`
+* *leave out the field*
+
+and two of those (`[]` and `{}`) come from the possibility of having empty structured values.
+
+## Representations of this language
+
+This line of thought originally fell out of me designing [`netencode`](https://code.tvl.fyi/tree/users/Profpatsch/netencode/README.md)
+as a small human-debuggable format for pipeline serialization.
+
+In addition to the concepts mentioned here (especially tags),
+it provides a better set of scalars than JSON (specifically arbitrary bytestrings),
+but it cannot practically be written or modified by hand,
+which might be a good thing depending on how you look at it.
+
+---
+
+The way that is compatible with the rest of the ecosystem is probably to use a subset of JSON
+to represent our idealized language.
+
+There are multiple ways of encoding tags in JSON, each with its own pros and cons.
+
+The most common is probably the “tag field” variant, where the tag is pulled into the nested record:
+
+```
+{
+  "_tag": "Success",
+  "result": 42
+}
+```
+
+This has the advantage that people know how to deal with it, that it’s easy to “just add another field”,
+and that it is backward-compatible if you had a record in the first place.
+
+It has multiple disadvantages however:
+
+* If your value wasn’t a record (e.g. an int) before, you have to put it in a record and assign an arbitrary name to its field
+* People are not forced to “unwrap” the tag first, so they are going to forget to check it
+* The magic “_tag” name cannot be used by any of the record’s fields
+
+
+An in-between version of this with fewer downsides is to always push a JSON record onto the stack:
+
+```
+{
+  "tag": "Success",
+  "value": {
+    "result": 42
+  }
+}
+```
+
+This makes it harder for people to miss checking the `tag`, but still possible of course.
+It also makes it easy to inspect the contents of `value` without knowing the
+exhaustive list of `tag`s, which can be useful in practice (though often not sound!).
+It also gets rid of the “_tag” field name clash problem.
+
+Disadvantages:
+
+* Breaks backwards compatibility with an existing record-based approach if you want to introduce `tag`s
+* Verbosity of representation
+* It is hard to distinguish a record that happens to have `tag` and `value` fields from a `tag`ed value (though you know the type layout of your data on a higher level, don’t you? ;) )
+
+
+The final, “most pure” representation is the one I gave in the original introduction:
+
+```
+{
+  "Success": {
+    "result": 42
+  }
+}
+```
+
+Now you *have* to match on the `tag` name first, before you can actually access your data,
+and it’s less verbose than the above representation.
+
+Disadvantages:
+
+* You also have to *know* which `tag`s to expect; it’s harder to query because you need to extract the keys and values from the dict and then take the first one.
+* Doing a “tag backwards compat” check is harder,
+  because you can’t just check whether `_tag` or `tag`/`value` are the keys in the dict.
diff --git a/users/Profpatsch/blog/notes/preventing-oom.md b/users/Profpatsch/blog/notes/preventing-oom.md
new file mode 100644
index 000000000000..59ea4f747700
--- /dev/null
+++ b/users/Profpatsch/blog/notes/preventing-oom.md
@@ -0,0 +1,33 @@
+tags: linux
+date: 2020-01-25
+certainty: likely
+status: initial
+title: Preventing out-of-memory (OOM) errors on Linux
+
+# Preventing out-of-memory (OOM) errors on Linux
+
+I’ve been running out of memory more and more often lately. I don’t use any swap space because I am of the opinion that 16GB of memory should be sufficient for most daily and professional tasks. That is generally true, but sometimes a runaway process fills my memory. Emacs, for example, is very good at this: it is prone to filling your RAM when you open JSON files with very long lines.
+
+In theory, the kernel OOM killer should come in and save the day, but the Linux OOM killer is notorious for being extremely … conservative. It will try to free every internal structure it can before even thinking about touching any userspace processes. By the time it acts, the desktop has usually been unresponsive for minutes.
+
+Luckily the kernel provides memory statistics for the whole system, as well as for single processes, and the [`earlyoom`](https://github.com/rfjakob/earlyoom) tool uses those to keep the amount of free memory above a given threshold. It will start killing processes, “heaviest” first, until the memory limit is satisfied again.
+
+On NixOS, I set:
+
+```nix
+{
+  services.earlyoom = {
+    enable = true;
+    freeMemThreshold = 5; # kill when less than 5% of memory is free
+  };
+}
+```
+
+and after activation, this simple test shows whether the daemon is working:
+
+```shell
+$ tail /dev/zero
+fish: “tail /dev/zero” terminated by signal SIGTERM (Polite quit request)
+```
+
+`tail /dev/zero` searches for the last line of the file `/dev/zero`, and since it cannot know that there is no next line and no end to the stream of `\0` this file produces, it will fill the RAM as quickly as physically possible. Before it can fill it completely, `earlyoom` recognizes that the limit was breached, singles out the `tail` command as the process using the most amount of memory, and sends it a `SIGTERM`.
diff --git a/users/Profpatsch/blog/notes/private-trackers-are-markets.md b/users/Profpatsch/blog/notes/private-trackers-are-markets.md
new file mode 100644
index 000000000000..88fe5f07e5d7
--- /dev/null
+++ b/users/Profpatsch/blog/notes/private-trackers-are-markets.md
@@ -0,0 +1,46 @@
+# Private bittorrent trackers are markets
+
+Private bittorrent trackers have a currency called ratio,
+which is the number of bits you have uploaded divided by the number of bits you have downloaded.
+
+You have to keep the ratio above a certain lower limit,
+otherwise you get banned from the market or have to cut a deal with the moderators → bankruptcy.
+
+New liquidity (?) is introduced to the market by so-called “freeleech” events or tokens,
+which essentially allow you to exchange a token (or some time in the case of time-restricted freeleech)
+for some data, which can then be seeded to generate future profits without spending ratio.
+
+Sometimes, ratio is pulled from the market by letting users exchange it for website perks,
+like forum titles, or other benefits like chat memberships. This has a deflationary effect.
+It could be compared to “vanity items” in MMOs, which don’t grant a mechanical advantage in the market.
+Is there a real-world equivalent, i.e. something that allows rich people to exchange some of their worth
+for vanity items instead of investing it for future gain?
+
+Sometimes, ratio can be traded for more than just transferred bits,
+for example by requesting a torrent for a certain album or movie,
+paying some ratio for the fulfillment of the request.
+
+---
+
+Based on how bittorrent works, usually multiple people “seed” a torrent.
+This means multiple people can answer a request for trading ratio.
+Part of the request (e.g. the first 30% of a movie)
+can be fulfilled by one party, part of it by a second or even more parties.
+
+For small requests (e.g. albums), the time between the trade being announced
+and the trade being filled often determines who gets to fill it.
+Getting a 1-second head start vastly increases your chance of a handshake
+and starting the transmission, so on average you get a much higher ratio gain from that torrent.
+This means that using a bittorrent client which is fast to answer as a seeder will lead to better outcomes.
+This could be compared to mechanisms seen in high-speed trading.
+
+---
+
+Of course these market mechanisms are in service of a wider policy goal,
+which is to ensure the constant availability of as much high-quality data as possible.
+There are more mechanisms at play on these trackers that all contribute to this goal
+(possible keywords to research: trumping, freeleech for underseeded torrents).
+
+In general, it is important to remember that markets are only a tool,
+never an end in themselves, as neoliberalists would like us to believe.
+They always are in service of a wider goal or policy. We live in a society.
diff --git a/users/Profpatsch/blog/notes/rust-string-conversions.md b/users/Profpatsch/blog/notes/rust-string-conversions.md
new file mode 100644
index 000000000000..99071ef9d370
--- /dev/null
+++ b/users/Profpatsch/blog/notes/rust-string-conversions.md
@@ -0,0 +1,53 @@
+# Converting between different String types in Rust
+
+```
+let s: String = ...
+let st: &str = ...
+let u: &[u8] = ...
+let b: [u8; 3] = *b"foo"
+let v: Vec<u8> = ...
+let os: OsString = ...
+let ost: OsStr = ...
+
+From       To         Use                                    Comment
+----       --         ---                                    -------
+&str     -> String    String::from(st)
+&str     -> &[u8]     st.as_bytes()
+&str     -> Vec<u8>   st.as_bytes().to_owned()               via &[u8]
+&str     -> &OsStr    OsStr::new(st)
+
+String   -> &str      &s                                     alt. s.as_str()
+String   -> &[u8]     s.as_bytes()
+String   -> Vec<u8>   s.into_bytes()
+String   -> OsString  OsString::from(s)
+
+&[u8]    -> &str      str::from_utf8(u).unwrap()
+&[u8]    -> String    String::from_utf8(u.to_vec()).unwrap()  via Vec<u8>
+&[u8]    -> Vec<u8>   u.to_owned()
+&[u8]    -> &OsStr    OsStr::from_bytes(u)                   use std::os::unix::ffi::OsStrExt;
+
+[u8; 3]  -> &[u8]     &b[..]                                 byte literal
+[u8; 3]  -> &[u8]     "foo".as_bytes()                       alternative via utf8 literal
+
+Vec<u8>  -> &str      str::from_utf8(&v).unwrap()            via &[u8]
+Vec<u8>  -> String    String::from_utf8(v).unwrap()
+Vec<u8>  -> &[u8]     &v
+Vec<u8>  -> OsString  OsString::from_vec(v)                  use std::os::unix::ffi::OsStringExt;
+
+&OsStr   -> &str      ost.to_str().unwrap()
+&OsStr   -> String    ost.to_os_string().into_string()       via OsString
+                         .unwrap()
+&OsStr   -> Cow<str>  ost.to_string_lossy()                  Unicode replacement characters
+&OsStr   -> OsString  ost.to_os_string()
+&OsStr   -> &[u8]     ost.as_bytes()                         use std::os::unix::ffi::OsStrExt;
+
+OsString -> String    os.into_string().unwrap()              returns original OsString on failure
+OsString -> &str      os.to_str().unwrap()
+OsString -> &OsStr    os.as_os_str()
+OsString -> Vec<u8>   os.into_vec()                          use std::os::unix::ffi::OsStringExt;
+```
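+
+As a sanity check, here is a small program (my own sketch, not part of the original document) that chains a few of the conversions; the unix-only trait imports are needed for the `OsStr`/`OsString` byte conversions:
+
+```
+use std::ffi::{OsStr, OsString};
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
+
+fn main() {
+    let st: &str = "foo";
+    let s: String = String::from(st);          // &str -> String
+    let u: &[u8] = st.as_bytes();              // &str -> &[u8]
+    let v: Vec<u8> = s.into_bytes();           // String -> Vec<u8>
+
+    let os: OsString = OsString::from_vec(v);  // Vec<u8> -> OsString
+    let ost: &OsStr = os.as_os_str();          // OsString -> &OsStr
+
+    assert_eq!(std::str::from_utf8(u).unwrap(), "foo");  // &[u8] -> &str
+    assert_eq!(ost.as_bytes(), &b"foo"[..]);             // &OsStr -> &[u8]
+}
+```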
+
+
+## Source
+
+The original source is [this document on Pastebin](https://web.archive.org/web/20190710121935/https://pastebin.com/Mhfc6b9i).
diff --git a/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md b/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md
new file mode 100644
index 000000000000..ba80888badd8
--- /dev/null
+++ b/users/Profpatsch/blog/posts/2017-05-04-ligature-emulation-in-emacs.md
@@ -0,0 +1,123 @@
+title: Ligature Emulation in Emacs
+date: 2017-05-04
+
+Monday was (yet another)
+[NixOS hackathon][hackathon] at [OpenLab Augsburg][ola].
+[Maximilian][mhuber] was there, and to my amazement
+he had gotten ligatures working in his Haskell files in Emacs! Ever since Hasklig
+updated its format to use ligatures and private Unicode code points a while ago,
+the hack I had used in my config had stopped working.
+
+Encouraged by that I decided to take a look on Tuesday. Long story short, I was
+able to [get it working in a pretty satisfying way][done].
+
+[hackathon]: https://www.meetup.com/Munich-NixOS-Meetup/events/239077247/
+[mhuber]: https://github.com/maximilianhuber
+[ola]: https://openlab-augsburg.de
+[done]: https://github.com/i-tu/Hasklig/issues/84#issuecomment-298803495
+
+What’s left to do is package it into a module and push it to MELPA.
+
+
+### elisp still sucks, but it’s bearable, sometimes
+
+I’m the kind of person who, when trying to fix something elisp related, normally
+gives up two hours later and three macro calls deep. Yes, homoiconic,
+non-lexically-scoped, self-rewriting code is not exactly my fetish.
+This time the task and the library (`prettify-symbols-mode`) were simple enough
+for that to not happen.
+
+Some interesting technical trivia:
+
+- elisp literal character syntax is `?c`; `?\t` is the tab character
+- You join characters with `(string c1 c2 c3 ...)` (see the snippet after this list)
+- [dash.el][dash] is pretty awesome and does what a functional programmer
+  expects. Also, Rainbow Dash.
+- Hasklig and FiraCode multi-column symbols actually [only occupy one column, on
+  the far right of the glyph][glyph]. `my-correct-symbol-bounds` fixes emacs’
+  rendering in that case.
+
+
+[dash]: https://github.com/magnars/dash.el
+[glyph]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239082368
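+
+A tiny sketch of the first two points (my own example, as you could evaluate it in `*scratch*`):
+
+```elisp
+;; ?\t is a literal tab character; `string' joins characters into a string
+(string ?\t ?+ ?+) ;; => "\t++"
+```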
+
+
+## Appendix A
+
+For reference, here’s the complete code as it stands now. Feel free to paste
+into your config; let’s make it [MIT][mit]. Maybe link to this site, in case there are
+updates.
+
+[mit]: https://opensource.org/licenses/MIT
+
+```elisp
+(defun my-correct-symbol-bounds (pretty-alist)
+  "Prepend a TAB character to each symbol in this alist,
+this way compose-region called by prettify-symbols-mode
+will use the correct width of the symbols
+instead of the width measured by char-width."
+  (mapcar (lambda (el)
+            (setcdr el (string ?\t (cdr el)))
+            el)
+          pretty-alist))
+
+(defun my-ligature-list (ligatures codepoint-start)
+  "Create an alist of strings to replace with
+codepoints starting from codepoint-start."
+  (let ((codepoints (-iterate '1+ codepoint-start (length ligatures))))
+    (-zip-pair ligatures codepoints)))
+
+;; the symbol list can be found at https://github.com/i-tu/Hasklig/blob/master/GlyphOrderAndAliasDB#L1588
+(setq my-hasklig-ligatures
+  (let* ((ligs '("&&" "***" "*>" "\\\\" "||" "|>" "::"
+                 "==" "===" "==>" "=>" "=<<" "!!" ">>"
+                 ">>=" ">>>" ">>-" ">-" "->" "-<" "-<<"
+                 "<*" "<*>" "<|" "<|>" "<$>" "<>" "<-"
+                 "<<" "<<<" "<+>" ".." "..." "++" "+++"
+                 "/=" ":::" ">=>" "->>" "<=>" "<=<" "<->")))
+    (my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
+
+;; nice glyphs for haskell with hasklig
+(defun my-set-hasklig-ligatures ()
+  "Add hasklig ligatures for use with prettify-symbols-mode."
+  (setq prettify-symbols-alist
+        (append my-hasklig-ligatures prettify-symbols-alist))
+  (prettify-symbols-mode))
+
+(add-hook 'haskell-mode-hook 'my-set-hasklig-ligatures)
+```
+
+## Appendix B (Update 1): FiraCode integration
+
+I also created a mapping for [FiraCode][fira]. You need to grab the [additional
+symbol font][symbol] that adds (most) ligatures to the unicode private use area.
+Consult your system documentation on how to add it to your font cache.
+Next add `"Fira Code"` and `"Fira Code Symbol"` to your font preferences. Symbol
+only contains the additional characters, so you need both.
+
+If you are on NixOS, the font package should be on the main branch shortly, [I
+added a package][symbol-pkg].
+
+[fira]: https://github.com/tonsky/FiraCode/
+[symbol]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239058632
+[symbol-pkg]: https://github.com/NixOS/nixpkgs/pull/25517
+
+Here’s the mapping adjusted for FiraCode:
+
+```elisp
+(setq my-fira-code-ligatures
+  (let* ((ligs '("www" "**" "***" "**/" "*>" "*/" "\\\\" "\\\\\\"
+                 "{-" "[]" "::" ":::" ":=" "!!" "!=" "!==" "-}"
+                 "--" "---" "-->" "->" "->>" "-<" "-<<" "-~"
+                 "#{" "#[" "##" "###" "####" "#(" "#?" "#_" "#_("
+                 ".-" ".=" ".." "..<" "..." "?=" "??" ";;" "/*"
+                 "/**" "/=" "/==" "/>" "//" "///" "&&" "||" "||="
+                 "|=" "|>" "^=" "$>" "++" "+++" "+>" "=:=" "=="
+                 "===" "==>" "=>" "=>>" "<=" "=<<" "=/=" ">-" ">="
+                 ">=>" ">>" ">>-" ">>=" ">>>" "<*" "<*>" "<|" "<|>"
+                 "<$" "<$>" "<!--" "<-" "<--" "<->" "<+" "<+>" "<="
+                 "<==" "<=>" "<=<" "<>" "<<" "<<-" "<<=" "<<<" "<~"
+                 "<~~" "</" "</>" "~@" "~-" "~=" "~>" "~~" "~~>" "%%"
+                 "x" ":" "+" "+" "*")))
+    (my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
+```