Diffstat (limited to 'web')
40 files changed, 1335 insertions, 865 deletions
diff --git a/web/blog/.skip-subtree b/web/blog/.skip-subtree
new file mode 100644
index 000000000000..e7fa50d49bdd
--- /dev/null
+++ b/web/blog/.skip-subtree
@@ -0,0 +1 @@
+Subdirectories contain blog posts and static assets only
diff --git a/web/blog/default.nix b/web/blog/default.nix
new file mode 100644
index 000000000000..73b1bc20e3e7
--- /dev/null
+++ b/web/blog/default.nix
@@ -0,0 +1,44 @@
+# This creates the static files that make up my blog from the Markdown
+# files in this repository.
+#
+# All blog posts are rendered from Markdown by cheddar.
+{ pkgs, lib, ... }@args:
+
+with pkgs.nix.yants;
+
+let
+  # Type definition for a single blog post.
+  post = struct "blog-post" {
+    key = string; # unique identifier for the post, used in its URL
+    title = string;
+    date = int;
+
+    # Path to the Markdown file containing the post content.
+    content = path;
+
+    # Should this post be included in the index? (defaults to true)
+    listed = option bool;
+
+    # Is this a draft? (adds a banner indicating that the link should
+    # not be shared)
+    draft = option bool;
+
+    # Previously each post title had a numeric ID. For these numeric
+    # IDs, redirects are generated so that old URLs stay compatible.
+    oldKey = option string;
+  };
+
+  posts = list post (import ./posts.nix);
+  fragments = import ./fragments.nix args;
+
+  rendered = pkgs.third_party.runCommandNoCC "tazjins-blog" {} ''
+    mkdir -p $out
+
+    ${lib.concatStringsSep "\n" (map (post:
+      "cp ${fragments.renderPost post} $out/${post.key}.html"
+    ) posts)}
+  '';
+in {
+  inherit post posts rendered;
+  static = ./static;
+}
diff --git a/web/blog/fragments.nix b/web/blog/fragments.nix
new file mode 100644
index 000000000000..c910ac014e5e
--- /dev/null
+++ b/web/blog/fragments.nix
@@ -0,0 +1,72 @@
+# This file defines various fragments of the blog, such as the header
+# and footer, as functions that receive arguments to be templated into
+# them.
+#
+# An entire post is rendered by `renderPost`, which assembles the
+# fragments together in a runCommand execution.
+#
+# The post overview is rendered by 'postList'.
+{ pkgs, lib, ... }:
+
+let
+  inherit (builtins) filter map hasAttr replaceStrings toFile;
+  inherit (pkgs.third_party) runCommandNoCC writeText;
+
+  escape = replaceStrings [ "<" ">" "&" "'" ] [ "&lt;" "&gt;" "&amp;" "&#39;" ];
+
+  header = title: ''
+    <!DOCTYPE html>
+    <head>
+      <meta charset="utf-8">
+      <meta name="viewport" content="width=device-width, initial-scale=1">
+      <meta name="description" content="tazjin's blog">
+      <link rel="stylesheet" type="text/css" href="/static/tazjin.css" media="all">
+      <link rel="icon" type="image/webp" href="/static/favicon.webp">
+      <link rel="alternate" type="application/rss+xml" title="RSS-Feed" href="/rss.xml">
+      <title>tazjin's blog: ${escape title}</title>
+    </head>
+    <body class="light">
+      <header>
+        <h1><a class="blog-title" href="/">tazjin's interblag</a></h1>
+        <hr>
+      </header>
+  '';
+
+  footer = ''
+    <hr>
+    <footer>
+      <p class="footer">
+        <a class="uncoloured-link" href="https://tazj.in">homepage</a>
+        |
+        <a class="uncoloured-link" href="https://git.tazj.in/about">code</a>
+        |
+        <a class="uncoloured-link" href="https://twitter.com/tazjin">twitter</a>
+      </p>
+      <p class="lod">ಠ_ಠ</p>
+    </footer>
+  </body>
+  '';
+
+  renderPost = post: runCommandNoCC "${post.key}.html" {} ''
+    cat ${toFile "header.html" (header post.title)} > $out
+
+    # Write the post title & date
+    echo '<article><h2 class="inline">${escape post.title}</h2>' >> $out
+    echo '<aside class="date">' >> $out
+    date --date="@${toString post.date}" '+%Y-%m-%d' >> $out
+    echo '</aside>' >> $out
+
+    # Write the actual post through cheddar's about-filter mechanism
+    cat ${post.content} | ${pkgs.tools.cheddar}/bin/cheddar --about-filter ${post.content} >> $out
+    echo '</article>' >> $out
+
+    cat ${toFile "footer.html" footer} >> $out
+  '';
+
+  # Generate a post list for all listed, non-draft posts.
+  isDraft = post: (hasAttr "draft" post) && post.draft;
+  isUnlisted = post: (hasAttr "listed" post) && !post.listed;
+  includePost = post: !(isDraft post) && !(isUnlisted post);
+in {
+  inherit renderPost;
+}
diff --git a/web/blog/posts.nix b/web/blog/posts.nix
new file mode 100644
index 000000000000..7d510b061190
--- /dev/null
+++ b/web/blog/posts.nix
@@ -0,0 +1,44 @@
+# This file defines all the blog posts.
+[
+  {
+    key = "best-tools";
+    title = "tazjin's best tools";
+    date = 1576800001;
+    content = ./posts/best-tools.md;
+  }
+  {
+    key = "reversing-watchguard-vpn";
+    title = "Reverse-engineering WatchGuard Mobile VPN";
+    date = 1486830338;
+    content = ./posts/reversing-watchguard-vpn.md;
+    oldKey = "1486830338";
+  }
+  {
+    key = "make-object-t-again";
+    title = "Make Object <T> Again!";
+    date = 1476807384;
+    content = ./posts/make-object-t-again.md;
+    oldKey = "1476807384";
+  }
+  {
+    key = "the-smu-problem";
+    title = "The SMU-problem of messaging apps";
+    date = 1450354078;
+    content = ./posts/the-smu-problem.md;
+    oldKey = "1450354078";
+  }
+  {
+    key = "sick-in-sweden";
+    title = "Being sick in Sweden";
+    date = 1423995834;
+    content = ./posts/sick-in-sweden.md;
+    oldKey = "1423995834";
+  }
+  {
+    key = "nsa-zettabytes";
+    title = "The NSA's 5 zettabytes of data";
+    date = 1375310627;
+    content = ./posts/nsa-zettabytes.md;
+    oldKey = "1375310627";
+  }
+]
diff --git a/web/blog/posts/best-tools.md b/web/blog/posts/best-tools.md
new file mode 100644
index 000000000000..55608239828a
--- /dev/null
+++ b/web/blog/posts/best-tools.md
@@ -0,0 +1,161 @@
+In the spirit of various other "Which X do you use?"-pages I thought it would be
+fun to have a little post here that describes which tools I've found to work
+well for myself.
+
+When I say "tools" here, it's not about software - it's about real, physical
+tools!
+
+If something goes on this list that's because I think it's seriously a
+best-in-class type of product.
+
+<!-- markdown-toc start - Don't edit this section. Run M-x markdown-toc-refresh-toc -->
+- [Media & Tech](#media--tech)
+    - [Keyboard](#keyboard)
+    - [Speakers](#speakers)
+    - [Headphones](#headphones)
+    - [Earphones](#earphones)
+    - [Phone](#phone)
+- [Other stuff](#other-stuff)
+    - [Toothbrush](#toothbrush)
+    - [Shavers](#shavers)
+    - [Shoulder bag](#shoulder-bag)
+    - [Wallet](#wallet)
+<!-- markdown-toc end -->
+
+---------
+
+# Media & Tech
+
+## Keyboard
+
+The best keyboard that money will buy you at the moment is the [Kinesis
+Advantage][advantage]. There's a variety of contoured & similarly shaped
+keyboards on the market, but the Kinesis is the only one I've tried that has
+properly implemented the keywell concept.
+
+I struggle with RSI issues, and the Kinesis actually makes it possible for me
+to type for longer periods of time; on laptop keyboards and such, that always
+leads to extra discomfort.
+
+Honestly, the Kinesis is probably the best piece of equipment on this entire
+list. I own several of them and there will probably be more in the future. They
+last forever and your wrists will thank you in the future, even if you do not
+suffer from RSI yet.
+
+[advantage]: https://kinesis-ergo.com/shop/advantage2/
+
+## Speakers
+
+The speakers that I've hooked up to my audio setup (including both record player
+& Chromecast / TV) are the [Teufel Motiv 2][motiv-2]. I've had these for over a
+decade and they're incredibly good, but unfortunately Teufel no longer makes
+them.
+
+It's possible to grab a pair on eBay occasionally, so keep an eye out if you're
+interested!
+
+[motiv-2]: https://www.teufelaudio.com/uk/pc/motiv-2-p167.html
+
+## Headphones
+
+I use the [Bose QC35][qc35] (note: link goes to a newer generation than the one
+I own) for their outstanding noise cancelling functionality and decent sound.
+
+When I first bought them I didn't expect them to end up on this list as the
+firmware had issues that made them only barely usable, but Bose has managed to
+iron these problems out over time.
+
+I avoid using Bluetooth when outside and fortunately the QC35 come with an
+optional cable that you can plug into any good old 3.5mm jack.
+
+[qc35]: https://www.bose.co.uk/en_gb/products/headphones/over_ear_headphones/quietcomfort-35-wireless-ii.html
+
+## Earphones
+
+Actually, to follow up on the above - most of the time I'm not using (over-ear)
+headphones, but (in-ear) earphones - specifically the (**wired!!!**) [Apple
+EarPods][earpods].
+
+Apple will probably stop selling these soon because they've gotten into the
+habit of cancelling all of their good products, so I have a stash of these
+around. You will usually find no fewer than 3-4 of them lying around in my
+flat.
+
+[earpods]: https://www.apple.com/uk/shop/product/MNHF2ZM/A/earpods-with-35mm-headphone-plug
+
+## Phone
+
+The best phone I have used in recent years is the [iPhone SE][se]. It was the
+*last* phone that had a reasonable size (up to 4") *and* a 3.5mm headphone
+jack.
+
+Unfortunately, it runs iOS. Despite owning a whole bunch of SEs, I have finally
+moved on to an Android phone that is only moderately larger (still by an
+annoying amount), but does at least have a headphone jack: The [Samsung Galaxy
+S10e][s10e].
+
+It has pretty good hardware and I can almost reach 70% of the screen, which is
+better than other phones out there right now. Unfortunately it runs Samsung's
+impossible-to-remove bloatware on top of Android, but that is still less
+annoying to use than iOS.
+
+This is the only item on this list for which I am actively seeking a
+replacement, so if you have any tips about new phones that might fit these
+criteria that I've missed, please let me know.
+
+[se]: https://en.wikipedia.org/wiki/IPhone_SE
+[s10e]: https://www.phonearena.com/phones/Samsung-Galaxy-S10e_id11114
+
+# Other stuff
+
+## Toothbrush
+
+The [Philips Sonicare][sonicare] (note: link goes to a newer generation than
+mine) is excellent and well worth the money.
+
+I've had it for a few years and whereas I occasionally had minor teeth issues
+before, they seem to be mostly gone now. According to my dentist the state of my
+teeth is now usually pretty good and I draw a direct correlation back to this
+thing.
+
+The newer generations come with flashy features like apps and probably more
+LEDs, but I suspect that those can just be ignored.
+
+[sonicare]: https://www.philips.co.uk/c-m-pe/electric-toothbrushes
+
+## Shavers
+
+The [Philips SensoTouch 3D][sensotouch] is excellent. Super-comfortable close
+face shave in no time, and it leaves absolutely no mess around, as far as I can
+tell! I've had this for ~5 years and it's not showing any signs of aging yet.
+
+Another bonus is that its battery time is effectively infinite. I've never had
+to worry when bringing it on a longer trip!
+
+[sensotouch]: https://www.philips.co.uk/c-p/1250X_40/norelco-sensotouch-3d-wet-and-dry-electric-razor-with-precision-trimmer
+
+## Shoulder bag
+
+When I moved to London I wanted to stop using backpacks most of the time, as
+those are just annoying to deal with when commuting on the tube.
+
+To work around this I wanted a good shoulder bag with a vertical format (to save
+space), but it turned out that there are very few of those around that reach any
+kind of quality standard.
+
+The one I settled on is the [Waterfield Muzetto][muzetto] leather bag. It's one
+of those things that comes with a bit of a price tag attached, but it's well
+worth it!
+
+[muzetto]: https://www.sfbags.com/collections/shoulder-messenger-bags/products/muzetto-leather-bag
+
+## Wallet
+
+My wallet is the [Bellroy Slim Sleeve][slim-sleeve]. I don't carry cash unless
+I'm attending an event in Germany and this wallet fits that lifestyle perfectly.
+
+It's near-indestructible, looks great, is very slim and fits a ton of cards,
+business cards, receipts and whatever else you want to be lugging around with
+you!
+
+[slim-sleeve]: https://bellroy.com/products/slim-sleeve-wallet/default/charcoal
diff --git a/web/blog/posts/make-object-t-again.md b/web/blog/posts/make-object-t-again.md
new file mode 100644
index 000000000000..420b57c0fde9
--- /dev/null
+++ b/web/blog/posts/make-object-t-again.md
@@ -0,0 +1,98 @@
+A few minutes ago I found myself debugging a strange Java issue related
+to Jackson, one of the most common Java JSON serialization libraries.
+
+The gist of the issue was that a short wrapper using some types from
+[Javaslang](http://www.javaslang.io/) was causing unexpected problems:
+
+```java
+public <T> Try<T> readValue(String json, TypeReference type) {
+    return Try.of(() -> objectMapper.readValue(json, type));
+}
+```
+
+The signature of this function was based on the original Jackson
+`readValue` type signature:
+
+```java
+public <T> T readValue(String content, TypeReference valueTypeRef)
+```
+
+While happily using my wrapper function I suddenly got an unexpected
+error telling me that `Object` is incompatible with the type I was
+asking Jackson to de-serialize, which got me to re-evaluate the above
+type signature again.
+
+Let\'s look for a second at some code that will *happily compile* if you
+are using Jackson\'s own `readValue`:
+
+```java
+// This shouldn't compile!
+Long l = objectMapper.readValue("\"foo\"", new TypeReference<String>(){});
+```
+
+As you can see there we ask Jackson to decode the JSON into a `String`
+as enclosed in the `TypeReference`, but assign the result to a `Long`.
+And it compiles. And it fails at runtime with
+`java.lang.ClassCastException: java.lang.String cannot be cast to java.lang.Long`.
+Huh?
+
+Looking at the Jackson `readValue` implementation it becomes clear
+what\'s going on here:
+
+```java
+@SuppressWarnings({ "unchecked", "rawtypes" })
+public <T> T readValue(String content, TypeReference valueTypeRef)
+    throws IOException, JsonParseException, JsonMappingException
+{
+    return (T) _readMapAndClose(/* whatever */);
+}
+```
+
+The function is parameterised over the type `T`; however, the only place
+where `T` occurs in the signature is in the parameter declaration and
+the function return type. Java will happily let you use generic
+functions and types without specifying type parameters:
+
+```java
+// Compiles fine!
+final List myList = List.of(1,2,3);
+
+// Type is now myList : List<Object>
+```
+
+Meaning that those parameters default to `Object`. Now in the code above
+Jackson also explicitly casts the return value of its inner function
+call to `T`.
+
+What ends up happening is that Java infers the expected return type from
+the context of the `readValue` call and then happily uses the unchecked
+cast to fit that return type. If the type hints of the context aren\'t
+strong enough we simply get `Object` back.
+
+So what\'s the fix for this? It\'s quite simple:
+
+```java
+public <T> T readValue(String content, TypeReference<T> valueTypeRef)
+```
+
+By also making the parameter appear in the `TypeReference` we \"bind\"
+`T` to the type enclosed in the type reference. The cast can then also
+safely be removed.
+
+The cherries on top of this are:
+
+1.  `@SuppressWarnings({ "rawtypes" })` explicitly disables a
+    warning that would\'ve caught this
+
+2.  the `readValue` implementation using the less powerful `Class`
+    class to carry the type parameter does this correctly: `public <T>
+    T readValue(String content, Class<T> valueType)`
+
+The big question I have about this is *why* does Jackson do it this way?
+Obviously the warning did not just appear there by chance, so somebody
+must have thought about this?
+
+If anyone knows what the reason is, I\'d be happy to hear from you.
+
+PS: Shoutout to David & Lucia for helping me not lose my sanity over
+this.
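For completeness: applying the same fix to the wrapper at the top of the post gives a version that catches the mismatch at compile time. This is a minimal sketch, assuming Javaslang's `Try` and a Jackson `ObjectMapper`; the surrounding class is invented for illustration:

```java
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import javaslang.control.Try;

class SafeJson {
    private final ObjectMapper objectMapper = new ObjectMapper();

    // TypeReference<T> (instead of the raw TypeReference) binds T to the
    // argument, so T can no longer silently default to Object.
    public <T> Try<T> readValue(String json, TypeReference<T> type) {
        return Try.of(() -> objectMapper.readValue(json, type));
    }
}
```

With this signature, `Try<Long> l = readValue("\"foo\"", new TypeReference<String>(){})` becomes a compile-time error instead of a `ClassCastException` at runtime.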
diff --git a/web/blog/posts/nsa-zettabytes.md b/web/blog/posts/nsa-zettabytes.md
new file mode 100644
index 000000000000..f8b326f2fb42
--- /dev/null
+++ b/web/blog/posts/nsa-zettabytes.md
@@ -0,0 +1,93 @@
+I've been reading a few discussions on Reddit about the new NSA data
+centre that is being built and stumbled upon [this
+post](http://www.reddit.com/r/restorethefourth/comments/1jf6cx/the_guardian_releases_another_leaked_document_nsa/cbe5hnc),
+putting its alleged storage capacity at *5 zettabytes*.
+
+That seems to be a bit much, which I tried to explain to that guy, but I
+was quickly blocked by the common conspiracy argument that government
+technology is somehow far beyond the wildest dreams of us mere mortals -
+thus I wrote a very long reply that will most likely never be seen by
+anybody. Therefore I've decided to repost it here.
+
+------------------------------------------------------------------------
+
+I feel like I've entered /r/conspiracy. Please have some facts (and do
+read them!)
+
+Storing 5 zettabytes on one terabyte SSDs (I assume that\'s what you
+meant by flash-drive) would require 5000000000 of those. That is *five
+billion* of those flash drives. Can you visualise what five billion
+flash-drives look like?
+
+A single SSD is roughly 2cm\*13cm\*13cm with an approximate weight of
+80g. That would make 400 000 metric tons of SSDs, a weight equivalent to
+*over one thousand Boeing 747 airplanes*. Even if we assume that they
+solder the flash chips directly onto some kind of controller (which also
+weighs something), the raw material for that would be completely insane.
+
+Another visualization: If you stacked 5 billion SSDs on top of each
+other you would get an SSD tower that is a hundred thousand kilometres
+high; that is equivalent to 2.5x the equatorial circumference of
+*Earth* or 62000 miles.
+
+The volume of those SSDs would be clocking in at 1690000 cubic metres,
+more than the volume of the Empire State Building. Are you still with me?
+
+Let\'s talk cost. The Samsung SSD that I assume you are referring to will
+clock in at \$600; let\'s assume that the NSA gets a discount when buying
+*five billion* of those and gets them at the cheap price of \$250. That
+makes 1.25 trillion dollars. That would be a significant chunk of the
+current US national debt.
+
+And all of this is just SSDs to stick into servers and storage units,
+which need a whole bunch of other equipment as well to support them -
+the cost would probably shoot up to something like 8 trillion dollars if
+they were to build this. It would with very high certainty be more than
+the annual production of SSDs (I can\'t find numbers on that
+unfortunately) and take up *slightly* more space than they have in the
+Utah data centre (assuming you\'re not going to tell me that it is in
+fact attached to an underground base that goes down to the core of the
+Earth).
+
+Let\'s look at the \"But the government has better technologies!\" idea.
+
+Putting aside the fact that the military *most likely* does not have a
+secret base on Mars that deals with advanced science that the rest of us
+can only dream of, and doing this under the assumption that they do have
+this base, let\'s assume that they build a storage chip that stores 100TB.
+This reduces the number of chips needed to \"just\" 50 million. Let\'s say
+they get 10 of those into a server / some kind of specialized storage
+unit, so we only need 5 million of those specially engineered servers,
+with custom connectors, software, chips, storage, most likely also power
+sources and whatever - 5 million completely custom units built with
+technology that is not available to the market. Google is estimated to
+have about a million servers in total. I don\'t know exactly how many
+data centres those are spread across, but numbers I heard recently said
+that it\'s about 40. When Apple assembles a new iPhone model they need
+massive factories with thousands of workers and supplies from many
+different countries, over several months, to assemble just a few million
+units for their launch month.
+
+You are seriously proposing that the NSA is better than Google and Apple
+and the rest of the tech industry, world-wide, combined at designing
+*everything* in tech, manufacturing *everything* in tech, without *any*
+information about that leaking and without *any* of the science behind
+it being known? That\'s not just insane, that\'s outright impossible.
+
+And we haven\'t even touched upon how they would route the necessary
+amounts of bandwidth (crazy insane) to save *the entire internet* into
+that data centre.
+
+------------------------------------------------------------------------
+
+I\'m not saying that the NSA is not building a data centre to store
+surveillance information, to have more capacity to spy on people and all
+that - I\'m merely making the point that the extent to which conspiracy
+sites say they do this vastly overestimates their actual abilities. They
+don\'t have magic available to them! Instead of making up insane figures
+like that you should focus on what we actually know about their
+operations, because using those figures in a debate with somebody who is
+responsible for this (and knows what they\'re talking about) will end
+with you being destroyed - nobody will listen to the rest of what
+you\'re saying when that happens.
+
+\"Stick to the facts\" is valid for our side as well.
diff --git a/web/blog/posts/reversing-watchguard-vpn.md b/web/blog/posts/reversing-watchguard-vpn.md
new file mode 100644
index 000000000000..1f84e9e143cf
--- /dev/null
+++ b/web/blog/posts/reversing-watchguard-vpn.md
@@ -0,0 +1,158 @@
+**Update**: WatchGuard has
+[responded](https://www.reddit.com/r/netsec/comments/5tg0f9/reverseengineering_watchguard_mobile_vpn/dds6knx/)
+to this post on Reddit. If you haven\'t read the post yet I\'d recommend
+doing that first before reading the response to have the proper context.
+
+------------------------------------------------------------------------
+
+One of my current clients makes use of
+[WatchGuard](http://www.watchguard.com/help/docs/fireware/11/en-US/Content/en-US/mvpn/ssl/mvpn_ssl_client-install_c.html)
+Mobile VPN software to provide access to the internal network.
+
+Currently WatchGuard only provides clients for OS X and Windows, neither
+of which I am very fond of. In addition, an OpenVPN configuration file is
+provided, but it quickly turned out that this was only a piece of the
+puzzle.
+
+The problem is that this VPN setup is secured using 2-factor
+authentication (good!), but it does not use OpenVPN\'s default
+[challenge/response](https://openvpn.net/index.php/open-source/documentation/miscellaneous/79-management-interface.html)
+functionality to negotiate the credentials.
+
+Connecting with the OpenVPN config that the website supplied caused the
+VPN server to send a token to my phone, but I simply couldn\'t figure
+out how to supply it back to the server. In a normal challenge/response
+setting the token would be supplied as the password on the second
+authentication round, but the VPN server kept rejecting that.
+
+Other possibilities were various combinations of username & password
+(I\'ve seen a lot of those around), so I tried a whole bunch, for example
+`$password:$token` or even a `sha1(password, token)` - to no avail.
+
+At this point it was time to crank out
+[Hopper](https://www.hopperapp.com/) and see what\'s actually going on
+in the official OS X client - which uses OpenVPN under the hood!
+
+Diving into the client
+----------------------
+
+The first surprise came up right after opening the executable: It had
+debug symbols in it - and was written in Objective-C!
+
+![Debug symbols](https://i.imgur.com/EacIeXH.png)
+
+A good first step when looking at an application binary is going through
+the strings that are included in it, and the WatchGuard client had a lot
+to offer. Among the most interesting were a bunch of URIs that looked
+important:
+
+![Some URIs](https://i.imgur.com/4rg24K5.png)
+
+I started with the first one
+
+`%@?action=sslvpn_download&filename=%@&fw_password=%@&fw_username=%@`
+
+and just `curl`ed it on the VPN host, replacing the username and
+password fields with bogus data and the filename field with
+`client.wgssl` - another string in the executable that looked like a
+filename.
+
+To my surprise this endpoint immediately responded with a GZIPed file
+containing the OpenVPN config, CA certificate, and the client
+*certificate and key*, which I previously thought was only accessible
+after logging in to the web UI - oh well.
+
+The next endpoint I tried ended up being a bit more interesting still:
+
+`/?action=sslvpn_logon&fw_username=%@&fw_password=%@&style=fw_logon_progress.xsl&fw_logon_type=logon&fw_domain=Firebox-DB`
+
+Inserting the correct username and password into the query parameters
+actually triggered the process that sent a token to my phone. The
+response was a simple XML blob:
+
+``` {.example}
+<?xml version="1.0" encoding="UTF-8"?>
+<resp>
+  <action>sslvpn_logon</action>
+  <logon_status>4</logon_status>
+  <auth-domain-list>
+    <auth-domain>
+      <name>RADIUS</name>
+    </auth-domain>
+  </auth-domain-list>
+  <logon_id>441</logon_id>
+  <chaStr>Enter Your 6 Digit Passcode </chaStr>
+</resp>
+```
+
+Somewhat unsurprisingly, that `chaStr` field is actually the challenge
+string displayed in the client when logging in.
+
+This was obviously going in the right direction, so I proceeded to the
+procedures making use of this string. The first step was a relatively
+uninteresting function called `-[VPNController sslvpnLogon]` which
+formatted the URL, opened it and checked whether the `logon_status` was
+`4` before proceeding with the `logon_id` and `chaStr` contained in the
+response.
+
+*(Code snippets from here on are Hopper\'s pseudo-Objective-C)*
+
+![sslvpnLogon](https://i.imgur.com/KUK6MPz.png)
+
+It proceeded to the function `-[VPNController processTokenPrompt]` which
+showed the dialog window into which the user enters the token, sent it
+off to the next URL and checked the `logon_status` again:
+
+(`r12` is the reference to the `VPNController` instance, i.e. `self`).
+
+![processTokenPrompt](https://i.imgur.com/y6eYHxG.png)
+
+If the `logon_status` was `1` (apparently \"success\" here) it proceeded
+to do something quite interesting:
+
+![processTokenPrompt2](https://i.imgur.com/f5dAsHD.png)
+
+The user\'s password was overwritten with the (verified) OTP token -
+before OpenVPN had even been started!
+
+Reading a bit more of the code in the subsequent
+`-[VPNController doLogin]` method revealed that it shelled out to
+`openvpn` and enabled the management socket, which makes it possible to
+remotely control an `openvpn` process by sending it commands over TCP.
+
+It then simply sent the username and the OTP token as the credentials
+after configuring OpenVPN with the correct config file:
+
+![doLogin](https://i.imgur.com/YLxxpKD.png)
+
+... and the OpenVPN connection then succeeds.
+
+TL;DR
+-----
+
+Rather than using OpenVPN\'s built-in challenge/response mechanism, the
+WatchGuard client validates user credentials *outside* of the VPN
+connection protocol and then passes on the OTP token, which seems to be
+temporarily in a \'blessed\' state after verification, as the user\'s
+password.
+
+I didn\'t check to see how much verification of this token is performed
+(does it check the source IP against the IP that performed the challenge
+validation?), but this certainly seems like a bit of a security issue -
+considering that an attacker on the same network would, if they time the
+attack right, only need your username and 6-digit OTP token to
+authenticate.
+
+Don\'t roll your own security, folks!
+
+Bonus
+-----
+
+The whole reason why I set out to do this is so I could connect to this
+VPN from Linux, so this blog post wouldn\'t be complete without a
+solution for that.
+
+To make this process really easy, I\'ve written a [little
+tool](https://github.com/tazjin/watchblob) that performs the steps
+mentioned above from the CLI and lets users know when they can
+authenticate using their OTP token.
diff --git a/web/blog/posts/sick-in-sweden.md b/web/blog/posts/sick-in-sweden.md
new file mode 100644
index 000000000000..0c43c5832d73
--- /dev/null
+++ b/web/blog/posts/sick-in-sweden.md
@@ -0,0 +1,26 @@
+I\'ve been sick more in my two years in Sweden than in the ten years
+before that.
+
+Why? I have a theory about it and after briefly discussing it with one
+of my roommates (who is experiencing the same thing) I\'d like to share
+it with you:
+
+Normally when people get sick, are coughing, have a fever and so on, they
+take a few days off from work and stay at home. The reasons are twofold:
+You want to rest a bit in order to get rid of the disease and you want
+to *avoid infecting your co-workers*.
+
+In Sweden people will drag themselves into work anyway, because of a
+concept called the
+[karensdag](https://www.forsakringskassan.se/wps/portal/sjukvard/sjukskrivning_och_sjukpenning/karensdag_och_forstadagsintyg).
+The TL;DR of this is \'if you take days off sick you won\'t get paid for
+the first day, and will only get 80% of your salary on the remaining
+days\'.
+
+Many people are not willing to take that financial hit. In combination
+with Sweden\'s rather mediocre healthcare system you end up constantly
+being surrounded by sick people, not just in your own office but also on
+public transport and in basically all other public places.
+
+Oh and the best thing about this? Swedish politicians [often ignore
+this](https://www.aftonbladet.se/nyheter/article10506886.ab) rule and
+just don\'t report their sick days. Nice.
diff --git a/web/blog/posts/the-smu-problem.md b/web/blog/posts/the-smu-problem.md
new file mode 100644
index 000000000000..f411e3116046
--- /dev/null
+++ b/web/blog/posts/the-smu-problem.md
@@ -0,0 +1,151 @@
+After having tested countless messaging apps over the years, being
+unsatisfied with most of them and finally getting stuck with
+[Telegram](https://telegram.org/) I have developed a little theory about
+messaging apps.
+
+SMU stands for *Security*, *Multi-Device* and *Usability*. Quite like
+the [CAP-theorem](https://en.wikipedia.org/wiki/CAP_theorem) I believe
+that you can - using current models - only solve two out of three things
+on this list. Let me elaborate on what I mean by the individual points:
+
+**Security**: This is mainly about encryption of messages, not so much
+about hiding identities from third-parties. Commonly some kind of
+asymmetric encryption scheme. Verification of the keys used must be
+possible for the user.
+
+**Multi-Device**: Messaging-app clients for multiple devices, with
+devices being linked to the same identifier, receiving the same messages
+and being independent of each other. A nice bonus is also an open
+protocol (like Telegram\'s) that would let people write new clients.
+
+**Usability**: Usability is a bit of a broad term, but what I mean by it
+here is handling contacts and identities. It should be easy to create
+accounts, give contact information to people and have everything just
+work in a somewhat automated fashion.
+
+Some categorisation of popular messaging apps:
+
+**SU**: Threema
+
+**MU**: Telegram, Google Hangouts, iMessage, Facebook Messenger
+
+**SM**:
+[Signal](https://gist.github.com/TheBlueMatt/d2fcfb78d29faca117f5)
+
+*Side note: The most popular messaging app - WhatsApp - only scores a
+single letter (U). This makes it completely uninteresting to me.*
+
+Let\'s talk about **SM** - which might contain the key to solving SMU.
+Two approaches are interesting here.
+
+The single key model
+--------------------
+
+In Signal there is a single identity key which can be used to register a
+device on the server. There exists a process for sharing this identity
+key from a primary device to a secondary one, so that the secondary
+device can register itself (see the link above for a description).
+
+This *almost* breaks M because there is still a dependence on a primary
+device and newly onboarded devices cannot be used to onboard further
+devices. However, for lack of a better SM example I\'ll give it a pass.
+
+The other thing it obviously breaks is U, as the process for setting it
+up is annoying and having to rely on the primary device is a SPOF (there
+might be a way to recover from a lost primary device, but I didn\'t find
+any information so far).
+
+The multiple key model
+----------------------
+
+In iMessage every device that a user logs into creates a new key pair
+and submits its public key to a per-account key pool. Senders fetch all
+available public keys for a recipient and encrypt to all of the keys.
+
+Devices that join can catch up on history by receiving it from other
+devices that use the new device\'s public key.
+
+This *almost* solves all of SMU, but its compliance with S breaks due to
+the fact that the key pool is not auditable, and controlled by a
+third-party (Apple). How can you verify that they don\'t go and add
+another key to your pool?
+
+A possible solution
+-------------------
+
+Out of these two approaches I believe the multiple key one looks more
+promising.
+If there were a third party handling the key pool, but in a way that is
+verifiable, transparent and auditable, that model could be used to solve
+SMU.
+
+The technology I have been thinking about for this is some kind of
+blockchain model and here\'s how I think it could work:
+
+1.  Bob installs the app and begins onboarding. The first device
+    generates its keypair, submits the public key and an account
+    creation request.
+
+2.  Bob\'s account is created on the messaging apps\' servers and a
+    unique identifier plus the fingerprint of the first device\'s public
+    key is written to the chain.
+
+3.  Alice sends a message to Bob, her device asks the messaging service
+    for Bob\'s account\'s identity and public keys. Her device verifies
+    the public key fingerprint against the one in the blockchain before
+    encrypting to it and sending the message.
+
+4.  Bob receives Alice\'s message on his first device.
+
+5.  Bob logs in to his account on a second device. The device generates
+    a key pair and sends the public key to the service, the service
+    writes it to the blockchain using its identifier.
+
+6.  The messaging service requests that Bob\'s first device signs the
+    second device\'s key and triggers a simple confirmation popup.
+
+7.  Bob confirms the second device on his first device. It signs the key
+    and writes the signature to the chain.
+
+8.  Alice sends another message, her device requests Bob\'s current keys
+    and receives the new key. It verifies that both the messaging
+    service and one of Bob\'s older devices have confirmed this key in
+    the chain. It encrypts the message to both keys and sends it on.
+
+9.  Bob receives Alice\'s message on both devices.
+
+After this the second device can request conversation history from the
+first one to synchronise old messages.
+
+Further devices added to an account can be confirmed by any of the
+devices already in the account.
+
+The messaging service could not add new keys for an account on its own
+because it does not control any of the private keys confirmed by the
+chain.
+
+In case all devices were lost, the messaging service could associate the
+account with a fresh identity in the blockchain. Message history
+synchronisation would of course be impossible.
+
+Feedback welcome
+----------------
+
+I would love to hear some input on this idea, especially if anyone knows
+of an attempt to implement a similar model already. Possible attack
+vectors would also be really interesting.
+
+Until something like this comes to fruition, I\'ll continue using
+Telegram with GPG as the security layer when needed.
+
+**Update:** WhatsApp has launched an integration with the Signal guys
+and added their protocol to the official WhatsApp app. This means
+WhatsApp now firmly sits in the SU-category, but it still does not solve
+this problem.
+
+**Update 2:** Facebook Messenger has also integrated with Signal, but
+their secret chats do not support multi-device well (it is Signal
+after all). This means it scores either SU or MU depending on which mode
+you use it in.
+
+An interesting service I have not yet evaluated properly is
+[Matrix](http://matrix.org/).
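To make step 8 of the model above a bit more concrete, here is a minimal sketch of the check Alice's client would perform before trusting a newly appeared device key. All type and field names are invented for illustration; this is not based on any real implementation:

```java
import java.util.List;

// Hypothetical views of what the chain would store.
record KeyEntry(String accountId, String keyFingerprint) {}
record KeySignature(String keyFingerprint, String signedByFingerprint) {}

class DeviceKeyCheck {
    // A new key is trusted only if the messaging service wrote it to the
    // chain for this account AND one of the account's already-trusted
    // device keys has signed it (steps 5-7 of the model).
    static boolean isTrusted(String accountId, String newKey,
                             List<KeyEntry> keys, List<KeySignature> sigs,
                             List<String> trustedKeys) {
        boolean onChain = keys.stream().anyMatch(e ->
            e.accountId().equals(accountId) && e.keyFingerprint().equals(newKey));
        boolean signedByTrusted = sigs.stream().anyMatch(s ->
            s.keyFingerprint().equals(newKey)
                && trustedKeys.contains(s.signedByFingerprint()));
        return onChain && signedByTrusted;
    }
}
```

A real client would verify actual cryptographic signatures rather than compare fingerprints; the point is only that both conditions are read from the chain, so the service alone cannot satisfy the second one.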
diff --git a/web/tazblog/static/blog.css b/web/blog/static/blog.css index e6e4ae3c2be0..e6e4ae3c2be0 100644 --- a/web/tazblog/static/blog.css +++ b/web/blog/static/blog.css diff --git a/web/cgit-taz/0001-cgit_monorepo_urls.patch b/web/cgit-taz/0001-cgit_monorepo_urls.patch deleted file mode 100644 index 624a74b5dbbc..000000000000 --- a/web/cgit-taz/0001-cgit_monorepo_urls.patch +++ /dev/null @@ -1,114 +0,0 @@ -From f6646e5a6da29da979d6954feba9d85556bc6936 Mon Sep 17 00:00:00 2001 -From: Vincent Ambo <tazjin@google.com> -Date: Sat, 21 Dec 2019 18:41:45 +0000 -Subject: [PATCH 1/3] feat: Generate monorepo compatible URLs - -Generates URLs that do not include the repository name. - -On git.tazj.in, only one repository (depot) is served - hence URLs -generated by cgit need not include the name. ---- - cmd.c | 24 +----------------------- - ui-shared.c | 29 +++++++++-------------------- - 2 files changed, 10 insertions(+), 43 deletions(-) - -diff --git a/cmd.c b/cmd.c -index 63f0ae5..b37b79d 100644 ---- a/cmd.c -+++ b/cmd.c -@@ -39,29 +39,7 @@ static void atom_fn(void) - - static void about_fn(void) - { -- if (ctx.repo) { -- size_t path_info_len = ctx.env.path_info ? strlen(ctx.env.path_info) : 0; -- if (!ctx.qry.path && -- ctx.qry.url[strlen(ctx.qry.url) - 1] != '/' && -- (!path_info_len || ctx.env.path_info[path_info_len - 1] != '/')) { -- char *currenturl = cgit_currenturl(); -- char *redirect = fmtalloc("%s/", currenturl); -- cgit_redirect(redirect, true); -- free(currenturl); -- free(redirect); -- } else if (ctx.repo->readme.nr) -- cgit_print_repo_readme(ctx.qry.path); -- else if (ctx.repo->homepage) -- cgit_redirect(ctx.repo->homepage, false); -- else { -- char *currenturl = cgit_currenturl(); -- char *redirect = fmtalloc("%s../", currenturl); -- cgit_redirect(redirect, false); -- free(currenturl); -- free(redirect); -- } -- } else -- cgit_print_site_readme(); -+ cgit_print_repo_readme(ctx.qry.path); - } - - static void blame_fn(void) -diff --git a/ui-shared.c b/ui-shared.c -index 739505a..c7c3754 100644 ---- a/ui-shared.c -+++ b/ui-shared.c -@@ -95,29 +95,23 @@ const char *cgit_loginurl(void) - - char *cgit_repourl(const char *reponame) - { -- if (ctx.cfg.virtual_root) -- return fmtalloc("%s%s/", ctx.cfg.virtual_root, reponame); -- else -- return fmtalloc("?r=%s", reponame); -+ // my cgit instance *only* serves the depot, hence that's the only value ever -+ // needed. -+ return fmtalloc("/"); - } - - char *cgit_fileurl(const char *reponame, const char *pagename, - const char *filename, const char *query) - { - struct strbuf sb = STRBUF_INIT; -- char *delim; - -- if (ctx.cfg.virtual_root) { -- strbuf_addf(&sb, "%s%s/%s/%s", ctx.cfg.virtual_root, reponame, -- pagename, (filename ? filename:"")); -- delim = "?"; -- } else { -- strbuf_addf(&sb, "?url=%s/%s/%s", reponame, pagename, -- (filename ? filename : "")); -- delim = "&"; -+ strbuf_addf(&sb, "%s%s/%s", ctx.cfg.virtual_root, -+ pagename, (filename ? 
filename:"")); -+ -+ if (query) { -+ strbuf_addf(&sb, "%s%s", "?", query); - } -- if (query) -- strbuf_addf(&sb, "%s%s", delim, query); -+ - return strbuf_detach(&sb, NULL); - } - -@@ -245,9 +239,6 @@ static char *repolink(const char *title, const char *class, const char *page, - html(" href='"); - if (ctx.cfg.virtual_root) { - html_url_path(ctx.cfg.virtual_root); -- html_url_path(ctx.repo->url); -- if (ctx.repo->url[strlen(ctx.repo->url) - 1] != '/') -- html("/"); - if (page) { - html_url_path(page); - html("/"); -@@ -957,8 +948,6 @@ static void print_header(void) - - html("<td class='main'>"); - if (ctx.repo) { -- cgit_index_link("index", NULL, NULL, NULL, NULL, 0, 1); -- html(" : "); - cgit_summary_link(ctx.repo->name, ctx.repo->name, NULL, NULL); - if (ctx.env.authenticated) { - html("</td><td class='form'>"); --- -2.24.1.735.g03f4e72817-goog - diff --git a/web/cgit-taz/0002-cgit_subtree_readmes.patch b/web/cgit-taz/0002-cgit_subtree_readmes.patch deleted file mode 100644 index f3aba10215bc..000000000000 --- a/web/cgit-taz/0002-cgit_subtree_readmes.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 61500898c7d1363f88b763c7778cf1a8dfd13aca Mon Sep 17 00:00:00 2001 -From: Vincent Ambo <tazjin@google.com> -Date: Sat, 21 Dec 2019 22:58:19 +0000 -Subject: [PATCH 2/3] feat(ui-summary): Attempt to use README at each subtree - -This means that individual subtrees of a repository will also have -their READMEs rendered on the about page, for example: - - /foo/bar/README.md - -Will render on: - - /about/foo/bar/ - -This is useful for monorepo setups in which subtrees represent -individual projects. ---- - ui-summary.c | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/ui-summary.c b/ui-summary.c -index 8e81ac4..34ce4e9 100644 ---- a/ui-summary.c -+++ b/ui-summary.c -@@ -128,6 +128,18 @@ void cgit_print_repo_readme(char *path) - goto done; - } - -+ /* Determine which file to serve by checking whether the given filename is -+ * already a valid file and otherwise appending the expected file name of -+ * the readme. -+ * -+ * If neither yield a valid file, the user gets a blank page. Could probably -+ * do with an error message in between there, but whatever. -+ */ -+ if (path && ref && !cgit_ref_path_exists(filename, ref, 1)) { -+ filename = fmtalloc("%s/%s", path, ctx.repo->readme.items[0].string); -+ free_filename = 1; -+ } -+ - /* Print the calculated readme, either from the git repo or from the - * filesystem, while applying the about-filter. - */ --- -2.24.1.735.g03f4e72817-goog - diff --git a/web/cgit-taz/0003-cgit_subtree_about_links.patch b/web/cgit-taz/0003-cgit_subtree_about_links.patch deleted file mode 100644 index 6b3d0a70b11d..000000000000 --- a/web/cgit-taz/0003-cgit_subtree_about_links.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 531b55dc96bb7ee2ce52a3612021e1c1f4ddac8a Mon Sep 17 00:00:00 2001 -From: Vincent Ambo <tazjin@google.com> -Date: Sat, 21 Dec 2019 23:27:28 +0000 -Subject: [PATCH 3/3] feat(ui-shared): Generate links to about pages from - subtrees - -If you're on tree/foo/bar, the about link will now point to -about/foo/bar. - -Currently the annoying thing about this is that it will also do it for -files. 
---- - ui-shared.c | 14 ++++++++++---- - 1 file changed, 10 insertions(+), 4 deletions(-) - -diff --git a/ui-shared.c b/ui-shared.c -index c7c3754..c37835a 100644 ---- a/ui-shared.c -+++ b/ui-shared.c -@@ -297,6 +297,12 @@ void cgit_tag_link(const char *name, const char *title, const char *class, - reporevlink("tag", name, title, class, tag, NULL, NULL); - } - -+void cgit_about_link(const char *name, const char *title, const char *class, -+ const char *head, const char *rev, const char *path) -+{ -+ reporevlink("about", name, title, class, head, rev, path); -+} -+ - void cgit_tree_link(const char *name, const char *title, const char *class, - const char *head, const char *rev, const char *path) - { -@@ -985,10 +991,10 @@ void cgit_print_pageheader(void) - - html("<table class='tabs'><tr><td>\n"); - if (ctx.env.authenticated && ctx.repo) { -- if (ctx.repo->readme.nr) -- reporevlink("about", "about", NULL, -- hc("about"), ctx.qry.head, NULL, -- NULL); -+ if (ctx.repo->readme.nr) { -+ cgit_about_link("about", NULL, hc("about"), ctx.qry.head, -+ ctx.qry.sha1, ctx.qry.vpath); -+ } - cgit_summary_link("summary", NULL, hc("summary"), - ctx.qry.head); - cgit_refs_link("refs", NULL, hc("refs"), ctx.qry.head, --- -2.24.1.735.g03f4e72817-goog - diff --git a/web/cgit-taz/default.nix b/web/cgit-taz/default.nix index 962efab91ac7..c2dd7a4a4730 100644 --- a/web/cgit-taz/default.nix +++ b/web/cgit-taz/default.nix @@ -9,21 +9,15 @@ with pkgs.third_party; let - # Patched version of cgit that has monorepo-specific features. - monocgit = cgit.overrideAttrs(old: { - patches = old.patches ++ [ - ./0001-cgit_monorepo_urls.patch - ./0002-cgit_subtree_readmes.patch - ./0003-cgit_subtree_about_links.patch - ]; - }); - + sourceFilter = writeShellScriptBin "cheddar-about" '' + exec ${pkgs.tools.cheddar}/bin/cheddar --about-filter $@ + ''; cgitConfig = writeText "cgitrc" '' # Global configuration virtual-root=/ enable-http-clone=1 readme=:README.md - about-filter=${pkgs.tools.cheddar}/bin/cheddar + about-filter=${sourceFilter}/bin/cheddar-about source-filter=${pkgs.tools.cheddar}/bin/cheddar enable-log-filecount=1 enable-log-linecount=1 @@ -42,7 +36,7 @@ let thttpdConfig = writeText "thttpd.conf" '' port=8080 - dir=${monocgit}/cgit + dir=${cgit}/cgit nochroot novhost logfile=/dev/stdout @@ -77,4 +71,4 @@ let }); in writeShellScriptBin "cgit-launch" '' exec ${thttpdCgit}/bin/thttpd -D -C ${thttpdConfig} -# '' +'' diff --git a/web/homepage/default.nix b/web/homepage/default.nix new file mode 100644 index 000000000000..d2905a7eb6ca --- /dev/null +++ b/web/homepage/default.nix @@ -0,0 +1,76 @@ +# Assembles the website index and configures an nginx instance to +# serve it. +# +# The website is made up of a simple header&footer and content +# elements for things such as blog posts and projects. +# +# Content for the blog is in //web/blog instead of here. +{ pkgs, lib, ... }: + +with pkgs; +with nix.yants; + +let + inherit (builtins) readFile replaceStrings sort; + inherit (third_party) writeFile runCommandNoCC; + + # The different types of entries on the homepage. + entryClass = enum "entryClass" [ "blog" "project" "misc" ]; + + # The definition of a single entry. 
+  entry = struct "entry" {
+    class = entryClass;
+    title = string;
+    url = string;
+    date = int; # epoch
+    description = option string;
+  };
+
+  escape = replaceStrings [ "<" ">" "&" "'" ] [ "&lt;" "&gt;" "&amp;" "&#39;" ];
+
+  postToEntry = defun [ web.blog.post entry ] (post: {
+    class = "blog";
+    title = post.title;
+    url = "/blog/${post.key}";
+    date = post.date;
+  });
+
+  formatDate = defun [ int string ] (date: readFile (runCommandNoCC "date" {} ''
+    date --date='@${toString date}' '+%Y-%m-%d' > $out
+  ''));
+
+  formatEntryDate = defun [ entry string ] (entry: entryClass.match entry.class {
+    blog = "Blog post from ${formatDate entry.date}";
+    project = "Project from ${formatDate entry.date}";
+    misc = "Posted on ${formatDate entry.date}";
+  });
+
+  entryToDiv = defun [ entry string ] (entry: ''
+    <a href="${entry.url}" class="entry ${entry.class}">
+      <div>
+        <p class="entry-title">${escape entry.title}</p>
+        ${
+          lib.optionalString ((entry ? description) && (entry.description != null))
+          "<p class=\"entry-description\">${escape entry.description}</p>"
+        }
+        <p class="entry-date">${formatEntryDate entry}</p>
+      </div>
+    </a>
+  '');
+
+  index = entries: third_party.writeText "index.html" (lib.concatStrings (
+    [ (builtins.readFile ./header.html) ]
+    ++ (map entryToDiv (sort (a: b: a.date > b.date) entries))
+    ++ [ (builtins.readFile ./footer.html) ]
+  ));
+
+  homepage = index ((map postToEntry web.blog.posts) ++ (import ./entries.nix));
+  website = runCommandNoCC "website" {} ''
+    mkdir $out
+    cp ${homepage} $out/index.html
+    cp -r ${./static} $out/static
+  '';
+in third_party.callPackage ./nginx.nix {
+  inherit website;
+  blog = web.blog;
+}
diff --git a/web/homepage/entries.nix b/web/homepage/entries.nix
new file mode 100644
index 000000000000..d204090330ef
--- /dev/null
+++ b/web/homepage/entries.nix
@@ -0,0 +1,47 @@
+[
+  {
+    class = "project";
+    title = "depot";
+    url = "https://git.tazj.in/about";
+    date = 1576800000;
+    description = "Merging all of my projects into a single, Nix-based monorepo";
+  }
+  {
+    class = "project";
+    title = "Nixery";
+    url = "https://github.com/google/nixery";
+    date = 1565132400;
+    description = "A Nix-backed container registry that builds container images on demand";
+  }
+  {
+    class = "project";
+    title = "kontemplate";
+    url = "https://git.tazj.in/about/ops/kontemplate";
+    date = 1486550940;
+    description = "Simple file templating tool built for Kubernetes resources";
+  }
+  {
+    class = "misc";
+    title = "dottime";
+    url = "https://dotti.me/";
+    date = 1560898800;
+    description = "A universal convention for conveying time (by edef <3)";
+  }
+  {
+    class = "project";
+    title = "journaldriver";
+    url = "https://git.tazj.in/about/ops/journaldriver";
+    date = 1527375600;
+    description = "Small daemon to forward logs from journald to Stackdriver Logging";
+  }
+  {
+    class = "misc";
+    title = "Principia Discordia";
+    url = "https://principiadiscordia.com/book/1.php";
+    date = 1495494000;
+    description = ''
+      The Principia is a short book I read as a child, and didn't
+      understand until much later. It shaped much of my world view.
+ ''; + } +] diff --git a/web/homepage/footer.html b/web/homepage/footer.html new file mode 100644 index 000000000000..2f17135066e8 --- /dev/null +++ b/web/homepage/footer.html @@ -0,0 +1,2 @@ + </div> +</body> diff --git a/web/homepage/header.html b/web/homepage/header.html new file mode 100644 index 000000000000..ec81fa04dc05 --- /dev/null +++ b/web/homepage/header.html @@ -0,0 +1,35 @@ +<!DOCTYPE html> +<head><meta charset="utf-8"> + <meta name="viewport" content="width=device-width, initial-scale=1"> + <meta name="description" content="tazjin's blog"> + <link rel="stylesheet" type="text/css" href="static/tazjin.css" media="all"> + <link rel="icon" type="image/webp" href="/static/favicon.webp"> + <title>tazjin's interblag</title> +</head> +<body class="dark"> + <header> + <h1> + <a class="interblag-title" href="/">tazjin's interblag</a> + </h1> + <hr> + </header> + <div class="introduction"> + <p>Hello, illuminated visitor.</p> + <p> + I'm tazjin. Usually you can find + me <a class="dark-link" href="https://git.tazj.in/about">programming computers</a> + using tools such as <a class="dark-link" href="https://nixos.org/nix">Nix</a> + and <a class="dark-link" href="https://www.gnu.org/software/emacs/">Emacs</a>, + cuddling <a class="dark-link" href="https://twitter.com/edefic">people I love</a> + or posting nonsense <a class="dark-link" href="https://twitter.com/tazjin">on the + internet</a>. + </p> + <p> + Below is a collection of + my <span class="project">projects</span>, <span class="blog">blog + posts</span> and some <span class="misc">random things</span> by + me or others. If you'd like to get in touch about anything, send + me a mail at mail@[this domain] or ping me on IRC or Twitter. + </p> + </div> + <div class="entry-container"> diff --git a/web/homepage/nginx.nix b/web/homepage/nginx.nix new file mode 100644 index 000000000000..100c0cc9ee3a --- /dev/null +++ b/web/homepage/nginx.nix @@ -0,0 +1,79 @@ +# This file creates an nginx server that serves the blog on port 8080. +# +# It's not intended to be the user-facing nginx. 
+{
+  # third_party attributes supplied by callPackage
+  writeText, writeShellScriptBin, nginx, lib,
+
+  # website content
+  blog, website
+}:
+
+let
+  inherit (builtins) hasAttr filter map;
+
+  oldRedirects = lib.concatStringsSep "\n" (map (post: ''
+    location ~* ^(/en)?/${post.oldKey} {
+      # TODO(tazjin): 301 once this works
+      return 302 https://tazj.in/blog/${post.key};
+    }
+  '') (filter (hasAttr "oldKey") blog.posts));
+
+  config = writeText "homepage-nginx.conf" ''
+    daemon off;
+    worker_processes 1;
+    error_log stderr;
+    pid /tmp/nginx-homepage.pid;
+
+    events {
+      worker_connections 1024;
+    }
+
+    http {
+      include ${nginx}/conf/mime.types;
+      fastcgi_temp_path /tmp/nginx-homepage;
+      uwsgi_temp_path /tmp/nginx-homepage;
+      scgi_temp_path /tmp/nginx-homepage;
+      client_body_temp_path /tmp/nginx-homepage;
+      proxy_temp_path /tmp/nginx-homepage;
+      sendfile on;
+
+      # Logging is handled by the primary nginx server
+      access_log off;
+
+      server {
+        listen 8080 default_server;
+        server_name tazj.in;
+        root ${website};
+
+        ${oldRedirects}
+
+        location /blog {
+          alias ${blog.rendered};
+
+          if ($request_uri ~ ^/(.*)\.html$) {
+            return 302 /$1;
+          }
+
+          try_files $uri $uri.html $uri/ =404;
+        }
+      }
+
+      server {
+        listen 8080;
+        server_name www.tazj.in;
+        return 301 https://tazj.in$request_uri;
+      }
+    }
+  '';
+in writeShellScriptBin "homepage" ''
+  if [[ -v CONTAINER_SETUP ]]; then
+    cd /run
+    echo 'nogroup:x:30000:nobody' >> /etc/group
+    echo 'nobody:x:30000:30000:nobody:/tmp:/bin/bash' >> /etc/passwd
+  fi
+
+  mkdir -p /tmp/nginx-homepage
+  exec ${nginx}/bin/nginx -c ${config}
+''
diff --git a/web/homepage/static/favicon.webp b/web/homepage/static/favicon.webp
new file mode 100644
index 000000000000..f99c9085340b
--- /dev/null
+++ b/web/homepage/static/favicon.webp
Binary files differ
diff --git a/web/homepage/static/jetbrains-mono-bold-italic.woff2 b/web/homepage/static/jetbrains-mono-bold-italic.woff2
new file mode 100644
index 000000000000..34b5c69ae1cf
--- /dev/null
+++ b/web/homepage/static/jetbrains-mono-bold-italic.woff2
Binary files differ
diff --git a/web/homepage/static/jetbrains-mono-bold.woff2 b/web/homepage/static/jetbrains-mono-bold.woff2
new file mode 100644
index 000000000000..84a008af7edb
--- /dev/null
+++ b/web/homepage/static/jetbrains-mono-bold.woff2
Binary files differ
diff --git a/web/homepage/static/jetbrains-mono-italic.woff2 b/web/homepage/static/jetbrains-mono-italic.woff2
new file mode 100644
index 000000000000..85fd4687891e
--- /dev/null
+++ b/web/homepage/static/jetbrains-mono-italic.woff2
Binary files differ
diff --git a/web/homepage/static/jetbrains-mono.woff2 b/web/homepage/static/jetbrains-mono.woff2
new file mode 100644
index 000000000000..d5b94cb9e7db
--- /dev/null
+++ b/web/homepage/static/jetbrains-mono.woff2
Binary files differ
diff --git a/web/homepage/static/tazjin.css b/web/homepage/static/tazjin.css
new file mode 100644
index 000000000000..68e72577c37a
--- /dev/null
+++ b/web/homepage/static/tazjin.css
@@ -0,0 +1,142 @@
+/* Jetbrains Mono font from https://www.jetbrains.com/lp/mono/
+   licensed under Apache 2.0. Thanks, Jetbrains!
*/ +@font-face { + font-family: jetbrains-mono; + src: url(jetbrains-mono.woff2); +} + +@font-face { + font-family: jetbrains-mono; + font-weight: bold; + src: url(jetbrains-mono-bold.woff2); +} + +@font-face { + font-family: jetbrains-mono; + font-style: italic; + src: url(jetbrains-mono-italic.woff2); +} + +@font-face { + font-family: jetbrains-mono; + font-weight: bold; + font-style: italic; + src: url(jetbrains-mono-bold-italic.woff2); +} + +/* Generic-purpose styling */ + +body { + margin: 40px auto; + line-height: 1.6; + font-size: 18px; + padding: 0 10px; + font-family: jetbrains-mono, monospace; +} + +p, a :not(.uncoloured-link) { + color: inherit; +} + +h1, h2, h3 { + line-height: 1.2 +} + +/* Homepage styling */ + +.dark { + max-width: 800px; + background-color: #181818; + color: #e4e4ef; +} + +.dark-link, .interblag-title { + color: #96a6c8; +} + +.entry-container { + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: flex-start; +} + +.interblag-title { + text-decoration: none; +} + +.entry { + width: 42%; + margin: 5px; + padding-left: 7px; + padding-right: 5px; + border: 2px solid; + border-radius: 5px; + flex-grow: 1; + text-decoration: none; +} + +.misc { + color: #268bd2; + border-color: #268bd2; +} + +.project { + color: #9e95c7; + border-color: #9e95c7; +} + +.blog { + color: #95a99f; + border-color: #95a99f; +} + +.entry-title { + color: inherit !important; + font-weight: bold; + text-decoration: none; +} + +.entry-date { + font-style: italic; +} + +/* Blog styling */ + +.light { + max-width: 650px; + color: #383838; +} + +.blog-title { + color: inherit; + text-decoration: none; +} + +.footer { + text-align: right; +} + +.date { + text-align: right; + font-style: italic; + float: right; +} + +.inline { + display: inline; +} + +.lod { + text-align: center; +} + +pre { + min-width: 100%; + /* some code snippets escape to the side, but I don't want to wrap them */ + width: max-content; +} + +img { + max-width: 100%; +} diff --git a/web/tazblog/blog/Main.hs b/web/tazblog/blog/Main.hs deleted file mode 100644 index 6074f96b7685..000000000000 --- a/web/tazblog/blog/Main.hs +++ /dev/null @@ -1,24 +0,0 @@ --- | Main module for the blog's web server -module Main where - -import Control.Applicative ((<$>), (<*>)) -import Server (runBlog) -import System.Environment (getEnv) - -data MainOptions - = MainOptions - { blogPort :: Int, - resourceDir :: String - } - -readOpts :: IO MainOptions -readOpts = - MainOptions - <$> (fmap read $ getEnv "PORT") - <*> getEnv "RESOURCE_DIR" - -main :: IO () -main = do - opts <- readOpts - putStrLn ("tazblog starting on port " ++ (show $ blogPort opts)) - runBlog (blogPort opts) (resourceDir opts) diff --git a/web/tazblog/default.nix b/web/tazblog/default.nix deleted file mode 100644 index eecadff6ba17..000000000000 --- a/web/tazblog/default.nix +++ /dev/null @@ -1,18 +0,0 @@ -# Build configuration for the blog using plain Nix. -# -# tazblog.nix was generated using cabal2nix. - -{ pkgs, ... }: - -let - inherit (pkgs.third_party) writeShellScriptBin haskell; - tazblog = haskell.packages.ghc865.callPackage ./tazblog.nix {}; - wrapper = writeShellScriptBin "tazblog" '' - export PORT=8000 - export RESOURCE_DIR=${./static} - exec ${tazblog}/bin/tazblog - ''; -in wrapper.overrideAttrs(_: { - allowSubstitutes = true; - meta.enableCI = true; -}) diff --git a/web/tazblog/shell.nix b/web/tazblog/shell.nix deleted file mode 100644 index ebb891a87458..000000000000 --- a/web/tazblog/shell.nix +++ /dev/null @@ -1,11 +0,0 @@ -{ pkgs ? 
(import ../../default.nix {}).third_party.nixpkgs }: - -let tazblog = import ./tazblog.nix; - depNames = with builtins; filter ( - p: hasAttr p pkgs.haskellPackages - ) (attrNames (functionArgs tazblog)); - ghc = pkgs.ghc.withPackages(p: map (x: p."${x}") depNames); -in pkgs.stdenv.mkDerivation { - name = "shell"; - buildInputs = [ ghc pkgs.hlint ]; -} diff --git a/web/tazblog/src/Blog.hs b/web/tazblog/src/Blog.hs deleted file mode 100644 index 0a53b5f2fbf4..000000000000 --- a/web/tazblog/src/Blog.hs +++ /dev/null @@ -1,141 +0,0 @@ -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE QuasiQuotes #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TemplateHaskell #-} -{-# LANGUAGE TypeFamilies #-} - -module Blog where - -import BlogStore -import Data.Text (Text, pack) -import qualified Data.Text as T -import Data.Text.Lazy (fromStrict) -import Data.Time -import Text.Blaze.Html (preEscapedToHtml) -import Text.Hamlet -import Text.Markdown - -blogTitle :: Text = "tazjin's blog" - -repoURL :: Text = "https://bitbucket.org/tazjin/tazblog-haskell" - -mailTo :: Text = "mailto:mail@tazj.in" - -twitter :: Text = "https://twitter.com/tazjin" - -replace :: Eq a => a -> a -> [a] -> [a] -replace x y = map (\z -> if z == x then y else z) - --- |After this date all entries are Markdown -markdownCutoff :: Day -markdownCutoff = fromGregorian 2013 04 28 - -blogTemplate :: Text -> Html -> Html -blogTemplate t_append body = - [shamlet| -$doctype 5 - <head> - <meta charset="utf-8"> - <meta name="viewport" content="width=device-width, initial-scale=1"> - <meta name="description" content=#{blogTitle}#{t_append}> - <link rel="stylesheet" type="text/css" href="/static/blog.css" media="all"> - <link rel="alternate" type="application/rss+xml" title="RSS-Feed" href="/rss.xml"> - <title>#{blogTitle}#{t_append} - <body> - <header> - <h1> - <a href="/" .unstyled-link>#{blogTitle} - <hr> - ^{body} - ^{showFooter} -|] - -showFooter :: Html -showFooter = - [shamlet| -<footer> - <p .footer>Served without any dynamic languages. - <p .footer> - <a href=#{repoURL} .uncoloured-link> - | - <a href=#{twitter} .uncoloured-link>Twitter - | - <a href=#{mailTo} .uncoloured-link>Mail - <p .lod> - ಠ_ಠ -|] - -isEntryMarkdown :: Entry -> Bool -isEntryMarkdown e = edate e > markdownCutoff - -renderEntryMarkdown :: Text -> Html -renderEntryMarkdown = markdown def {msXssProtect = False} . 
fromStrict - -renderEntries :: [Entry] -> Maybe Html -> Html -renderEntries entries pageLinks = - [shamlet| -$forall entry <- entries - <article> - <h2 .inline> - <a href=#{linkElems entry} .unstyled-link> - #{title entry} - <aside .date> - #{pack $ formatTime defaultTimeLocale "%Y-%m-%d" $ edate entry} - $if (isEntryMarkdown entry) - ^{renderEntryMarkdown $ text entry} - $else - ^{preEscapedToHtml $ text entry} - <hr> -$maybe links <- pageLinks - ^{links} -|] - where - linkElems Entry {..} = "/" ++ show entryId - -showLinks :: Maybe Int -> Html -showLinks (Just i) = - [shamlet| - $if ((>) i 1) - <div .navigation> - <a href=#{nLink $ succ i} .uncoloured-link>Earlier - | - <a href=#{nLink $ pred i} .uncoloured-link>Later - $elseif ((<=) i 1) - ^{showLinks Nothing} -|] - where - nLink page = T.concat ["/?page=", show' page] -showLinks Nothing = - [shamlet| -<div .navigation> - <a href="/?page=2" .uncoloured-link>Earlier -|] - -renderEntry :: Entry -> Html -renderEntry e@Entry {..} = - [shamlet| -<article> - <h2 .inline> - #{title} - <aside .date> - #{pack $ formatTime defaultTimeLocale "%Y-%m-%d" edate} - $if (isEntryMarkdown e) - ^{renderEntryMarkdown text} - $else - ^{preEscapedToHtml $ text} -<hr> -|] - -showError :: Text -> Text -> Html -showError title err = - blogTemplate (": " <> title) - [shamlet| -<p>:( -<p>#{err} -<hr> -|] diff --git a/web/tazblog/src/BlogStore.hs b/web/tazblog/src/BlogStore.hs deleted file mode 100644 index 60ccd0b5a003..000000000000 --- a/web/tazblog/src/BlogStore.hs +++ /dev/null @@ -1,182 +0,0 @@ -{-# LANGUAGE GeneralizedNewtypeDeriving #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE OverloadedStrings #-} - --- |This module implements fetching of individual blog entries from --- DNS. Yes, you read that correctly. --- --- Each blog post is stored as a set of records in a designated DNS --- zone. For the production blog, this zone is `blog.tazj.in.`. --- --- A top-level record at `_posts` contains a list of all published --- post IDs. --- --- For each of these post IDs, there is a record at `_meta.$postID` --- that contains the title and number of post chunks. --- --- For each post chunk, there is a record at `_$chunkID.$postID` that --- contains a base64-encoded post fragment. --- --- This module implements logic for assembling a post out of these --- fragments and caching it based on the TTL of its `_meta` record. -module BlogStore - ( BlogCache, - EntryId (..), - Entry (..), - withCache, - listEntries, - getEntry, - show' - ) -where - -import Control.Applicative ((<$>), (<*>)) -import Control.Monad (mzero) -import Control.Monad.IO.Class (MonadIO, liftIO) -import Data.Aeson ((.:), FromJSON (..), Value (Object), decodeStrict) -import Data.ByteString.Base64 (decodeLenient) -import Data.Either (fromRight) -import Data.List (sortBy) -import Data.Text as T (Text, concat, pack) -import Data.Text.Encoding (decodeUtf8', encodeUtf8) -import Data.Time (Day) -import Network.DNS (DNSError, lookupTXT) -import qualified Network.DNS.Resolver as R - -newtype EntryId = EntryId {unEntryId :: Integer} - deriving (Eq, Ord, FromJSON) - -instance Show EntryId where - - show = show . unEntryId - -data Entry - = Entry - { entryId :: EntryId, - author :: Text, - title :: Text, - text :: Text, - edate :: Day - } - deriving (Eq, Ord, Show) - --- | Wraps a DNS resolver with caching configured. For the initial --- version of this, all caching of entries is done by the resolver --- (i.e. no pre-assembled versions of entries are cached). 
-data BlogCache = BlogCache R.Resolver Text - -data StoreError - = PostNotFound EntryId - | DNS DNSError - | InvalidMetadata - | InvalidChunk - | InvalidPosts - deriving (Show) - -type Offset = Int - -type Count = Int - -withCache :: Text -> (BlogCache -> IO a) -> IO a -withCache zone f = do - let conf = - R.defaultResolvConf - { R.resolvCache = Just R.defaultCacheConf, - R.resolvConcurrent = True - } - seed <- R.makeResolvSeed conf - R.withResolver seed (\r -> f $ BlogCache r zone) - -listEntries :: MonadIO m => BlogCache -> Offset -> Count -> m [Entry] -listEntries cache offset count = liftIO $ do - posts <- postList cache - entries <- mapM (entryFromDNS cache) $ take count $ drop offset $ fromRight (error "no posts") posts - -- TODO: maybe don't just drop broken entries - return - $ fromRight (error "no entries") - $ sequence entries - -getEntry :: MonadIO m => BlogCache -> EntryId -> m (Maybe Entry) -getEntry cache eid = liftIO $ entryFromDNS cache eid >>= \case - Left _ -> return Nothing -- TODO: ?? - Right entry -> return $ Just entry - -show' :: Show a => a -> Text -show' = pack . show - --- * DNS fetching implementation -type Chunk = Integer - --- | Represents the metadata stored for each post in the _meta record. -data Meta = Meta Integer Text Day - deriving (Show) - -instance FromJSON Meta where - - parseJSON (Object v) = - Meta - <$> v - .: "c" - <*> v - .: "t" - <*> v - .: "d" - parseJSON _ = mzero - -entryMetadata :: BlogCache -> EntryId -> IO (Either StoreError Meta) -entryMetadata (BlogCache r z) (EntryId eid) = - let domain = encodeUtf8 ("_meta." <> show' eid <> "." <> z) - record = lookupTXT r domain - toMeta rrdata = case decodeStrict $ decodeLenient rrdata of - Nothing -> Left InvalidMetadata - Just m -> Right m - in record >>= \case - (Left err) -> return $ Left $ DNS err - (Right [bs]) -> return $ toMeta bs - _ -> return $ Left InvalidMetadata - -entryChunk :: BlogCache -> EntryId -> Chunk -> IO (Either StoreError Text) -entryChunk (BlogCache r z) (EntryId eid) c = - let domain = encodeUtf8 ("_" <> show' c <> "." <> show' eid <> "." <> z) - record = lookupTXT r domain - toChunk rrdata = case decodeUtf8' $ decodeLenient rrdata of - Left _ -> Left InvalidChunk - Right chunk -> Right chunk - in record >>= \case - (Left err) -> return $ Left $ DNS err - (Right [bs]) -> return $ toChunk bs - _ -> return $ Left InvalidChunk - -fetchAssembleChunks :: BlogCache -> EntryId -> Meta -> IO (Either StoreError Text) -fetchAssembleChunks cache eid (Meta n _ _) = do - chunks <- mapM (entryChunk cache eid) [0 .. (n - 1)] - return $ fmap T.concat $ sequence chunks - -entryFromDNS :: BlogCache -> EntryId -> IO (Either StoreError Entry) -entryFromDNS cache eid = do - meta <- entryMetadata cache eid - case meta of - Left err -> return $ Left err - Right meta -> do - chunks <- fetchAssembleChunks cache eid meta - let (Meta _ t d) = meta - return - $ either Left - ( \text -> Right $ Entry - { entryId = eid, - author = "tazjin", - title = t, - text = text, - edate = d - } - ) - chunks - -postList :: BlogCache -> IO (Either StoreError [EntryId]) -postList (BlogCache r z) = - let domain = encodeUtf8 ("_posts." <> z) - record = lookupTXT r domain - toPosts = - fmap (sortBy (flip compare)) - . mapM (maybe (Left InvalidPosts) Right . decodeStrict) - in either (Left . 
DNS) toPosts <$> record diff --git a/web/tazblog/src/RSS.hs b/web/tazblog/src/RSS.hs deleted file mode 100644 index 913aa9a4081b..000000000000 --- a/web/tazblog/src/RSS.hs +++ /dev/null @@ -1,48 +0,0 @@ -{-# LANGUAGE RecordWildCards #-} - -module RSS - ( renderFeed - ) -where - -import BlogStore -import Data.Maybe (fromJust) -import qualified Data.Text as T -import Data.Time (UTCTime (..), getCurrentTime, secondsToDiffTime) -import Network.URI (URI, parseURI) -import Text.RSS - -createChannel :: UTCTime -> [ChannelElem] -createChannel now = - [ Language "en", - Copyright "Vincent Ambo", - WebMaster "mail@tazj.in", - ChannelPubDate now - ] - -createRSS :: UTCTime -> [Item] -> RSS -createRSS t = - let link = fromJust $ parseURI "https://tazj.in" - in RSS "tazjin's blog" link "tazjin's blog feed" (createChannel t) - -createItem :: Entry -> Item -createItem Entry {..} = - [ Title "tazjin's blog", - Link $ entryLink entryId, - Description $ T.unpack text, - PubDate $ UTCTime edate $ secondsToDiffTime 0 - ] - -entryLink :: EntryId -> URI -entryLink i = - let url = "http://tazj.in/" ++ "/" ++ show i - in fromJust $ parseURI url - -createItems :: [Entry] -> [Item] -createItems = map createItem - -createFeed :: [Entry] -> IO RSS -createFeed e = getCurrentTime >>= (\t -> return $ createRSS t $ createItems e) - -renderFeed :: [Entry] -> IO String -renderFeed e = fmap (showXML . rssToXML) (createFeed e) diff --git a/web/tazblog/src/Server.hs b/web/tazblog/src/Server.hs deleted file mode 100644 index 40129988393b..000000000000 --- a/web/tazblog/src/Server.hs +++ /dev/null @@ -1,81 +0,0 @@ -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE OverloadedStrings #-} -{-# LANGUAGE ScopedTypeVariables #-} - -module Server where - -import Blog -import BlogStore -import Control.Applicative (optional) -import Control.Monad (msum) -import Control.Monad.IO.Class (liftIO) -import Data.Maybe (maybe) -import qualified Data.Text as T -import Happstack.Server hiding (Session) -import RSS - -pageSize :: Int -pageSize = 3 - -tmpPolicy :: BodyPolicy -tmpPolicy = defaultBodyPolicy "/tmp" 0 200000 1000 - -runBlog :: Int -> String -> IO () -runBlog port respath = - withCache "blog.tazj.in." 
$ \cache ->
-    simpleHTTP nullConf {port = port} $ tazblog cache respath
-
-tazblog :: BlogCache -> String -> ServerPart Response
-tazblog cache resDir =
-  msum
-    [ -- legacy language-specific routes
-      dir "de" $ blogHandler cache,
-      dir "en" $ blogHandler cache,
-      dir "static" $ staticHandler resDir,
-      blogHandler cache,
-      staticHandler resDir,
-      notFound $ toResponse $ showError "Not found" "Page not found"
-    ]
-
-blogHandler :: BlogCache -> ServerPart Response
-blogHandler cache =
-  msum
-    [ path $ \(eId :: Integer) -> showEntry cache $ EntryId eId,
-      nullDir >> showIndex cache,
-      dir "rss" $ nullDir >> showRSS cache,
-      dir "rss.xml" $ nullDir >> showRSS cache
-    ]
-
-staticHandler :: String -> ServerPart Response
-staticHandler resDir = do
-  setHeaderM "cache-control" "max-age=630720000"
-  setHeaderM "expires" "Tue, 20 Jan 2037 04:20:42 GMT"
-  serveDirectory DisableBrowsing [] resDir
-
-showEntry :: BlogCache -> EntryId -> ServerPart Response
-showEntry cache eId = do
-  entry <- getEntry cache eId
-  tryEntry entry
-
-tryEntry :: Maybe Entry -> ServerPart Response
-tryEntry Nothing = notFound $ toResponse $ showError "Not found" "Blog entry not found"
-tryEntry (Just entry) = ok $ toResponse $ blogTemplate eTitle $ renderEntry entry
-  where
-    eTitle = T.append ": " (title entry)
-
-offset :: Maybe Int -> Int
-offset = maybe 0 (pageSize *)
-
-showIndex :: BlogCache -> ServerPart Response
-showIndex cache = do
-  (page :: Maybe Int) <- optional $ lookRead "page"
-  entries <- listEntries cache (offset page) pageSize
-  ok $ toResponse $ blogTemplate ""
-    $ renderEntries entries (Just $ showLinks page)
-
-showRSS :: BlogCache -> ServerPart Response
-showRSS cache = do
-  entries <- listEntries cache 0 4
-  feed <- liftIO $ renderFeed entries
-  setHeaderM "content-type" "text/xml"
-  ok $ toResponse feed
diff --git a/web/tazblog/static/apple-touch-icon.png b/web/tazblog/static/apple-touch-icon.png
deleted file mode 100644
index 22ba058cddd4..000000000000
--- a/web/tazblog/static/apple-touch-icon.png
+++ /dev/null
Binary files differ
diff --git a/web/tazblog/static/favicon.ico b/web/tazblog/static/favicon.ico
deleted file mode 100644
index 2958dd3afcb0..000000000000
--- a/web/tazblog/static/favicon.ico
+++ /dev/null
Binary files differ
diff --git a/web/tazblog/static/keybase.txt b/web/tazblog/static/keybase.txt
deleted file mode 100644
index 661c33e01e73..000000000000
--- a/web/tazblog/static/keybase.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-==================================================================
-https://keybase.io/tazjin
---------------------------------------------------------------------
-
-I hereby claim:
-
- - * I am an admin of http://tazj.in
 - * I am tazjin (https://keybase.io/tazjin) on keybase.
- * I have a public key with fingerprint DCF3 4CFA C1AC 44B8 7E26 3331 36EE 3481 4F6D 294A - -To claim this, I am signing this object: - -{ - "body": { - "key": { - "fingerprint": "dcf34cfac1ac44b87e26333136ee34814f6d294a", - "host": "keybase.io", - "key_id": "36EE34814F6D294A", - "uid": "2268b75a56bb9693d3ef077bc1217900", - "username": "tazjin" - }, - "service": { - "hostname": "tazj.in", - "protocol": "http:" - }, - "type": "web_service_binding", - "version": 1 - }, - "ctime": 1397644545, - "expire_in": 157680000, - "prev": "4973fdda56a6cfa726a813411c915458c652be45dd19283f7a4ae4f9c217df14", - "seqno": 4, - "tag": "signature" -} - -with the aforementioned key, yielding the PGP signature: - ------BEGIN PGP MESSAGE----- -Version: GnuPG v2.0.22 (GNU/Linux) - -owGbwMvMwMWY9pU1Q3bHF2vG0wdeJTEE+8WyVSsl5adUKllVK2Wngqm0zLz01KKC -osy8EiUrpZTkNGOT5LTEZMPEZBOTJAvzVCMzY2NjQ2Oz1FRjEwtDkzSzFCNLk0Ql -HaWM/GKQDqAxSYnFqXqZ+UAxICc+MwUoamzm6gpW72bmAlTvCJQrBUsYGZlZJJmb -JpqaJSVZmlkapxinphmYmyclGxoZmlsaGIAUFqcW5SXmpgJVlyRWZWXmKdXqKAHF -yjKTU0EuBlmMJK8HVKCjVFCUX5KfnJ8DFMwoKSmwAukpqSwAKSpPTYqHao9PysxL -AXoYqKEstag4Mz9PycoQqDK5JBNknqGxpbmZiYmpiamOUmpFQWZRanwmSIWpuZmF -ARCArEktAxppYmlunJaSAvRFohkwtMyNzBItDI1NDA2TLQ2Bui2SzUyNklJNTFNS -DC2NLIzTzBNNElNN0iyTgZ5MSTM0UQJ5qDAvX8nKBOjMxHSgkcWZ6XmJJaVFqUq1 -nUwyLAyMXAxsrEygKGPg4hSARWSZH/8/0573HMdvfH5XxeayYZ2efPb8bw730i1/ -WBU3qru5pKlf3xKmeK5ihtKeT6VXGm3usV2reZWyvO/0joi83oT9P80s88Q6U/vb -vmycHnB7e110v/3OZadu/Sx6+uXk/ZeCR8u+p/+6dNc8XWqX/68t06pnrGKU/BfU -F7X5S/HUy4ysvyZN+v1Jj6NtMvvN1EvPpCpv3kz2tGU1EzpZFfl8Xujq1OopuxZJ -l5kvDlgZ78ezdLZ1+aOlixbsXra4/3fdbZ8XnQX1DatzV18+e2rmMcPKm6qngqIf -Xp8oKTAz+Mg1v6gHP0wLN/Mf3JKjYHnX5U6L/KIvkbsLArtES0r7w1iWZ3OvvSPr -fW6heune1tOb7j3vP+1XeOyV2ekr6pPO3bdrv9X25HbTaqs7z06f0v35fmtQ3uUZ -Z35eLYmaEmb/x/u3vFh6GsvMDocpCTpPlHa0z+xzOGbhzLFO18v21Zd9ISG3Hqtd -F7jaLlWa2W+TsytNnXudVrfCBSbl8zNMfuk2e0Z8i9ix3PmEVa3rTEfhde3qwgtY -dy8rUbzzd5d9ccF63btqO/VMb4oe04x4uCLB5RD3p+8+s77o/T4WP2cFw+0cviX6 -StlJX5f+U3Or3fZY7dUfPcmMJZ/eSs7m+1d5IUbs3jI27olHFzGVvTcsu7w79aOK -SxmXvnEIUwZXgP6BL4LrPDY1rN2V0q1cZj1/efj880rzeu6+OQYA -=xHfH ------END PGP MESSAGE----- - -And finally, I am proving ownership of this host by posting or -appending to this document. 
- -View my publicly-auditable identity here: https://keybase.io/tazjin - -================================================================== diff --git a/web/tazblog/tazblog.cabal b/web/tazblog/tazblog.cabal deleted file mode 100644 index 58aeb7049ed1..000000000000 --- a/web/tazblog/tazblog.cabal +++ /dev/null @@ -1,39 +0,0 @@ -Name: tazblog -Version: 6.0.0 -Synopsis: Tazjin's Blog -License: MIT -Author: Vincent Ambo -Maintainer: mail@tazj.in -Category: Web blog -Build-type: Simple -cabal-version: >= 1.10 - -library - hs-source-dirs: src - default-language: Haskell2010 - ghc-options: -W - exposed-modules: Blog, BlogStore, Server, RSS - build-depends: aeson, - base, - bytestring, - happstack-server, - text, - blaze-html, - dns, - old-locale, - time, - base64-bytestring, - network, - network-uri, - rss, - shakespeare, - markdown - -executable tazblog - hs-source-dirs: blog - main-is: Main.hs - default-language: Haskell2010 - ghc-options: -threaded -rtsopts -with-rtsopts=-N - build-depends: base, - tazblog, - network diff --git a/web/tazblog/tazblog.nix b/web/tazblog/tazblog.nix deleted file mode 100644 index b59cddec07a7..000000000000 --- a/web/tazblog/tazblog.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ mkDerivation, aeson, base, base64-bytestring, blaze-html , bytestring, dns -, happstack-server, markdown, network, network-uri, old-locale, rss -, shakespeare, stdenv, text, time }: -mkDerivation { - pname = "tazblog"; - version = "6.0.0"; - src = ./.; - isLibrary = true; - isExecutable = true; - libraryHaskellDepends = [ - aeson - base - base64-bytestring - blaze-html - bytestring - dns - happstack-server - markdown - network - network-uri - old-locale - rss - shakespeare - text - time - ]; - executableHaskellDepends = [ base network ]; - description = "Tazjin's Blog"; - license = stdenv.lib.licenses.mit; -} diff --git a/web/tazblog_lisp/default.nix b/web/tazblog_lisp/default.nix new file mode 100644 index 000000000000..178c731ccb75 --- /dev/null +++ b/web/tazblog_lisp/default.nix @@ -0,0 +1,21 @@ +{ pkgs, ... }: + +pkgs.nix.buildLisp.library { + name = "tazblog"; + + deps = + # Local dependencies + (with pkgs.lisp; [ dns ]) + + # Third-party dependencies + ++ (with pkgs.third_party.lisp; [ + cl-base64 + cl-json + hunchentoot + iterate + ]); + + srcs = [ + ./store.lisp + ]; +} diff --git a/web/tazblog_lisp/store.lisp b/web/tazblog_lisp/store.lisp new file mode 100644 index 000000000000..01935a68a023 --- /dev/null +++ b/web/tazblog_lisp/store.lisp @@ -0,0 +1,79 @@ +(defpackage #:tazblog/store + (:documentation + "This module implements fetching of individual blog entries from DNS. + Yes, you read that correctly. + + Each blog post is stored as a set of records in a designated DNS + zone. For the production blog, this zone is `blog.tazj.in.`. + + A top-level record at `_posts` contains a list of all published + post IDs. + + For each of these post IDs, there is a record at `_meta.$postID` + that contains the title and number of post chunks. + + For each post chunk, there is a record at `_$chunkID.$postID` that + contains a base64-encoded post fragment. + + This module implements logic for assembling a post out of these + fragments and caching it based on the TTL of its `_meta` record.") + + (:use #:cl #:dns #:iterate) + (:import-from #:cl-base64 #:base64-string-to-string)) +(in-package :tazblog/store) + +;; TODO: +;; +;; - implement DNS caching + +(defvar *tazblog-zone* ".blog.tazj.in." 
+ "DNS zone in which blog posts are persisted.") + +(deftype entry-id () 'string) + +(defun list-entries (&key (offset 0) (count 4) (zone *tazblog-zone*)) + "Retrieve COUNT entry IDs from ZONE at OFFSET." + (let ((answers (lookup-txt (concatenate 'string "_posts" zone)))) + (map 'vector #'dns-rr-rdata (subseq answers offset (+ offset count))))) + +(defun get-entry-meta (entry-id zone) + (let* ((name (concatenate 'string "_meta." entry-id zone)) + (answer (lookup-txt name)) + (encoded (dns-rr-rdata (alexandria:first-elt answer))) + (meta-json (base64-string-to-string encoded))) + (json:decode-json-from-string meta-json))) + +(defun base64-add-padding (string) + "Adds padding to the base64-encoded STRING if required." + (let ((rem (- 4 (mod (length string) 4)))) + (if (= 0 rem) string + (format nil "~A~v@{~A~:*~}" string rem "=")))) + +(defun collect-entry-fragments (entry-id count zone) + (let* ((fragments + (iter (for i from 0 below count) + (for name = (format nil "_~D.~A~A" i entry-id zone)) + (collect (alexandria:first-elt (lookup-txt name))))) + (decoded (map 'list (lambda (f) + (base64-string-to-string + (base64-add-padding (dns-rr-rdata f)))) + fragments))) + (apply #'concatenate 'string decoded))) + +(defstruct entry + (id "" :type string) + (title "" :type string) + (content "" :type string) + (date "" :type string)) + +(defun get-entry (entry-id &optional (zone *tazblog-zone*)) + "Retrieve the entry at ENTRY-ID from ZONE." + (let* ((meta (get-entry-meta entry-id zone)) + (count (cdr (assoc :c meta))) + (title (cdr (assoc :t meta))) + (date (cdr (assoc :d meta))) + (content (collect-entry-fragments entry-id count zone))) + (make-entry :id entry-id + :date date + :title title + :content content))) |