author     Eelco Dolstra <eelco.dolstra@logicblox.com>  2013-08-07 11:51 +0000
committer  Eelco Dolstra <eelco.dolstra@logicblox.com>  2013-08-07 12:02 +0200
commit     a583a2bc59a4ee2b067e5520f6c5bc0c61852c32
tree       137be66f58b664dbf0aa21138000d711d5e00162  /src/libutil/affinity.cc
parent     263d6682224f516aed74286453c5e2e097a38aa6
Run the daemon worker on the same CPU as the client
On a system with multiple CPUs, running Nix operations through the
daemon is significantly slower than "direct" mode:

$ NIX_REMOTE= nix-instantiate '<nixos>' -A system
real    0m0.974s
user    0m0.875s
sys     0m0.088s

$ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
real    0m2.118s
user    0m1.463s
sys     0m0.218s

The main reason seems to be that the client and the worker are
rescheduled onto different CPUs after every call to the worker,
presumably losing cache locality on each round trip.  This patch adds
a hack to lock them to the same CPU.  With this, the overhead of going
through the daemon is very small:

$ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
real    0m1.074s
user    0m0.809s
sys     0m0.098s
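The mechanism is a simple handshake: the client pins itself to the CPU
it is currently running on and sends that CPU number to the daemon,
which pins the worker to the same core.  A minimal sketch of that
handshake, using hypothetical helpers sendAffinity/receiveAffinity and
writeInt/readInt as stand-ins for whatever wire helpers the protocol
actually uses (the real plumbing lives outside this file):

    // Client side (sketch): pin this process to its current CPU and
    // tell the daemon which one it is.
    #include "affinity.hh"
    #include "serialise.hh"

    void sendAffinity(nix::FdSink & to)
    {
        int cpu = nix::lockToCurrentCPU();     // -1 if unsupported or disabled
        nix::writeInt(cpu != -1 ? 1 : 0, to);  // "do I have a CPU to report?"
        if (cpu != -1) nix::writeInt(cpu, to);
    }

    // Daemon side (sketch): pin the worker to the client's CPU.
    void receiveAffinity(nix::FdSource & from)
    {
        if (nix::readInt(from) == 1)
            nix::setAffinityTo(nix::readInt(from));
    }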
Diffstat (limited to 'src/libutil/affinity.cc')
 src/libutil/affinity.cc | 56 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/src/libutil/affinity.cc b/src/libutil/affinity.cc
new file mode 100644
index 0000000000..3a20fd2774
--- /dev/null
+++ b/src/libutil/affinity.cc
@@ -0,0 +1,56 @@
+#include "types.hh"
+#include "util.hh"
+#include "affinity.hh"
+
+#if HAVE_SCHED_H
+#include <sched.h>
+#endif
+
+namespace nix {
+
+
+#if HAVE_SCHED_SETAFFINITY
+static bool didSaveAffinity = false;
+static cpu_set_t savedAffinity;
+#endif
+
+
+void setAffinityTo(int cpu)
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
+    didSaveAffinity = true;
+    printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
+    cpu_set_t newAffinity;
+    CPU_ZERO(&newAffinity);
+    CPU_SET(cpu, &newAffinity);
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
+        printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
+#endif
+}
+
+
+int lockToCurrentCPU()
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (getEnv("NIX_AFFINITY_HACK", "1") == "1") {
+        int cpu = sched_getcpu();
+        if (cpu != -1) setAffinityTo(cpu);
+        return cpu;
+    }
+#endif
+    return -1;
+}
+
+
+void restoreAffinity()
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (!didSaveAffinity) return;
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
+        printMsg(lvlError, "failed to restore CPU affinity");
+#endif
+}
+
+
+}
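For reference, a minimal affinity.hh matching the definitions above
might look like this (a sketch inferred from this file; the header that
actually accompanies the commit is not shown in this diff):

    #pragma once

    namespace nix {

    /* Pin the calling thread to the given CPU, saving the previous
       affinity mask so that restoreAffinity() can undo it. */
    void setAffinityTo(int cpu);

    /* Pin the calling thread to the CPU it is currently running on.
       Returns that CPU, or -1 if the platform lacks sched_setaffinity()
       or the hack is disabled. */
    int lockToCurrentCPU();

    /* Restore the affinity mask saved by the last setAffinityTo(). */
    void restoreAffinity();

    }

Note that the hack can be disabled at run time by setting
NIX_AFFINITY_HACK=0 in the environment, in which case lockToCurrentCPU()
returns -1 without touching the affinity mask.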