Diffstat (limited to 'nixos'):
 nixos/doc/manual/release-notes/rl-1609.xml                    |   8
 nixos/doc/manual/release-notes/rl-1703.xml                    |   7
 nixos/modules/module-list.nix                                 |   3
 nixos/modules/security/acme.xml                               |  21
 nixos/modules/services/databases/openldap.nix                 |   9
 nixos/modules/services/misc/gitlab.nix                        |   1
 nixos/modules/services/misc/nix-optimise.nix                  |   2
 nixos/modules/services/monitoring/riemann-tools.nix           |   1
 nixos/modules/services/network-filesystems/cachefilesd.nix    |  59
 nixos/modules/services/networking/avahi-daemon.nix            |  15
 nixos/modules/services/networking/ssh/sshd.nix                |  40
 nixos/modules/services/networking/supplicant.nix              |   3
 nixos/modules/services/networking/syncthing.nix               | 115
 nixos/modules/services/networking/wpa_supplicant.nix          |   6
 nixos/modules/services/search/hound.nix                       |   4
 nixos/modules/services/web-apps/quassel-webserver.nix         |  99
 nixos/modules/services/web-servers/nginx/default.nix          |   2
 nixos/modules/services/x11/display-managers/lightdm.nix       |   3
 nixos/modules/services/x11/window-managers/bspwm-unstable.nix |  48
 nixos/modules/services/x11/window-managers/default.nix        |   1
 nixos/modules/services/x11/xserver.nix                        |   1
 nixos/modules/system/boot/kernel.nix                          |   4
 nixos/modules/system/boot/systemd-nspawn.nix                  |  71
 nixos/modules/system/boot/systemd-unit-options.nix            |   3
 nixos/modules/system/boot/systemd.nix                         |   4
 25 files changed, 399 insertions(+), 131 deletions(-)
diff --git a/nixos/doc/manual/release-notes/rl-1609.xml b/nixos/doc/manual/release-notes/rl-1609.xml
index 18b9a333e23..ade7d5581ce 100644
--- a/nixos/doc/manual/release-notes/rl-1609.xml
+++ b/nixos/doc/manual/release-notes/rl-1609.xml
@@ -164,14 +164,6 @@ following incompatible changes:</para>
       PHP has been upgraded to 7.0
     </para>
   </listitem>
-
-  <listitem>
-    <para>PHP now scans for extra configuration .ini files in /etc/php.d
-    instead of /etc. This prevents accidentally loading non-PHP .ini files
-    that may be in /etc.
-    </para>
-  </listitem>
-
 </itemizedlist>
 
 
diff --git a/nixos/doc/manual/release-notes/rl-1703.xml b/nixos/doc/manual/release-notes/rl-1703.xml
index 21cea77f876..efff8b895a1 100644
--- a/nixos/doc/manual/release-notes/rl-1703.xml
+++ b/nixos/doc/manual/release-notes/rl-1703.xml
@@ -61,6 +61,13 @@ following incompatible changes:</para>
       <literal>strippedName</literal>.
     </para>
   </listitem>
+
+  <listitem>
+    <para>PHP now scans for extra configuration .ini files in /etc/php.d
+    instead of /etc. This prevents accidentally loading non-PHP .ini files
+    that may be in /etc.
+    </para>
+  </listitem>
 </itemizedlist>
 
 
diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix
index 09b938a69fe..7c5030c9479 100644
--- a/nixos/modules/module-list.nix
+++ b/nixos/modules/module-list.nix
@@ -313,6 +313,7 @@
   ./services/monitoring/uptime.nix
   ./services/monitoring/zabbix-agent.nix
   ./services/monitoring/zabbix-server.nix
+  ./services/network-filesystems/cachefilesd.nix
   ./services/network-filesystems/drbd.nix
   ./services/network-filesystems/netatalk.nix
   ./services/network-filesystems/nfsd.nix
@@ -494,6 +495,7 @@
   ./services/web-apps/pump.io.nix
   ./services/web-apps/tt-rss.nix
   ./services/web-apps/selfoss.nix
+  ./services/web-apps/quassel-webserver.nix
   ./services/web-servers/apache-httpd/default.nix
   ./services/web-servers/caddy.nix
   ./services/web-servers/fcgiwrap.nix
@@ -533,6 +535,7 @@
   ./services/x11/window-managers/fluxbox.nix
   ./services/x11/window-managers/icewm.nix
   ./services/x11/window-managers/bspwm.nix
+  ./services/x11/window-managers/bspwm-unstable.nix
   ./services/x11/window-managers/metacity.nix
   ./services/x11/window-managers/none.nix
   ./services/x11/window-managers/twm.nix
diff --git a/nixos/modules/security/acme.xml b/nixos/modules/security/acme.xml
index 15ed4c04a23..226cf0382da 100644
--- a/nixos/modules/security/acme.xml
+++ b/nixos/modules/security/acme.xml
@@ -74,9 +74,30 @@ options for the <literal>security.acme</literal> module.</para>
 </para>
 
 <programlisting>
+security.acme.certs."foo.example.com" = {
+  webroot = "/var/www/challenges";
+  email = "foo@example.com";
+  user = "nginx";
+  group = "nginx";
+  postRun = "systemctl restart nginx.service";
+};
 services.nginx.httpConfig = ''
   server {
     server_name foo.example.com;
+    listen 80;
+    listen [::]:80;
+
+    location /.well-known/acme-challenge {
+      root /var/www/challenges;
+    }
+
+    location / {
+      return 301 https://$host$request_uri;
+    }
+  }
+
+  server {
+    server_name foo.example.com;
     listen 443 ssl;
     ssl_certificate     ${config.security.acme.directory}/foo.example.com/fullchain.pem;
     ssl_certificate_key ${config.security.acme.directory}/foo.example.com/key.pem;
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index 9f22aa7c92b..875ed0f39db 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -53,6 +53,13 @@ in
         description = "The database directory.";
       };
 
+      configDir = mkOption {
+        type = types.path;
+        default = "";
+        description = "Use this optional config directory instead of using slapd.conf";
+        example = "/var/db/slapd.d";
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -96,7 +103,7 @@ in
         mkdir -p ${cfg.dataDir}
         chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
       '';
-      serviceConfig.ExecStart = "${openldap.out}/libexec/slapd -u ${cfg.user} -g ${cfg.group} -d 0 -h \"${concatStringsSep " " cfg.urlList}\" -f ${configFile}";
+      serviceConfig.ExecStart = "${openldap.out}/libexec/slapd -u ${cfg.user} -g ${cfg.group} -d 0 -h \"${concatStringsSep " " cfg.urlList}\" ${if cfg.configDir == "" then "-f "+configFile else "-F "+cfg.configDir}";
     };
 
     users.extraUsers.openldap =
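Usage sketch for the new configDir option in configuration.nix (illustrative values, not part of this diff). Setting configDir makes slapd start with -F <dir> instead of -f slapd.conf:

    services.openldap = {
      enable = true;
      configDir = "/var/db/slapd.d";   # hypothetical OLC-style config directory
    };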
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index f8881233dce..3e4584c7a51 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -463,6 +463,7 @@ in {
 
     systemd.services.gitlab = {
       after = [ "network.target" "postgresql.service" "redis.service" ];
+      requires = [ "gitlab-sidekiq.service" ];
       wantedBy = [ "multi-user.target" ];
       environment = gitlabEnv;
       path = with pkgs; [
diff --git a/nixos/modules/services/misc/nix-optimise.nix b/nixos/modules/services/misc/nix-optimise.nix
index 87ce05c5a11..a76bfd9f1f1 100644
--- a/nixos/modules/services/misc/nix-optimise.nix
+++ b/nixos/modules/services/misc/nix-optimise.nix
@@ -41,7 +41,7 @@ in
     systemd.services.nix-optimise =
       { description = "Nix Store Optimiser";
         serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
-        startAt = optional cfg.automatic cfg.dates;
+        startAt = optionals cfg.automatic cfg.dates;
       };
 
   };
diff --git a/nixos/modules/services/monitoring/riemann-tools.nix b/nixos/modules/services/monitoring/riemann-tools.nix
index ce277f09464..de858813a76 100644
--- a/nixos/modules/services/monitoring/riemann-tools.nix
+++ b/nixos/modules/services/monitoring/riemann-tools.nix
@@ -50,6 +50,7 @@ in {
 
     systemd.services.riemann-health = {
       wantedBy = [ "multi-user.target" ];
+      path = [ procps ];
       serviceConfig = {
         User = "riemanntools";
         ExecStart = "${healthLauncher}/bin/riemann-health";
diff --git a/nixos/modules/services/network-filesystems/cachefilesd.nix b/nixos/modules/services/network-filesystems/cachefilesd.nix
new file mode 100644
index 00000000000..61981340840
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/cachefilesd.nix
@@ -0,0 +1,59 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.cachefilesd;
+
+  cfgFile = pkgs.writeText "cachefilesd.conf" ''
+    dir ${cfg.cacheDir}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  options = {
+    services.cachefilesd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable cachefilesd network filesystems caching daemon.";
+      };
+
+      cacheDir = mkOption {
+        type = types.str;
+        default = "/var/cache/fscache";
+        description = "Directory to contain filesystem cache.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "brun 10%";
+        description = "Additional configuration file entries. See cachefilesd.conf(5) for more information.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.cachefilesd = {
+      description = "Local network file caching management daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.kmod pkgs.cachefilesd ];
+      script = ''
+        modprobe -qab cachefiles
+        mkdir -p ${cfg.cacheDir}
+        chmod 700 ${cfg.cacheDir}
+        exec cachefilesd -n -f ${cfgFile}
+      '';
+    };
+
+  };
+}
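Usage sketch for the new cachefilesd module (illustrative values, not part of this diff); it assumes the running kernel provides the cachefiles module that the unit modprobes:

    services.cachefilesd = {
      enable = true;
      cacheDir = "/var/cache/fscache";   # default, shown for clarity
      extraConfig = ''
        brun 10%
      '';
    };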
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index ecc091d1d03..6a786e75bbc 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -175,11 +175,20 @@ in
 
     environment.systemPackages = [ pkgs.avahi ];
 
+    systemd.sockets.avahi-daemon =
+      { description = "Avahi mDNS/DNS-SD Stack Activation Socket";
+        listenStreams = [ "/var/run/avahi-daemon/socket" ];
+        wantedBy = [ "sockets.target" ];
+      };
+
     systemd.services.avahi-daemon =
-      { description = "Avahi daemon";
+      { description = "Avahi mDNS/DNS-SD Stack";
         wantedBy = [ "multi-user.target" ];
-        # Receive restart event after resume
-        partOf = [ "post-resume.target" ];
+        requires = [ "avahi-daemon.socket" ];
+
+        serviceConfig."NotifyAccess" = "main";
+        serviceConfig."BusName" = "org.freedesktop.Avahi";
+        serviceConfig."Type" = "dbus";
 
         path = [ pkgs.coreutils pkgs.avahi ];
 
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index 3e9fae35847..81941ce1cfb 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -242,7 +242,7 @@ in
 
     systemd =
       let
-        service =
+        sshd-service =
           { description = "SSH Daemon";
 
             wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
@@ -253,16 +253,8 @@ in
 
             environment.LD_LIBRARY_PATH = nssModulesPath;
 
-            preStart =
-              ''
-                mkdir -m 0755 -p /etc/ssh
-
-                ${flip concatMapStrings cfg.hostKeys (k: ''
-                  if ! [ -f "${k.path}" ]; then
-                      ssh-keygen -t "${k.type}" ${if k ? bits then "-b ${toString k.bits}" else ""} -f "${k.path}" -N ""
-                  fi
-                '')}
-              '';
+            wants = [ "sshd-keygen.service" ];
+            after = [ "sshd-keygen.service" ];
 
             serviceConfig =
               { ExecStart =
@@ -278,6 +270,26 @@ in
                 PIDFile = "/run/sshd.pid";
               });
           };
+
+        sshd-keygen-service =
+          { description = "SSH Host Key Generation";
+            path = [ cfgc.package ];
+            script =
+            ''
+              mkdir -m 0755 -p /etc/ssh
+              ${flip concatMapStrings cfg.hostKeys (k: ''
+                if ! [ -f "${k.path}" ]; then
+                  ssh-keygen -t "${k.type}" ${if k ? bits then "-b ${toString k.bits}" else ""} -f "${k.path}" -N ""
+                fi
+              '')}
+            '';
+
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = "yes";
+            };
+          };
+
       in
 
       if cfg.startWhenNeeded then {
@@ -289,11 +301,13 @@ in
             socketConfig.Accept = true;
           };
 
-        services."sshd@" = service;
+        services.sshd-keygen = sshd-keygen-service;
+        services."sshd@" = sshd-service;
 
       } else {
 
-        services.sshd = service;
+        services.sshd-keygen = sshd-keygen-service;
+        services.sshd = sshd-service;
 
       };
 
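Usage sketch showing the options the new sshd-keygen oneshot consumes (illustrative values, not part of this diff); host key generation now runs in its own unit ordered before sshd:

    services.openssh = {
      enable = true;
      startWhenNeeded = true;   # socket-activated sshd@; sshd-keygen is still registered
      hostKeys = [
        { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }   # hypothetical key spec
      ];
    };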
diff --git a/nixos/modules/services/networking/supplicant.nix b/nixos/modules/services/networking/supplicant.nix
index e433ec7c5b9..0c459fb1dd0 100644
--- a/nixos/modules/services/networking/supplicant.nix
+++ b/nixos/modules/services/networking/supplicant.nix
@@ -34,7 +34,8 @@ let
       '';
     in
       { description = "Supplicant ${iface}${optionalString (iface=="WLAN"||iface=="LAN") " %I"}";
-        wantedBy = [ "network.target" ] ++ deps;
+        wantedBy = [ "multi-user.target" ] ++ deps;
+        wants = [ "network.target" ];
         bindsTo = deps;
         after = deps;
         before = [ "network.target" ];
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index 8a430734319..dcdc203bdc6 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -3,46 +3,11 @@
 with lib;
 
 let
-
   cfg = config.services.syncthing;
   defaultUser = "syncthing";
-
-  header = {
-    description = "Syncthing service";
-    after = [ "network.target" ];
-    environment = {
-      STNORESTART = "yes";
-      STNOUPGRADE = "yes";
-      inherit (cfg) all_proxy;
-    } // config.networking.proxy.envVars;
-  };
-
-  service = {
-    Restart = "on-failure";
-    SuccessExitStatus = "2 3 4";
-    RestartForceExitStatus="3 4";
-  };
-
-  iNotifyHeader = {
-    description = "Syncthing Inotify File Watcher service";
-    after = [ "network.target" "syncthing.service" ];
-    requires = [ "syncthing.service" ];
-  };
-
-  iNotifyService = {
-    SuccessExitStatus = "2";
-    RestartForceExitStatus = "3";
-    Restart = "on-failure";
-  };
-
-in
-
-{
-
+in {
   ###### interface
-
   options = {
-
     services.syncthing = {
 
       enable = mkEnableOption ''
@@ -100,6 +65,19 @@ in
         '';
       };
 
+      openDefaultPorts = mkOption {
+        type = types.bool;
+        default = false;
+        example = literalExample "true";
+        description = ''
+          Open the default ports in the firewall:
+            - TCP 22000 for transfers
+            - UDP 21027 for discovery
+          If multiple users are running syncthing on this machine, you will need to manually open a set of ports for each instance and leave this disabled.
+          Alternatively, if you are running only a single instance on this machine using the default ports, enable this.
+        '';
+      };
+
       package = mkOption {
         type = types.package;
         default = pkgs.syncthing;
@@ -117,6 +95,14 @@ in
 
   config = mkIf cfg.enable {
 
+    networking.firewall = mkIf cfg.openDefaultPorts {
+      allowedTCPPorts = [ 22000 ];
+      allowedUDPPorts = [ 21027 ];
+    };
+
+    systemd.packages = [ pkgs.syncthing ]
+                       ++ lib.optional cfg.useInotify pkgs.syncthing-inotify;
+
     users = mkIf (cfg.user == defaultUser) {
       extraUsers."${defaultUser}" =
         { group = cfg.group;
@@ -131,39 +117,44 @@ in
     };
 
     systemd.services = {
-      syncthing = mkIf cfg.systemService (header // {
-          wants = mkIf cfg.useInotify [ "syncthing-inotify.service" ];
-          wantedBy = [ "multi-user.target" ];
-          serviceConfig = service // {
-            User = cfg.user;
-            Group = cfg.group;
-            PermissionsStartOnly = true;
-            ExecStart = "${cfg.package}/bin/syncthing -no-browser -home=${cfg.dataDir}";
-          };
-      });
-
-      syncthing-inotify = mkIf (cfg.systemService && cfg.useInotify) (iNotifyHeader // {
+      syncthing = mkIf cfg.systemService {
+        description = "Syncthing service";
+        after = [ "network.target" ];
+        environment = {
+          STNORESTART = "yes";
+          STNOUPGRADE = "yes";
+          inherit (cfg) all_proxy;
+        } // config.networking.proxy.envVars;
+        wants = mkIf cfg.useInotify [ "syncthing-inotify.service" ];
         wantedBy = [ "multi-user.target" ];
-        serviceConfig = iNotifyService // {
+        serviceConfig = {
+          Restart = "on-failure";
+          SuccessExitStatus = "2 3 4";
+          RestartForceExitStatus="3 4";
           User = cfg.user;
-          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -home=${cfg.dataDir} -logflags=0";
+          Group = cfg.group;
+          PermissionsStartOnly = true;
+          ExecStart = "${cfg.package}/bin/syncthing -no-browser -home=${cfg.dataDir}";
         };
-      });
-    };
+      };
 
-    systemd.user.services = {
-      syncthing = header // {
-        serviceConfig = service // {
-          ExecStart = "${cfg.package}/bin/syncthing -no-browser";
-        };
+      syncthing-resume = {
+        wantedBy = [ "suspend.target" ];
       };
 
-      syncthing-inotify = mkIf cfg.useInotify (iNotifyHeader // {
-        serviceConfig = iNotifyService // {
-          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -logflags=0";
+      syncthing-inotify = mkIf (cfg.systemService && cfg.useInotify) {
+        description = "Syncthing Inotify File Watcher service";
+        after = [ "network.target" "syncthing.service" ];
+        requires = [ "syncthing.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          SuccessExitStatus = "2";
+          RestartForceExitStatus = "3";
+          Restart = "on-failure";
+          User = cfg.user;
+          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -home=${cfg.dataDir} -logflags=0";
         };
-      });
+      };
     };
-
   };
 }
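Usage sketch for the new openDefaultPorts option (illustrative, not part of this diff); suitable only when a single syncthing instance uses the default ports:

    services.syncthing = {
      enable = true;
      openDefaultPorts = true;   # opens TCP 22000 (transfers) and UDP 21027 (discovery)
    };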
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index a344d785546..5657b91c1e7 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -128,9 +128,11 @@ in {
     in {
       description = "WPA Supplicant";
 
-      after = [ "network.target" ] ++ lib.concatMap deviceUnit ifaces;
+      after = lib.concatMap deviceUnit ifaces;
+      before = [ "network.target" ];
+      wants = [ "network.target" ];
       requires = lib.concatMap deviceUnit ifaces;
-      wantedBy = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
 
       path = [ pkgs.wpa_supplicant ];
 
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 4389f17668b..708f57a5eb7 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -57,6 +57,10 @@ in {
 
       config = mkOption {
         type = types.str;
+        description = ''
+          The full configuration of the Hound daemon. Note the dbpath
+          should be an absolute path to a writable location on disk.
+        '';
         example = ''
           {
              "max-concurrent-indexers" : 2,
diff --git a/nixos/modules/services/web-apps/quassel-webserver.nix b/nixos/modules/services/web-apps/quassel-webserver.nix
new file mode 100644
index 00000000000..7de9480d4c4
--- /dev/null
+++ b/nixos/modules/services/web-apps/quassel-webserver.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.quassel-webserver;
+  quassel-webserver = cfg.pkg;
+  settings = ''
+    module.exports = {
+      default: {
+        host: '${cfg.quasselCoreHost}',  // quasselcore host
+        port: ${toString cfg.quasselCorePort},  // quasselcore port
+        initialBacklogLimit: ${toString cfg.initialBacklogLimit},  // Amount of backlogs to fetch per buffer on connection
+        backlogLimit: ${toString cfg.backlogLimit},  // Amount of backlogs to fetch per buffer after first retrieval
+        securecore: ${if cfg.secureCore then "true" else "false"},  // Connect to the core using SSL
+        theme: '${cfg.theme}'  // Default UI theme
+      },
+      themes: ['default', 'darksolarized'],  //  Available themes
+      forcedefault: ${if cfg.forceHostAndPort then "true" else "false"},  // Will force default host and port to be used, and will hide the corresponding fields in the UI
+      prefixpath: '${cfg.prefixPath}'  // Configure this if you use a reverse proxy
+    };
+  '';
+  settingsFile = pkgs.writeText "settings-user.js" settings;
+in {
+  options = {
+    services.quassel-webserver = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Whether to enable the quassel webclient service";
+      };
+      pkg = mkOption {
+        default = pkgs.quassel-webserver;
+        description = "The quassel-webserver package";
+      };
+      quasselCoreHost = mkOption {
+        default = "";
+        type = types.str;
+        description = "The default host of the quassel core";
+      };
+      quasselCorePort = mkOption {
+        default = 4242;
+        type = types.int;
+        description = "The default quassel core port";
+      };
+      initialBacklogLimit = mkOption {
+        default = 20;
+        type = types.int;
+        description = "Amount of backlogs to fetch per buffer on connection";
+      };
+      backlogLimit = mkOption {
+        default = 100;
+        type = types.int;
+        description = "Amount of backlogs to fetch per buffer after first retrieval";
+      };
+      secureCore = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Connect to the core using SSL";
+      };
+      theme = mkOption {
+        default = "default";
+        type = types.str;
+        description = "default or darksolarized";
+      };
+      prefixPath = mkOption {
+        default = "";
+        type = types.str;
+        description = "Configure this if you use a reverse proxy. Must start with a '/'";
+        example = "/quassel";
+      };
+      port = mkOption {
+        default = 60443;
+        type = types.int;
+        description = "The port the quassel webserver should listen on";
+      };
+      useHttps = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Whether the quassel webserver connection should be a https connection";
+      };
+      forceHostAndPort = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Force the users to use the quasselCoreHost and quasselCorePort defaults";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.quassel-webserver = {
+      description = "A web server/client for Quassel";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${quassel-webserver}/lib/node_modules/quassel-webserver/bin/www -p ${toString cfg.port} -m ${if cfg.useHttps == true then "https" else "http"} -c ${settingsFile}";
+      };
+    };
+  };
+}
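Usage sketch for the new quassel-webserver module (hostnames and paths are hypothetical, not part of this diff):

    services.quassel-webserver = {
      enable = true;
      quasselCoreHost = "quassel.example.org";   # hypothetical core host
      quasselCorePort = 4242;
      prefixPath = "/quassel";                   # only needed behind a reverse proxy
    };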
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 7b822619a2f..166e5a6b2ce 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -392,6 +392,8 @@ in
     security.acme.certs = filterAttrs (n: v: v != {}) (
       mapAttrs (vhostName: vhostConfig:
         optionalAttrs vhostConfig.enableACME {
+          user = cfg.user;
+          group = cfg.group;
           webroot = vhostConfig.acmeRoot;
           extraDomains = genAttrs vhostConfig.serverAliases (alias: null);
           postRun = ''
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 33cd51f37c6..1d309aa3429 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -207,6 +207,9 @@ in
     services.dbus.enable = true;
     services.dbus.packages = [ lightdm ];
 
+    # lightdm uses the accounts daemon to remember language/window-manager per user
+    services.accounts-daemon.enable = true;
+
     security.pam.services.lightdm = {
       allowNullPassword = true;
       startSession = true;
diff --git a/nixos/modules/services/x11/window-managers/bspwm-unstable.nix b/nixos/modules/services/x11/window-managers/bspwm-unstable.nix
new file mode 100644
index 00000000000..3282e0d0851
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/bspwm-unstable.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.bspwm-unstable;
+in
+
+{
+  options = {
+    services.xserver.windowManager.bspwm-unstable = {
+        enable = mkEnableOption "bspwm-unstable";
+        startThroughSession = mkOption {
+            type = with types; bool;
+            default = false;
+            description = "
+                Start the window manager through the script defined in 
+                sessionScript. Defaults to the bspwm-session script
+                provided by bspwm.
+            ";
+        };
+        sessionScript = mkOption {
+            default = "${pkgs.bspwm-unstable}/bin/bspwm-session";
+            defaultText = "(pkgs.bspwm-unstable)/bin/bspwm-session";
+            description = "
+                The start-session script to use. Defaults to the
+                provided bspwm-session script from the bspwm package.
+
+                Does nothing unless `bspwm.startThroughSession` is enabled
+            ";
+        };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "bspwm-unstable";
+      start = if cfg.startThroughSession
+        then cfg.sessionScript
+        else ''
+            export _JAVA_AWT_WM_NONREPARENTING=1
+            SXHKD_SHELL=/bin/sh ${pkgs.sxhkd-unstable}/bin/sxhkd -f 100 &
+            ${pkgs.bspwm-unstable}/bin/bspwm
+        '';
+    };
+    environment.systemPackages = [ pkgs.bspwm-unstable ];
+  };
+}
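Usage sketch for the new bspwm-unstable window manager module (illustrative, not part of this diff):

    services.xserver.windowManager.bspwm-unstable = {
      enable = true;
      startThroughSession = true;   # start via the bundled bspwm-session script
    };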
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index f005decfa33..dabe2c26a72 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -10,6 +10,7 @@ in
   imports = [
     ./afterstep.nix
     ./bspwm.nix
+    ./bspwm-unstable.nix
     ./compiz.nix
     ./dwm.nix
     ./exwm.nix
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 298f30858ce..db60e47c3ea 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -515,6 +515,7 @@ in
       { description = "X11 Server";
 
         after = [ "systemd-udev-settle.service" "local-fs.target" "acpid.service" "systemd-logind.service" ];
+        wants = [ "systemd-udev-settle.service" ];
 
         restartIfChanged = false;
 
diff --git a/nixos/modules/system/boot/kernel.nix b/nixos/modules/system/boot/kernel.nix
index 51b3b8a3dca..e751ff141f7 100644
--- a/nixos/modules/system/boot/kernel.nix
+++ b/nixos/modules/system/boot/kernel.nix
@@ -214,8 +214,8 @@ in
         "hid_generic" "hid_lenovo"
         "hid_apple" "hid_logitech_dj" "hid_lenovo_tpkbd" "hid_roccat"
 
-        # Misc. stuff.
-        "pcips2" "atkbd"
+        # Misc. keyboard stuff.
+        "pcips2" "atkbd" "i8042"
 
         # Temporary fix for https://github.com/NixOS/nixpkgs/issues/18451
         # Remove as soon as upstream gets fixed - marking it:
diff --git a/nixos/modules/system/boot/systemd-nspawn.nix b/nixos/modules/system/boot/systemd-nspawn.nix
index 2527ab35719..f765db275e7 100644
--- a/nixos/modules/system/boot/systemd-nspawn.nix
+++ b/nixos/modules/system/boot/systemd-nspawn.nix
@@ -41,41 +41,43 @@ let
   ];
 
   instanceOptions = {
+    options = {
+
+      execConfig = mkOption {
+        default = {};
+        example = { Parameters = "/bin/sh"; };
+        type = types.addCheck (types.attrsOf unitOption) checkExec;
+        description = ''
+          Each attribute in this set specifies an option in the
+          <literal>[Exec]</literal> section of this unit. See
+          <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry> for details.
+        '';
+      };
 
-    execConfig = mkOption {
-      default = {};
-      example = { Parameters = "/bin/sh"; };
-      type = types.addCheck (types.attrsOf unitOption) checkExec;
-      description = ''
-        Each attribute in this set specifies an option in the
-        <literal>[Exec]</literal> section of this unit. See
-        <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
-        <manvolnum>5</manvolnum></citerefentry> for details.
-      '';
-    };
-
-    filesConfig = mkOption {
-      default = {};
-      example = { Bind = [ "/home/alice" ]; };
-      type = types.addCheck (types.attrsOf unitOption) checkFiles;
-      description = ''
-        Each attribute in this set specifies an option in the
-        <literal>[Files]</literal> section of this unit. See
-        <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
-        <manvolnum>5</manvolnum></citerefentry> for details.
-      '';
-    };
+      filesConfig = mkOption {
+        default = {};
+        example = { Bind = [ "/home/alice" ]; };
+        type = types.addCheck (types.attrsOf unitOption) checkFiles;
+        description = ''
+          Each attribute in this set specifies an option in the
+          <literal>[Files]</literal> section of this unit. See
+          <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry> for details.
+        '';
+      };
 
-    networkConfig = mkOption {
-      default = {};
-      example = { Private = false; };
-      type = types.addCheck (types.attrsOf unitOption) checkNetwork;
-      description = ''
-        Each attribute in this set specifies an option in the
-        <literal>[Network]</literal> section of this unit. See
-        <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
-        <manvolnum>5</manvolnum></citerefentry> for details.
-      '';
+      networkConfig = mkOption {
+        default = {};
+        example = { Private = false; };
+        type = types.addCheck (types.attrsOf unitOption) checkNetwork;
+        description = ''
+          Each attribute in this set specifies an option in the
+          <literal>[Network]</literal> section of this unit. See
+          <citerefentry><refentrytitle>systemd.nspawn</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry> for details.
+        '';
+      };
     };
 
   };
@@ -99,8 +101,7 @@ in {
 
     systemd.nspawn = mkOption {
       default = {};
-      type = types.attrsOf types.optionSet;
-      options = [ instanceOptions ];
+      type = with types; attrsOf (submodule instanceOptions);
       description = "Definition of systemd-nspawn configurations.";
     };
 
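Usage sketch for the submodule-typed systemd.nspawn option (container name and values are hypothetical, not part of this diff):

    systemd.nspawn."mycontainer" = {
      execConfig.Parameters = "/bin/sh";
      filesConfig.Bind = [ "/home/alice" ];
      networkConfig.Private = false;
    };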
diff --git a/nixos/modules/system/boot/systemd-unit-options.nix b/nixos/modules/system/boot/systemd-unit-options.nix
index 731b1701e00..4c3fc30358c 100644
--- a/nixos/modules/system/boot/systemd-unit-options.nix
+++ b/nixos/modules/system/boot/systemd-unit-options.nix
@@ -316,7 +316,7 @@ in rec {
 
     startAt = mkOption {
       type = with types; either str (listOf str);
-      default = "";
+      default = [];
       example = "Sun 14:00:00";
       description = ''
         Automatically start this unit at the given date/time, which
@@ -326,6 +326,7 @@ in rec {
         to adding a corresponding timer unit with
         <option>OnCalendar</option> set to the value given here.
       '';
+      apply = v: if isList v then v else [ v ];
     };
 
   };
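Usage sketch for startAt now defaulting to a list (service name and schedule are hypothetical, not part of this diff); a single string is still accepted and wrapped into a list by the apply function:

    systemd.services.my-backup = {
      script = "echo backup";                       # hypothetical oneshot payload
      startAt = [ "Sun 14:00:00" "Wed 02:00:00" ];  # becomes OnCalendar entries in the generated timer
    };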
diff --git a/nixos/modules/system/boot/systemd.nix b/nixos/modules/system/boot/systemd.nix
index d44c2e234b0..d1f3f923e5e 100644
--- a/nixos/modules/system/boot/systemd.nix
+++ b/nixos/modules/system/boot/systemd.nix
@@ -777,7 +777,7 @@ in
         { wantedBy = [ "timers.target" ];
           timerConfig.OnCalendar = service.startAt;
         })
-        (filterAttrs (name: service: service.enable && service.startAt != "") cfg.services);
+        (filterAttrs (name: service: service.enable && service.startAt != []) cfg.services);
 
     # Generate timer units for all services that have a ‘startAt’ value.
     systemd.user.timers =
@@ -785,7 +785,7 @@ in
         { wantedBy = [ "timers.target" ];
           timerConfig.OnCalendar = service.startAt;
         })
-        (filterAttrs (name: service: service.startAt != "") cfg.user.services);
+        (filterAttrs (name: service: service.startAt != []) cfg.user.services);
 
     systemd.sockets.systemd-journal-gatewayd.wantedBy =
       optional config.services.journald.enableHttpGateway "sockets.target";