Diffstat (limited to 'nixos/modules/services')
 -rw-r--r--  nixos/modules/services/databases/openldap.nix                 |   9
 -rw-r--r--  nixos/modules/services/misc/gitlab.nix                        |   1
 -rw-r--r--  nixos/modules/services/misc/nix-optimise.nix                  |   2
 -rw-r--r--  nixos/modules/services/monitoring/riemann-tools.nix           |   1
 -rw-r--r--  nixos/modules/services/network-filesystems/cachefilesd.nix    |  59
 -rw-r--r--  nixos/modules/services/networking/avahi-daemon.nix            |  15
 -rw-r--r--  nixos/modules/services/networking/ssh/sshd.nix                |  40
 -rw-r--r--  nixos/modules/services/networking/supplicant.nix              |   3
 -rw-r--r--  nixos/modules/services/networking/syncthing.nix               | 115
 -rw-r--r--  nixos/modules/services/networking/wpa_supplicant.nix          |   6
 -rw-r--r--  nixos/modules/services/search/hound.nix                       |   4
 -rw-r--r--  nixos/modules/services/web-apps/quassel-webserver.nix         |  99
 -rw-r--r--  nixos/modules/services/web-servers/nginx/default.nix          |   2
 -rw-r--r--  nixos/modules/services/x11/display-managers/lightdm.nix       |   3
 -rw-r--r--  nixos/modules/services/x11/window-managers/bspwm-unstable.nix |  48
 -rw-r--r--  nixos/modules/services/x11/window-managers/default.nix        |   1
 -rw-r--r--  nixos/modules/services/x11/xserver.nix                        |   1
 17 files changed, 326 insertions(+), 83 deletions(-)
diff --git a/nixos/modules/services/databases/openldap.nix b/nixos/modules/services/databases/openldap.nix
index 9f22aa7c92b..875ed0f39db 100644
--- a/nixos/modules/services/databases/openldap.nix
+++ b/nixos/modules/services/databases/openldap.nix
@@ -53,6 +53,13 @@ in
         description = "The database directory.";
       };
 
+      configDir = mkOption {
+        type = types.path;
+        default = "";
+        description = "Use this optional config directory instead of using slapd.conf";
+        example = "/var/db/slapd.d";
+      };
+
       extraConfig = mkOption {
         type = types.lines;
         default = "";
@@ -96,7 +103,7 @@ in
         mkdir -p ${cfg.dataDir}
         chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
       '';
-      serviceConfig.ExecStart = "${openldap.out}/libexec/slapd -u ${cfg.user} -g ${cfg.group} -d 0 -h \"${concatStringsSep " " cfg.urlList}\" -f ${configFile}";
+      serviceConfig.ExecStart = "${openldap.out}/libexec/slapd -u ${cfg.user} -g ${cfg.group} -d 0 -h \"${concatStringsSep " " cfg.urlList}\" ${if cfg.configDir == "" then "-f "+configFile else "-F "+cfg.configDir}";
     };
 
     users.extraUsers.openldap =
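
Usage sketch for the new configDir option (the path is only an example; the
directory must already contain a populated slapd.d/cn=config tree owned by the
openldap user):

    services.openldap = {
      enable = true;
      # Switch from the generated slapd.conf (-f) to an OLC directory (-F).
      configDir = "/var/db/slapd.d";
    };
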
diff --git a/nixos/modules/services/misc/gitlab.nix b/nixos/modules/services/misc/gitlab.nix
index f8881233dce..3e4584c7a51 100644
--- a/nixos/modules/services/misc/gitlab.nix
+++ b/nixos/modules/services/misc/gitlab.nix
@@ -463,6 +463,7 @@ in {
 
     systemd.services.gitlab = {
       after = [ "network.target" "postgresql.service" "redis.service" ];
+      requires = [ "gitlab-sidekiq.service" ];
       wantedBy = [ "multi-user.target" ];
       environment = gitlabEnv;
       path = with pkgs; [
diff --git a/nixos/modules/services/misc/nix-optimise.nix b/nixos/modules/services/misc/nix-optimise.nix
index 87ce05c5a11..a76bfd9f1f1 100644
--- a/nixos/modules/services/misc/nix-optimise.nix
+++ b/nixos/modules/services/misc/nix-optimise.nix
@@ -41,7 +41,7 @@ in
     systemd.services.nix-optimise =
       { description = "Nix Store Optimiser";
         serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
-        startAt = optional cfg.automatic cfg.dates;
+        startAt = optionals cfg.automatic cfg.dates;
       };
 
   };
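
The one-character fix matters because cfg.dates is already a list of calendar
expressions, and the two lib helpers treat lists differently:

    # lib.optional cond x   => [ x ] when cond, else [ ]  (wraps in a singleton)
    # lib.optionals cond xs => xs    when cond, else [ ]  (passes the list through)
    lib.optional  true [ "03:45" ]   # => [ [ "03:45" ] ] -- wrong type for startAt
    lib.optionals true [ "03:45" ]   # => [ "03:45" ]     -- what startAt expects
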
diff --git a/nixos/modules/services/monitoring/riemann-tools.nix b/nixos/modules/services/monitoring/riemann-tools.nix
index ce277f09464..de858813a76 100644
--- a/nixos/modules/services/monitoring/riemann-tools.nix
+++ b/nixos/modules/services/monitoring/riemann-tools.nix
@@ -50,6 +50,7 @@ in {
 
     systemd.services.riemann-health = {
       wantedBy = [ "multi-user.target" ];
+      path = [ procps ];
       serviceConfig = {
         User = "riemanntools";
         ExecStart = "${healthLauncher}/bin/riemann-health";
diff --git a/nixos/modules/services/network-filesystems/cachefilesd.nix b/nixos/modules/services/network-filesystems/cachefilesd.nix
new file mode 100644
index 00000000000..61981340840
--- /dev/null
+++ b/nixos/modules/services/network-filesystems/cachefilesd.nix
@@ -0,0 +1,59 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.cachefilesd;
+
+  cfgFile = pkgs.writeText "cachefilesd.conf" ''
+    dir ${cfg.cacheDir}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  options = {
+    services.cachefilesd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable cachefilesd network filesystems caching daemon.";
+      };
+
+      cacheDir = mkOption {
+        type = types.str;
+        default = "/var/cache/fscache";
+        description = "Directory to contain filesystem cache.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "brun 10%";
+        description = "Additional configuration file entries. See cachefilesd.conf(5) for more information.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.cachefilesd = {
+      description = "Local network file caching management daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.kmod pkgs.cachefilesd ];
+      script = ''
+        modprobe -qab cachefiles
+        mkdir -p ${cfg.cacheDir}
+        chmod 700 ${cfg.cacheDir}
+        exec cachefilesd -n -f ${cfgFile}
+      '';
+    };
+
+  };
+}
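
A minimal sketch of the new module in use (the culling thresholds are
illustrative; see cachefilesd.conf(5) for their exact semantics):

    services.cachefilesd = {
      enable = true;
      extraConfig = ''
        brun  10%   # stop culling once 10% of the disk is free again
        bcull  7%   # begin culling when free space drops below 7%
        bstop  3%   # refuse new cache allocations below 3% free
      '';
    };
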
diff --git a/nixos/modules/services/networking/avahi-daemon.nix b/nixos/modules/services/networking/avahi-daemon.nix
index ecc091d1d03..6a786e75bbc 100644
--- a/nixos/modules/services/networking/avahi-daemon.nix
+++ b/nixos/modules/services/networking/avahi-daemon.nix
@@ -175,11 +175,20 @@ in
 
     environment.systemPackages = [ pkgs.avahi ];
 
+    systemd.sockets.avahi-daemon =
+      { description = "Avahi mDNS/DNS-SD Stack Activation Socket";
+        listenStreams = [ "/var/run/avahi-daemon/socket" ];
+        wantedBy = [ "sockets.target" ];
+      };
+
     systemd.services.avahi-daemon =
-      { description = "Avahi daemon";
+      { description = "Avahi mDNS/DNS-SD Stack";
         wantedBy = [ "multi-user.target" ];
-        # Receive restart event after resume
-        partOf = [ "post-resume.target" ];
+        requires = [ "avahi-daemon.socket" ];
+
+        serviceConfig."NotifyAccess" = "main";
+        serviceConfig."BusName" = "org.freedesktop.Avahi";
+        serviceConfig."Type" = "dbus";
 
         path = [ pkgs.coreutils pkgs.avahi ];
 
diff --git a/nixos/modules/services/networking/ssh/sshd.nix b/nixos/modules/services/networking/ssh/sshd.nix
index 3e9fae35847..81941ce1cfb 100644
--- a/nixos/modules/services/networking/ssh/sshd.nix
+++ b/nixos/modules/services/networking/ssh/sshd.nix
@@ -242,7 +242,7 @@ in
 
     systemd =
       let
-        service =
+        sshd-service =
           { description = "SSH Daemon";
 
             wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
@@ -253,16 +253,8 @@ in
 
             environment.LD_LIBRARY_PATH = nssModulesPath;
 
-            preStart =
-              ''
-                mkdir -m 0755 -p /etc/ssh
-
-                ${flip concatMapStrings cfg.hostKeys (k: ''
-                  if ! [ -f "${k.path}" ]; then
-                      ssh-keygen -t "${k.type}" ${if k ? bits then "-b ${toString k.bits}" else ""} -f "${k.path}" -N ""
-                  fi
-                '')}
-              '';
+            wants = [ "sshd-keygen.service" ];
+            after = [ "sshd-keygen.service" ];
 
             serviceConfig =
               { ExecStart =
@@ -278,6 +270,26 @@ in
                 PIDFile = "/run/sshd.pid";
               });
           };
+
+        sshd-keygen-service =
+          { description = "SSH Host Key Generation";
+            path = [ cfgc.package ];
+            script =
+            ''
+              mkdir -m 0755 -p /etc/ssh
+              ${flip concatMapStrings cfg.hostKeys (k: ''
+                if ! [ -f "${k.path}" ]; then
+                  ssh-keygen -t "${k.type}" ${if k ? bits then "-b ${toString k.bits}" else ""} -f "${k.path}" -N ""
+                fi
+              '')}
+            '';
+
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = "yes";
+            };
+          };
+
       in
 
       if cfg.startWhenNeeded then {
@@ -289,11 +301,13 @@ in
             socketConfig.Accept = true;
           };
 
-        services."sshd@" = service;
+        services.sshd-keygen = sshd-keygen-service;
+        services."sshd@" = sshd-service;
 
       } else {
 
-        services.sshd = service;
+        services.sshd-keygen = sshd-keygen-service;
+        services.sshd = sshd-service;
 
       };
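
Key generation now lives in a shared sshd-keygen oneshot, so it runs for both
the socket-activated ("sshd@") and the plain sshd unit. A sketch of the
hostKeys option that feeds it (paths and types follow the module's schema):

    services.openssh = {
      enable = true;
      startWhenNeeded = true;   # spawn per-connection sshd@ instances
      hostKeys = [
        { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
        { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; }
      ];
    };
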
 
diff --git a/nixos/modules/services/networking/supplicant.nix b/nixos/modules/services/networking/supplicant.nix
index e433ec7c5b9..0c459fb1dd0 100644
--- a/nixos/modules/services/networking/supplicant.nix
+++ b/nixos/modules/services/networking/supplicant.nix
@@ -34,7 +34,8 @@ let
       '';
     in
       { description = "Supplicant ${iface}${optionalString (iface=="WLAN"||iface=="LAN") " %I"}";
-        wantedBy = [ "network.target" ] ++ deps;
+        wantedBy = [ "multi-user.target" ] ++ deps;
+        wants = [ "network.target" ];
         bindsTo = deps;
         after = deps;
         before = [ "network.target" ];
diff --git a/nixos/modules/services/networking/syncthing.nix b/nixos/modules/services/networking/syncthing.nix
index 8a430734319..dcdc203bdc6 100644
--- a/nixos/modules/services/networking/syncthing.nix
+++ b/nixos/modules/services/networking/syncthing.nix
@@ -3,46 +3,11 @@
 with lib;
 
 let
-
   cfg = config.services.syncthing;
   defaultUser = "syncthing";
-
-  header = {
-    description = "Syncthing service";
-    after = [ "network.target" ];
-    environment = {
-      STNORESTART = "yes";
-      STNOUPGRADE = "yes";
-      inherit (cfg) all_proxy;
-    } // config.networking.proxy.envVars;
-  };
-
-  service = {
-    Restart = "on-failure";
-    SuccessExitStatus = "2 3 4";
-    RestartForceExitStatus="3 4";
-  };
-
-  iNotifyHeader = {
-    description = "Syncthing Inotify File Watcher service";
-    after = [ "network.target" "syncthing.service" ];
-    requires = [ "syncthing.service" ];
-  };
-
-  iNotifyService = {
-    SuccessExitStatus = "2";
-    RestartForceExitStatus = "3";
-    Restart = "on-failure";
-  };
-
-in
-
-{
-
+in {
   ###### interface
-
   options = {
-
     services.syncthing = {
 
       enable = mkEnableOption ''
@@ -100,6 +65,19 @@ in
         '';
       };
 
+      openDefaultPorts = mkOption {
+        type = types.bool;
+        default = false;
+        example = literalExample "true";
+        description = ''
+          Open the default ports in the firewall:
+            - TCP 22000 for transfers
+            - UDP 21027 for discovery
+          If multiple users are running syncthing on this machine, you will need to manually open a set of ports for each instance and leave this disabled.
+          Alternatively, if you are running only a single instance on this machine using the default ports, enable this.
+        '';
+      };
+
       package = mkOption {
         type = types.package;
         default = pkgs.syncthing;
@@ -117,6 +95,14 @@ in
 
   config = mkIf cfg.enable {
 
+    networking.firewall = mkIf cfg.openDefaultPorts {
+      allowedTCPPorts = [ 22000 ];
+      allowedUDPPorts = [ 21027 ];
+    };
+
+    systemd.packages = [ pkgs.syncthing ]
+                       ++ lib.optional cfg.useInotify pkgs.syncthing-inotify;
+
     users = mkIf (cfg.user == defaultUser) {
       extraUsers."${defaultUser}" =
         { group = cfg.group;
@@ -131,39 +117,44 @@ in
     };
 
     systemd.services = {
-      syncthing = mkIf cfg.systemService (header // {
-          wants = mkIf cfg.useInotify [ "syncthing-inotify.service" ];
-          wantedBy = [ "multi-user.target" ];
-          serviceConfig = service // {
-            User = cfg.user;
-            Group = cfg.group;
-            PermissionsStartOnly = true;
-            ExecStart = "${cfg.package}/bin/syncthing -no-browser -home=${cfg.dataDir}";
-          };
-      });
-
-      syncthing-inotify = mkIf (cfg.systemService && cfg.useInotify) (iNotifyHeader // {
+      syncthing = mkIf cfg.systemService {
+        description = "Syncthing service";
+        after = [ "network.target" ];
+        environment = {
+          STNORESTART = "yes";
+          STNOUPGRADE = "yes";
+          inherit (cfg) all_proxy;
+        } // config.networking.proxy.envVars;
+        wants = mkIf cfg.useInotify [ "syncthing-inotify.service" ];
         wantedBy = [ "multi-user.target" ];
-        serviceConfig = iNotifyService // {
+        serviceConfig = {
+          Restart = "on-failure";
+          SuccessExitStatus = "2 3 4";
+          RestartForceExitStatus = "3 4";
           User = cfg.user;
-          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -home=${cfg.dataDir} -logflags=0";
+          Group = cfg.group;
+          PermissionsStartOnly = true;
+          ExecStart = "${cfg.package}/bin/syncthing -no-browser -home=${cfg.dataDir}";
         };
-      });
-    };
+      };
 
-    systemd.user.services = {
-      syncthing = header // {
-        serviceConfig = service // {
-          ExecStart = "${cfg.package}/bin/syncthing -no-browser";
-        };
+      syncthing-resume = {
+        wantedBy = [ "suspend.target" ];
       };
 
-      syncthing-inotify = mkIf cfg.useInotify (iNotifyHeader // {
-        serviceConfig = iNotifyService // {
-          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -logflags=0";
+      syncthing-inotify = mkIf (cfg.systemService && cfg.useInotify) {
+        description = "Syncthing Inotify File Watcher service";
+        after = [ "network.target" "syncthing.service" ];
+        requires = [ "syncthing.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          SuccessExitStatus = "2";
+          RestartForceExitStatus = "3";
+          Restart = "on-failure";
+          User = cfg.user;
+          ExecStart = "${pkgs.syncthing-inotify.bin}/bin/syncthing-inotify -home=${cfg.dataDir} -logflags=0";
         };
-      });
+      };
     };
-
   };
 }
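
A hedged sketch of the reworked module with the new firewall option (user and
dataDir are placeholders):

    services.syncthing = {
      enable = true;
      useInotify = true;        # also pulls in syncthing-inotify.service
      openDefaultPorts = true;  # TCP 22000 (transfers), UDP 21027 (discovery)
      user = "alice";
      dataDir = "/home/alice/.config/syncthing";
    };
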
diff --git a/nixos/modules/services/networking/wpa_supplicant.nix b/nixos/modules/services/networking/wpa_supplicant.nix
index a344d785546..5657b91c1e7 100644
--- a/nixos/modules/services/networking/wpa_supplicant.nix
+++ b/nixos/modules/services/networking/wpa_supplicant.nix
@@ -128,9 +128,11 @@ in {
     in {
       description = "WPA Supplicant";
 
-      after = [ "network.target" ] ++ lib.concatMap deviceUnit ifaces;
+      after = lib.concatMap deviceUnit ifaces;
+      before = [ "network.target" ];
+      wants = [ "network.target" ];
       requires = lib.concatMap deviceUnit ifaces;
-      wantedBy = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
 
       path = [ pkgs.wpa_supplicant ];
 
diff --git a/nixos/modules/services/search/hound.nix b/nixos/modules/services/search/hound.nix
index 4389f17668b..708f57a5eb7 100644
--- a/nixos/modules/services/search/hound.nix
+++ b/nixos/modules/services/search/hound.nix
@@ -57,6 +57,10 @@ in {
 
       config = mkOption {
         type = types.str;
+        description = ''
+          The full configuration of the Hound daemon. Note the dbpath
+          should be an absolute path to a writable location on disk.
+        '';
         example = ''
           {
              "max-concurrent-indexers" : 2,
diff --git a/nixos/modules/services/web-apps/quassel-webserver.nix b/nixos/modules/services/web-apps/quassel-webserver.nix
new file mode 100644
index 00000000000..7de9480d4c4
--- /dev/null
+++ b/nixos/modules/services/web-apps/quassel-webserver.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.quassel-webserver;
+  quassel-webserver = cfg.pkg;
+  settings = ''
+    module.exports = {
+      default: {
+        host: '${cfg.quasselCoreHost}',  // quasselcore host
+        port: ${toString cfg.quasselCorePort},  // quasselcore port
+        initialBacklogLimit: ${toString cfg.initialBacklogLimit},  // Amount of backlogs to fetch per buffer on connection
+        backlogLimit: ${toString cfg.backlogLimit},  // Amount of backlogs to fetch per buffer after first retrieval
+        securecore: ${if cfg.secureCore then "true" else "false"},  // Connect to the core using SSL
+        theme: '${cfg.theme}'  // Default UI theme
+      },
+      themes: ['default', 'darksolarized'],  //  Available themes
+      forcedefault: ${if cfg.forceHostAndPort then "true" else "false"},  // Will force default host and port to be used, and will hide the corresponding fields in the UI
+      prefixpath: '${cfg.prefixPath}'  // Configure this if you use a reverse proxy
+    };
+  '';
+  settingsFile = pkgs.writeText "settings-user.js" settings;
+in {
+  options = {
+    services.quassel-webserver = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Whether to enable the quassel webclient service";
+      };
+      pkg = mkOption {
+        default = pkgs.quassel-webserver;
+        description = "The quassel-webserver package";
+      };
+      quasselCoreHost = mkOption {
+        default = "";
+        type = types.str;
+        description = "The default host of the quassel core";
+      };
+      quasselCorePort = mkOption {
+        default = 4242;
+        type = types.int;
+        description = "The default quassel core port";
+      };
+      initialBacklogLimit = mkOption {
+        default = 20;
+        type = types.int;
+        description = "Amount of backlogs to fetch per buffer on connection";
+      };
+      backlogLimit = mkOption {
+        default = 100;
+        type = types.int;
+        description = "Amount of backlogs to fetch per buffer after first retrieval";
+      };
+      secureCore = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Connect to the core using SSL";
+      };
+      theme = mkOption {
+        default = "default";
+        type = types.str;
+        description = "default or darksolarized";
+      };
+      prefixPath = mkOption {
+        default = "";
+        type = types.str;
+        description = "Configure this if you use a reverse proxy. Must start with a '/'";
+        example = "/quassel";
+      };
+      port = mkOption {
+        default = 60443;
+        type = types.int;
+        description = "The port the quassel webserver should listen on";
+      };
+      useHttps = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Whether the quassel webserver connection should be a https connection";
+      };
+      forceHostAndPort = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Force the users to use the quasselCoreHost and quasselCorePort defaults";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.quassel-webserver = {
+      description = "A web server/client for Quassel";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${quassel-webserver}/lib/node_modules/quassel-webserver/bin/www -p ${toString cfg.port} -m ${if cfg.useHttps == true then "https" else "http"} -c ${settingsFile}";
+      };
+    };
+  };
+}
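
A usage sketch for the new module (host and prefix are placeholders):

    services.quassel-webserver = {
      enable = true;
      quasselCoreHost = "quassel.example.org";
      secureCore = true;        # talk SSL to the core
      useHttps = true;          # serve the web client over https on port 60443
      prefixPath = "/quassel";  # only needed behind a reverse proxy
    };
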
diff --git a/nixos/modules/services/web-servers/nginx/default.nix b/nixos/modules/services/web-servers/nginx/default.nix
index 7b822619a2f..166e5a6b2ce 100644
--- a/nixos/modules/services/web-servers/nginx/default.nix
+++ b/nixos/modules/services/web-servers/nginx/default.nix
@@ -392,6 +392,8 @@ in
     security.acme.certs = filterAttrs (n: v: v != {}) (
       mapAttrs (vhostName: vhostConfig:
         optionalAttrs vhostConfig.enableACME {
+          user = cfg.user;
+          group = cfg.group;
           webroot = vhostConfig.acmeRoot;
           extraDomains = genAttrs vhostConfig.serverAliases (alias: null);
           postRun = ''
diff --git a/nixos/modules/services/x11/display-managers/lightdm.nix b/nixos/modules/services/x11/display-managers/lightdm.nix
index 33cd51f37c6..1d309aa3429 100644
--- a/nixos/modules/services/x11/display-managers/lightdm.nix
+++ b/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -207,6 +207,9 @@ in
     services.dbus.enable = true;
     services.dbus.packages = [ lightdm ];
 
+    # lightdm uses the accounts daemon to remember language/window-manager per user
+    services.accounts-daemon.enable = true;
+
     security.pam.services.lightdm = {
       allowNullPassword = true;
       startSession = true;
diff --git a/nixos/modules/services/x11/window-managers/bspwm-unstable.nix b/nixos/modules/services/x11/window-managers/bspwm-unstable.nix
new file mode 100644
index 00000000000..3282e0d0851
--- /dev/null
+++ b/nixos/modules/services/x11/window-managers/bspwm-unstable.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.bspwm-unstable;
+in
+
+{
+  options = {
+    services.xserver.windowManager.bspwm-unstable = {
+        enable = mkEnableOption "bspwm-unstable";
+        startThroughSession = mkOption {
+            type = with types; bool;
+            default = false;
+            description = "
+                Start the window manager through the script defined in 
+                sessionScript. Defaults to the the bspwm-session script
+                provided by bspwm
+            ";
+        };
+        sessionScript = mkOption {
+            default = "${pkgs.bspwm-unstable}/bin/bspwm-session";
+            defaultText = "(pkgs.bspwm-unstable)/bin/bspwm-session";
+            description = "
+                The start-session script to use. Defaults to the
+                provided bspwm-session script from the bspwm package.
+
+                Does nothing unless `bspwm-unstable.startThroughSession` is enabled.
+            ";
+        };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "bspwm-unstable";
+      start = if cfg.startThroughSession
+        then cfg.sessionScript
+        else ''
+            export _JAVA_AWT_WM_NONREPARENTING=1
+            SXHKD_SHELL=/bin/sh ${pkgs.sxhkd-unstable}/bin/sxhkd -f 100 &
+            ${pkgs.bspwm-unstable}/bin/bspwm
+        '';
+    };
+    environment.systemPackages = [ pkgs.bspwm-unstable ];
+  };
+}
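
The session-script branch can be exercised like so (a sketch; bspwm-session
ships with the bspwm-unstable package itself):

    services.xserver.windowManager.bspwm-unstable = {
      enable = true;
      # Use the package's bspwm-session wrapper instead of the inline
      # sxhkd + bspwm fallback defined above.
      startThroughSession = true;
    };
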
diff --git a/nixos/modules/services/x11/window-managers/default.nix b/nixos/modules/services/x11/window-managers/default.nix
index f005decfa33..dabe2c26a72 100644
--- a/nixos/modules/services/x11/window-managers/default.nix
+++ b/nixos/modules/services/x11/window-managers/default.nix
@@ -10,6 +10,7 @@ in
   imports = [
     ./afterstep.nix
     ./bspwm.nix
+    ./bspwm-unstable.nix
     ./compiz.nix
     ./dwm.nix
     ./exwm.nix
diff --git a/nixos/modules/services/x11/xserver.nix b/nixos/modules/services/x11/xserver.nix
index 298f30858ce..db60e47c3ea 100644
--- a/nixos/modules/services/x11/xserver.nix
+++ b/nixos/modules/services/x11/xserver.nix
@@ -515,6 +515,7 @@ in
       { description = "X11 Server";
 
         after = [ "systemd-udev-settle.service" "local-fs.target" "acpid.service" "systemd-logind.service" ];
+        wants = [ "systemd-udev-settle.service" ];
 
         restartIfChanged = false;