summary refs log tree commit diff
path: root/nixos/modules/services/misc
diff options
context:
space:
mode:
Diffstat (limited to 'nixos/modules/services/misc')
-rw-r--r--nixos/modules/services/misc/autofs.nix120
-rw-r--r--nixos/modules/services/misc/cgminer.nix140
-rw-r--r--nixos/modules/services/misc/disnix.nix164
-rw-r--r--nixos/modules/services/misc/felix.nix110
-rw-r--r--nixos/modules/services/misc/folding-at-home.nix74
-rw-r--r--nixos/modules/services/misc/gpsd.nix104
-rw-r--r--nixos/modules/services/misc/nix-daemon.nix370
-rw-r--r--nixos/modules/services/misc/nix-gc.nix61
-rw-r--r--nixos/modules/services/misc/nixos-manual.nix116
-rw-r--r--nixos/modules/services/misc/rogue.nix59
-rw-r--r--nixos/modules/services/misc/svnserve.nix46
-rw-r--r--nixos/modules/services/misc/synergy.nix131
12 files changed, 1495 insertions, 0 deletions
diff --git a/nixos/modules/services/misc/autofs.nix b/nixos/modules/services/misc/autofs.nix
new file mode 100644
index 00000000000..50491c556e8
--- /dev/null
+++ b/nixos/modules/services/misc/autofs.nix
@@ -0,0 +1,120 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.autofs;
+
+  autoMaster = pkgs.writeText "auto.master" cfg.autoMaster;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.autofs = {
+
+      enable = mkOption {
+        default = false;
+        description = "
+          Mount filesystems on demand. Unmount them automatically.
+          You may also be interested in afuse.
+        ";
+      };
+
+      autoMaster = mkOption {
+        example = literalExample ''
+          autoMaster = let
+            mapConf = pkgs.writeText "auto" '''
+             kernel    -ro,soft,intr       ftp.kernel.org:/pub/linux
+             boot      -fstype=ext2        :/dev/hda1
+             windoze   -fstype=smbfs       ://windoze/c
+             removable -fstype=ext2        :/dev/hdd
+             cd        -fstype=iso9660,ro  :/dev/hdc
+             floppy    -fstype=auto        :/dev/fd0
+             server    -rw,hard,intr       / -ro myserver.me.org:/ \
+                                           /usr myserver.me.org:/usr \
+                                           /home myserver.me.org:/home
+            ''';
+          in '''
+            /auto file:''${mapConf}
+          '''
+        '';
+        description = "
+          Contents of the /etc/auto.master file.
+          See man 5 auto.master and man 5 autofs.
+        ";
+      };
+
+      timeout = mkOption {
+        default = 600;
+        description = "Set the global minimum timeout, in seconds, until directories are unmounted";
+      };
+
+      debug = mkOption {
+        default = false;
+        description = "
+        pass -d and -7 to automount and write log to /var/log/autofs
+        ";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.etc = singleton
+      { target = "auto.master";
+        source = pkgs.writeText "auto.master" cfg.autoMaster;
+      };
+
+    boot.kernelModules = [ "autofs4" ];
+
+    jobs.autofs =
+      { description = "Filesystem automounter";
+
+        startOn = "started network-interfaces";
+        stopOn = "stopping network-interfaces";
+
+        path = [ pkgs.nfsUtils pkgs.sshfsFuse ];
+
+        preStop =
+          ''
+            set -e; while :; do pkill -TERM automount; sleep 1; done
+          '';
+
+        # automount doesn't clean up when receiving SIGKILL.
+        # umount -l should unmount the directories recursively when they are no longer used
+        # It does, but traces are left in /etc/mtab. So unmount recursively..
+        postStop =
+          ''
+          PATH=${pkgs.gnused}/bin:${pkgs.coreutils}/bin
+          exec &> /tmp/logss
+          # double quote for sed:
+          escapeSpaces(){ sed 's/ /\\\\040/g'; }
+          unescapeSpaces(){ sed 's/\\040/ /g'; }
+          sed -n 's@^\s*\(\([^\\ ]\|\\ \)*\)\s.*@\1@p' ${autoMaster} | sed 's/[\\]//' | while read mountPoint; do
+            sed -n "s@[^ ]\+\s\+\($(echo "$mountPoint"| escapeSpaces)[^ ]*\).*@\1@p" /proc/mounts | sort -r | unescapeSpaces| while read smountP; do
+              ${pkgs.utillinux}/bin/umount -l "$smountP" || true
+            done
+          done
+          '';
+
+        script =
+          ''
+            ${if cfg.debug then "exec &> /var/log/autofs" else ""}
+            exec ${pkgs.autofs5}/sbin/automount ${if cfg.debug then "-d" else ""} -f -t ${builtins.toString cfg.timeout} "${autoMaster}" ${if cfg.debug then "-l7" else ""}
+          '';
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/cgminer.nix b/nixos/modules/services/misc/cgminer.nix
new file mode 100644
index 00000000000..890d7a4020b
--- /dev/null
+++ b/nixos/modules/services/misc/cgminer.nix
@@ -0,0 +1,140 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+  cfg = config.services.cgminer;
+
+  convType = with builtins;
+    v: if isBool v then (if v then "true" else "false") else toString v;
+  mergedHwConfig =
+    mapAttrsToList (n: v: ''"${n}": "${(concatStringsSep "," (map convType v))}"'')
+      (foldAttrs (n: a: [n] ++ a) [] cfg.hardware);
+  mergedConfig = with builtins;
+    mapAttrsToList (n: v: ''"${n}":  ${if isBool v then "" else ''"''}${convType v}${if isBool v then "" else ''"''}'')
+      cfg.config;
+
+  cgminerConfig = pkgs.writeText "cgminer.conf" ''
+  {
+  ${concatStringsSep ",\n" mergedHwConfig},
+  ${concatStringsSep ",\n" mergedConfig},
+  "pools": [
+  ${concatStringsSep ",\n"
+    (map (v: ''{"url": "${v.url}", "user": "${v.user}", "pass": "${v.pass}"}'')
+          cfg.pools)}]
+  }
+  '';
+in
+{
+  ###### interface
+  options = {
+
+    services.cgminer = {
+
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable cgminer, an ASIC/FPGA/GPU miner for bitcoin and
+          litecoin.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.cgminer;
+        description = "Which cgminer derivation to use.";
+      };
+
+      user = mkOption {
+        default = "cgminer";
+        description = "User account under which cgminer runs";
+      };
+
+      pools = mkOption {
+        default = [];  # Run benchmark
+        description = "List of pools where to mine";
+        example = [{
+          url = "http://p2pool.org:9332";
+          user = "17EUZxTvs9uRmPsjPZSYUU3zCz9iwstudk";
+          pass = "X";
+        }];
+      };
+
+      hardware = mkOption {
+        default = []; # Run without options
+        description= "List of config options for every GPU";
+        example = [
+        {
+          intensity = 9;
+          gpu-engine = "0-985";
+          gpu-fan = "0-85";
+          gpu-memclock = 860;
+          gpu-powertune = 20;
+          temp-cutoff = 95;
+          temp-overheat = 85;
+          temp-target = 75;
+        }
+        {
+          intensity = 9;
+          gpu-engine = "0-950";
+          gpu-fan = "0-85";
+          gpu-memclock = 825;
+          gpu-powertune = 20;
+          temp-cutoff = 95;
+          temp-overheat = 85;
+          temp-target = 75;
+        }];
+      };
+
+      config = mkOption {
+        default = {};
+        description = "Additional config";
+        example = {
+          auto-fan = true;
+          auto-gpu = true;
+          expiry = 120;
+          failover-only = true;
+          gpu-threads = 2;
+          log = 5;
+          queue = 1;
+          scan-time = 60;
+          temp-hysteresis = 3;
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.cgminer.enable {
+
+    users.extraUsers = singleton
+      { name = cfg.user;
+        description = "Cgminer user";
+      };
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.cgminer = {
+      path = [ pkgs.cgminer ];
+
+      after = [ "display-manager.target" "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = { 
+        LD_LIBRARY_PATH = ''/run/opengl-driver/lib:/run/opengl-driver-32/lib'';
+        DISPLAY = ":0";
+        GPU_MAX_ALLOC_PERCENT = "100";
+        GPU_USE_SYNC_OBJECTS = "1";
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.cgminer}/bin/cgminer -T -c ${cgminerConfig}";
+        User = cfg.user;
+        RestartSec = 10;
+      };
+    };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/disnix.nix b/nixos/modules/services/misc/disnix.nix
new file mode 100644
index 00000000000..6419e6f8fc7
--- /dev/null
+++ b/nixos/modules/services/misc/disnix.nix
@@ -0,0 +1,164 @@
+# Disnix server
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.disnix;
+
+  dysnomia = pkgs.dysnomia.override (origArgs: {
+    enableApacheWebApplication = config.services.httpd.enable;
+    enableAxis2WebService = config.services.tomcat.axis2.enable;
+    enableEjabberdDump = config.services.ejabberd.enable;
+    enableMySQLDatabase = config.services.mysql.enable;
+    enablePostgreSQLDatabase = config.services.postgresql.enable;
+    enableSubversionRepository = config.services.svnserve.enable;
+    enableTomcatWebApplication = config.services.tomcat.enable;
+  });
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.disnix = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable Disnix";
+      };
+
+      useWebServiceInterface = mkOption {
+        default = false;
+        description = "Whether to enable the DisnixWebService interface running on Apache Tomcat";
+      };
+
+      publishInfrastructure = {
+        enable = mkOption {
+          default = false;
+          description = "Whether to publish capabilities/properties of this machine as attributes in the infrastructure option";
+        };
+
+        enableAuthentication = mkOption {
+          default = false;
+          description = "Whether to publish authentication credentials through the infrastructure attribute (not recommended in combination with Avahi)";
+        };
+      };
+
+      infrastructure = mkOption {
+        default = {};
+        description = "List of name value pairs containing properties for the infrastructure model";
+      };
+
+      publishAvahi = mkOption {
+        default = false;
+        description = "Whether to publish capabilities/properties as a Disnix service through Avahi";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.disnix ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
+
+    services.dbus.enable = true;
+    services.dbus.packages = [ pkgs.disnix ];
+
+    services.avahi.enable = cfg.publishAvahi;
+
+    services.tomcat.enable = cfg.useWebServiceInterface;
+    services.tomcat.extraGroups = [ "disnix" ];
+    services.tomcat.javaOpts = "${optionalString cfg.useWebServiceInterface "-Djava.library.path=${pkgs.libmatthew_java}/lib/jni"} ";
+    services.tomcat.sharedLibs = optional cfg.useWebServiceInterface "${pkgs.DisnixWebService}/share/java/DisnixConnection.jar"
+                                 ++ optional cfg.useWebServiceInterface "${pkgs.dbus_java}/share/java/dbus.jar";
+    services.tomcat.webapps = optional cfg.useWebServiceInterface pkgs.DisnixWebService;
+
+    users.extraGroups = singleton
+      { name = "disnix";
+        gid = config.ids.gids.disnix;
+      };
+
+    services.disnix.infrastructure =
+      optionalAttrs (cfg.publishInfrastructure.enable)
+      ( { hostname = config.networking.hostName;
+          #targetHost = config.deployment.targetHost;
+          system = if config.nixpkgs.system == "" then builtins.currentSystem else config.nixpkgs.system;
+          
+          supportedTypes = (import "${pkgs.stdenv.mkDerivation {
+            name = "supportedtypes";
+            buildCommand = ''
+              ( echo -n "[ "
+                cd ${dysnomia}/libexec/dysnomia
+                for i in *
+                do
+                    echo -n "\"$i\" "
+                done
+                echo -n " ]") > $out
+            '';
+          }}");
+        }
+        #// optionalAttrs (cfg.useWebServiceInterface) { targetEPR = "http://${config.deployment.targetHost}:8080/DisnixWebService/services/DisnixWebService"; }
+        // optionalAttrs (config.services.httpd.enable) { documentRoot = config.services.httpd.documentRoot; }
+        // optionalAttrs (config.services.mysql.enable) { mysqlPort = config.services.mysql.port; }
+        // optionalAttrs (config.services.tomcat.enable) { tomcatPort = 8080; }
+        // optionalAttrs (config.services.svnserve.enable) { svnBaseDir = config.services.svnserve.svnBaseDir; }
+        // optionalAttrs (cfg.publishInfrastructure.enableAuthentication) (
+          optionalAttrs (config.services.mysql.enable) { mysqlUsername = "root"; mysqlPassword = builtins.readFile config.services.mysql.rootPassword; })
+        )
+    ;
+
+    services.disnix.publishInfrastructure.enable = cfg.publishAvahi;
+
+    jobs = {
+      disnix =
+        { description = "Disnix server";
+        
+          wantedBy = [ "multi-user.target" ];
+          after = [ "dbus.service" ]
+            ++ optional config.services.httpd.enable "httpd.service"
+            ++ optional config.services.mysql.enable "mysql.service"
+            ++ optional config.services.tomcat.enable "tomcat.service"
+            ++ optional config.services.svnserve.enable "svnserve.service";
+
+          restartIfChanged = false;
+          
+          path = [ pkgs.nix pkgs.disnix ];
+        
+          script =
+          ''
+            export HOME=/root
+            disnix-service --dysnomia-modules-dir=${dysnomia}/libexec/dysnomia
+          '';
+        };
+    } // optionalAttrs cfg.publishAvahi {
+      disnixAvahi =
+        { description = "Disnix Avahi publisher";
+
+          startOn = "started avahi-daemon";
+
+          exec =
+          ''
+            ${pkgs.avahi}/bin/avahi-publish-service disnix-${config.networking.hostName} _disnix._tcp 22 \
+              "mem=$(grep 'MemTotal:' /proc/meminfo | sed -e 's/kB//' -e 's/MemTotal://' -e 's/ //g')" \
+              ${concatMapStrings (infrastructureAttrName:
+                let infrastructureAttrValue = getAttr infrastructureAttrName (cfg.infrastructure);
+                in
+                if builtins.isInt infrastructureAttrValue then
+                ''${infrastructureAttrName}=${toString infrastructureAttrValue} \
+                ''
+                else
+                ''${infrastructureAttrName}=\"${infrastructureAttrValue}\" \
+                ''
+                ) (attrNames (cfg.infrastructure))}
+          '';
+        };
+    };
+  };
+}
diff --git a/nixos/modules/services/misc/felix.nix b/nixos/modules/services/misc/felix.nix
new file mode 100644
index 00000000000..2da50fc8595
--- /dev/null
+++ b/nixos/modules/services/misc/felix.nix
@@ -0,0 +1,110 @@
+# Felix server
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.felix;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.felix = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable the Apache Felix OSGi service";
+      };
+
+      bundles = mkOption {
+        default = [ pkgs.felix_remoteshell ];
+        description = "List of bundles that should be activated on startup";
+      };
+
+      user = mkOption {
+        default = "osgi";
+        description = "User account under which Apache Felix runs.";
+      };
+
+      group = mkOption {
+        default = "osgi";
+        description = "Group account under which Apache Felix runs.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.extraGroups = singleton
+      { name = "osgi";
+        gid = config.ids.gids.osgi;
+      };
+
+    users.extraUsers = singleton
+      { name = "osgi";
+        uid = config.ids.uids.osgi;
+        description = "OSGi user";
+        home = "/homeless-shelter";
+      };
+
+    jobs.felix =
+      { description = "Felix server";
+
+        preStart =
+	  ''
+	    # Initialise felix instance on first startup
+	    if [ ! -d /var/felix ]
+	    then
+	        # Symlink system files
+
+	        mkdir -p /var/felix
+		chown ${cfg.user}:${cfg.group} /var/felix
+
+		for i in ${pkgs.felix}/*
+		do
+		    if [ "$i" != "${pkgs.felix}/bundle" ]
+		    then
+		        ln -sfn $i /var/felix/$(basename $i)
+		    fi
+		done
+
+		# Symlink bundles
+		mkdir -p /var/felix/bundle
+		chown ${cfg.user}:${cfg.group} /var/felix/bundle
+
+		for i in ${pkgs.felix}/bundle/* ${toString cfg.bundles}
+		do
+		    if [ -f $i ]
+		    then
+		        ln -sfn $i /var/felix/bundle/$(basename $i)
+		    elif [ -d $i ]
+		    then
+		        for j in $i/bundle/*
+			do
+			    ln -sfn $j /var/felix/bundle/$(basename $j)
+			done
+		    fi
+		done
+	    fi
+	  '';
+
+        script =
+          ''
+	    cd /var/felix
+            ${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c '${pkgs.jre}/bin/java -jar bin/felix.jar'
+          '';
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/folding-at-home.nix b/nixos/modules/services/misc/folding-at-home.nix
new file mode 100644
index 00000000000..9f4c4645279
--- /dev/null
+++ b/nixos/modules/services/misc/folding-at-home.nix
@@ -0,0 +1,74 @@
+{ config, pkgs, ... }:
+with pkgs.lib;
+let
+  stateDir = "/var/lib/foldingathome";
+  cfg = config.services.foldingAtHome;
+  fahUser = "foldingathome";
+in {
+
+  ###### interface
+
+  options = {
+
+    services.foldingAtHome = {
+
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable the Folding@Home client to use idle CPU time.
+        '';
+      };
+
+      nickname = mkOption {
+        default = "Anonymous";
+        description = ''
+          A unique handle for statistics.
+        '';
+      };
+
+      config = mkOption {
+        default = "";
+        description = ''
+          Extra configuration. Contents will be added verbatim to the
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.extraUsers = singleton
+      { name = fahUser;
+        uid = config.ids.uids.foldingAtHome;
+        description = "Folding@Home user";
+        home = stateDir;
+      };
+
+    jobs.foldingAtHome =
+      { name = "foldingathome";
+
+        startOn = "started network-interfaces";
+        stopOn = "stopping network-interfaces";
+
+        preStart =
+          ''
+            mkdir -m 0755 -p ${stateDir}
+            chown ${fahUser} ${stateDir}
+            cp -f ${pkgs.writeText "client.cfg" cfg.config} ${stateDir}/client.cfg
+          '';
+        exec = "${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${fahUser} -c 'cd ${stateDir}; ${pkgs.foldingathome}/bin/fah6'";
+      };
+
+      services.foldingAtHome.config = ''
+          [settings]
+          username=${cfg.nickname}
+      '';
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/gpsd.nix b/nixos/modules/services/misc/gpsd.nix
new file mode 100644
index 00000000000..bc1d1f4575a
--- /dev/null
+++ b/nixos/modules/services/misc/gpsd.nix
@@ -0,0 +1,104 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  uid = config.ids.uids.gpsd;
+  gid = config.ids.gids.gpsd;
+  cfg = config.services.gpsd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gpsd = {
+
+      enable = mkOption {
+        default = false;
+        description = ''
+          Whether to enable `gpsd', a GPS service daemon.
+        '';
+      };
+
+      device = mkOption {
+        default = "/dev/ttyUSB0";
+        description = ''
+          A device may be a local serial device for GPS input, or a URL of the form:
+               <literal>[{dgpsip|ntrip}://][user:passwd@]host[:port][/stream]</literal>
+          in which case it specifies an input source for DGPS or ntrip data.
+        '';
+      };
+
+      readonly = mkOption {
+        default = true;
+        description = ''
+          Whether to enable the broken-device-safety, otherwise
+          known as read-only mode.  Some popular bluetooth and USB
+          receivers lock up or become totally inaccessible when
+          probed or reconfigured.  This switch prevents gpsd from
+          writing to a receiver.  This means that gpsd cannot
+          configure the receiver for optimal performance, but it
+          also means that gpsd cannot break the receiver.  A better
+          solution would be for Bluetooth to not be so fragile.  A
+          platform independent method to identify
+          serial-over-Bluetooth devices would also be nice.
+        '';
+      };
+
+      port = mkOption {
+        default = 2947;
+        description = ''
+          The port where to listen for TCP connections.
+        '';
+      };
+
+      debugLevel = mkOption {
+        default = 0;
+        description = ''
+          The debugging level.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.extraUsers = singleton
+      { name = "gpsd";
+        inherit uid;
+        description = "gpsd daemon user";
+        home = "/var/empty";
+      };
+
+    users.extraGroups = singleton
+      { name = "gpsd";
+        inherit gid;
+      };
+
+    jobs.gpsd =
+      { description = "GPSD daemon";
+
+        startOn = "ip-up";
+
+        exec =
+          ''
+            ${pkgs.gpsd}/sbin/gpsd -D "${toString cfg.debugLevel}"  \
+              -S "${toString cfg.port}"                             \
+              ${if cfg.readonly then "-b" else ""}                  \
+              "${cfg.device}"
+          '';
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/nix-daemon.nix b/nixos/modules/services/misc/nix-daemon.nix
new file mode 100644
index 00000000000..adf4f145f25
--- /dev/null
+++ b/nixos/modules/services/misc/nix-daemon.nix
@@ -0,0 +1,370 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.nix;
+
+  inherit (config.environment) nix;
+
+  makeNixBuildUser = nr:
+    { name = "nixbld${toString nr}";
+      description = "Nix build user ${toString nr}";
+
+      /* For consistency with the setgid(2), setuid(2), and setgroups(2)
+         calls in `libstore/build.cc', don't add any supplementary group
+         here except "nixbld".  */
+      uid = builtins.add config.ids.uids.nixbld nr;
+      group = "nixbld";
+      extraGroups = [ "nixbld" ];
+    };
+
+  nixConf =
+    let
+      # Tricky: if we're using a chroot for builds, then we need
+      # /bin/sh in the chroot (our own compromise to purity).
+      # However, since /bin/sh is a symlink to some path in the
+      # Nix store, which furthermore has runtime dependencies on
+      # other paths in the store, we need the closure of /bin/sh
+      # in `build-chroot-dirs' - otherwise any builder that uses
+      # /bin/sh won't work.
+      binshDeps = pkgs.writeReferencesToFile config.system.build.binsh;
+    in
+      pkgs.runCommand "nix.conf" {extraOptions = cfg.extraOptions; } ''
+        extraPaths=$(for i in $(cat ${binshDeps}); do if test -d $i; then echo $i; fi; done)
+        cat > $out <<END
+        # WARNING: this file is generated from the nix.* options in
+        # your NixOS configuration, typically
+        # /etc/nixos/configuration.nix.  Do not edit it!
+        build-users-group = nixbld
+        build-max-jobs = ${toString (cfg.maxJobs)}
+        build-use-chroot = ${if cfg.useChroot then "true" else "false"}
+        build-chroot-dirs = ${toString cfg.chrootDirs} $(echo $extraPaths)
+        binary-caches = ${toString cfg.binaryCaches}
+        trusted-binary-caches = ${toString cfg.trustedBinaryCaches}
+        $extraOptions
+        END
+      '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    environment.nix = mkOption {
+      default = pkgs.nix;
+      merge = mergeOneOption;
+      description = ''
+        This option specifies the Nix package instance to use throughout the system.
+      '';
+    };
+
+    nix = {
+
+      maxJobs = mkOption {
+        default = 1;
+        example = 2;
+        description = "
+          This option defines the maximum number of jobs that Nix will try
+          to build in parallel.  The default is 1.  You should generally
+          set it to the number of CPUs in your system (e.g., 2 on an Athlon
+          64 X2).
+        ";
+      };
+
+      useChroot = mkOption {
+        default = false;
+        example = true;
+        description = "
+          If set, Nix will perform builds in a chroot-environment that it
+          will set up automatically for each build.  This prevents
+          impurities in builds by disallowing access to dependencies
+          outside of the Nix store.
+        ";
+      };
+
+      chrootDirs = mkOption {
+        default = [];
+        example = [ "/dev" "/proc" ];
+        description =
+          ''
+            Directories from the host filesystem to be included
+            in the chroot.
+          '';
+      };
+
+      extraOptions = mkOption {
+        default = "";
+        example = "
+          gc-keep-outputs = true
+          gc-keep-derivations = true
+        ";
+        description = "Additional text appended to <filename>nix.conf</filename>.";
+      };
+
+      distributedBuilds = mkOption {
+        default = false;
+        description = "
+          Whether to distribute builds to the machines listed in
+          <option>nix.buildMachines</option>.
+          If you know that the <option>buildMachines</option> are not
+          always available, either use
+          <command>nixos-rebuild --no-build-hook</command>
+          or consider managing <filename>/etc/nix.machines</filename> manually
+          by setting <option>manualNixMachines</option>. Then you can comment
+          unavailable build machines.
+        ";
+      };
+
+      manualNixMachines = mkOption {
+        default = false;
+        description = "
+          Whether to manually manage the list of build machines used in distributed
+          builds in /etc/nix.machines.
+        ";
+      };
+
+      daemonNiceLevel = mkOption {
+        default = 0;
+        description = "
+          Nix daemon process priority. This priority propagates to build processes.
+          0 is the default Unix process priority, 20 is the lowest.
+        ";
+      };
+
+      daemonIONiceLevel = mkOption {
+        default = 0;
+        description = "
+          Nix daemon process I/O priority. This priority propagates to build processes.
+          0 is the default Unix process I/O priority, 7 is the lowest.
+        ";
+      };
+
+      buildMachines = mkOption {
+        example = [
+          { hostName = "voila.labs.cs.uu.nl";
+            sshUser = "nix";
+            sshKey = "/root/.ssh/id_buildfarm";
+            system = "powerpc-darwin";
+            maxJobs = 1;
+          }
+          { hostName = "linux64.example.org";
+            sshUser = "buildfarm";
+            sshKey = "/root/.ssh/id_buildfarm";
+            system = "x86_64-linux";
+            maxJobs = 2;
+            supportedFeatures = "kvm";
+            mandatoryFeatures = "perf";
+          }
+        ];
+        description = "
+          This option lists the machines to be used if distributed
+          builds are enabled (see
+          <option>nix.distributedBuilds</option>).  Nix will perform
+          derivations on those machines via SSH by copying the inputs
+          to the Nix store on the remote machine, starting the build,
+          then copying the output back to the local Nix store.  Each
+          element of the list should be an attribute set containing
+          the machine's host name (<varname>hostname</varname>), the
+          user name to be used for the SSH connection
+          (<varname>sshUser</varname>), the Nix system type
+          (<varname>system</varname>, e.g.,
+          <literal>\"i686-linux\"</literal>), the maximum number of
+          jobs to be run in parallel on that machine
+          (<varname>maxJobs</varname>), the path to the SSH private
+          key to be used to connect (<varname>sshKey</varname>), a
+          list of supported features of the machine
+          (<varname>supportedFeatures</varname>) and a list of
+          mandatory features of the machine
+          (<varname>mandatoryFeatures</varname>). The SSH private key
+          should not have a passphrase, and the corresponding public
+          key should be added to
+          <filename>~<replaceable>sshUser</replaceable>/authorized_keys</filename>
+          on the remote machine.
+        ";
+      };
+
+      proxy = mkOption {
+        default = "";
+        description = "
+          This option specifies the proxy to use for fetchurl. The real effect
+          is just exporting http_proxy, https_proxy and ftp_proxy with that
+          value.
+        ";
+        example = "http://127.0.0.1:3128";
+      };
+
+      # Environment variables for running Nix.
+      envVars = mkOption {
+        internal = true;
+        default = {};
+        type = types.attrs;
+        description = "Environment variables used by Nix.";
+      };
+
+      nrBuildUsers = mkOption {
+        default = 10;
+        description = ''
+          Number of <literal>nixbld</literal> user accounts created to
+          perform secure concurrent builds.  If you receive an error
+          message saying that “all build users are currently in use”,
+          you should increase this value.
+        '';
+      };
+
+      readOnlyStore = mkOption {
+        default = true;
+        description = ''
+          If set, NixOS will enforce the immutability of the Nix store
+          by making <filename>/nix/store</filename> a read-only bind
+          mount.  Nix will automatically make the store writable when
+          needed.
+        '';
+      };
+
+      binaryCaches = mkOption {
+        default = [ http://cache.nixos.org/ ];
+        type = types.listOf types.string;
+        description = ''
+          List of binary cache URLs used to obtain pre-built binaries
+          of Nix packages.
+        '';
+      };
+
+      trustedBinaryCaches = mkOption {
+        default = [ ];
+        example = [ http://hydra.nixos.org/ ];
+        type = types.listOf types.string;
+        description = ''
+          List of binary cache URLs that non-root users can use (in
+          addition to those specified using
+          <option>nix.binaryCaches</option>) by passing
+          <literal>--option binary-caches</literal> to Nix commands.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    nix.chrootDirs = [ "/dev" "/dev/pts" "/proc" "/bin" ];
+
+    environment.etc."nix/nix.conf".source = nixConf;
+
+    # List of machines for distributed Nix builds in the format
+    # expected by build-remote.pl.
+    environment.etc."nix.machines" =
+      { enable = cfg.distributedBuilds && !cfg.manualNixMachines;
+        text =
+          concatMapStrings (machine:
+            "${machine.sshUser}@${machine.hostName} "
+            + (if machine ? system then machine.system else concatStringsSep "," machine.systems)
+            + " ${machine.sshKey} ${toString machine.maxJobs} "
+            + (if machine ? speedFactor then toString machine.speedFactor else "1" )
+            + " "
+            + (if machine ? supportedFeatures then concatStringsSep "," machine.supportedFeatures else "" )
+            + " "
+            + (if machine ? mandatoryFeatures then concatStringsSep "," machine.mandatoryFeatures else "" )
+            + "\n"
+          ) cfg.buildMachines;
+      };
+
+    systemd.sockets."nix-daemon" =
+      { description = "Nix Daemon Socket";
+        wantedBy = [ "sockets.target" ];
+        before = [ "multi-user.target" ];
+        socketConfig.ListenStream = "/nix/var/nix/daemon-socket/socket";
+      };
+
+    systemd.services."nix-daemon" =
+      { description = "Nix Daemon";
+
+        path = [ nix pkgs.openssl pkgs.utillinux ]
+          ++ optionals cfg.distributedBuilds [ pkgs.openssh pkgs.gzip ];
+
+        environment = cfg.envVars // { CURL_CA_BUNDLE = "/etc/ssl/certs/ca-bundle.crt"; };
+
+        serviceConfig =
+          { ExecStart = "@${nix}/bin/nix-daemon nix-daemon --daemon";
+            KillMode = "process";
+            Nice = cfg.daemonNiceLevel;
+            IOSchedulingPriority = cfg.daemonIONiceLevel;
+            LimitNOFILE = 4096;
+          };
+
+        restartTriggers = [ nixConf ];
+      };
+
+    nix.envVars =
+      { NIX_CONF_DIR = "/etc/nix";
+
+        # Enable the copy-from-other-stores substituter, which allows builds
+        # to be sped up by copying build results from remote Nix stores.  To
+        # do this, mount the remote file system on a subdirectory of
+        # /var/run/nix/remote-stores.
+        NIX_OTHER_STORES = "/var/run/nix/remote-stores/*/nix";
+      }
+
+      // optionalAttrs cfg.distributedBuilds {
+        NIX_BUILD_HOOK = "${config.environment.nix}/libexec/nix/build-remote.pl";
+        NIX_REMOTE_SYSTEMS = "/etc/nix.machines";
+        NIX_CURRENT_LOAD = "/var/run/nix/current-load";
+      }
+
+      # !!! These should not be defined here, but in some general proxy configuration module!
+      // optionalAttrs (cfg.proxy != "") {
+        http_proxy = cfg.proxy;
+        https_proxy = cfg.proxy;
+        ftp_proxy = cfg.proxy;
+      };
+
+    # Set up the environment variables for running Nix.
+    environment.variables = cfg.envVars;
+
+    environment.extraInit =
+      ''
+        # Set up secure multi-user builds: non-root users build through the
+        # Nix daemon.
+        if test "$USER" != root; then
+            export NIX_REMOTE=daemon
+        else
+            export NIX_REMOTE=
+        fi
+      '';
+
+    users.extraUsers = map makeNixBuildUser (range 1 cfg.nrBuildUsers);
+
+    system.activationScripts.nix = stringAfter [ "etc" "users" ]
+      ''
+        # Nix initialisation.
+        mkdir -m 0755 -p \
+          /nix/var/nix/gcroots \
+          /nix/var/nix/temproots \
+          /nix/var/nix/manifests \
+          /nix/var/nix/userpool \
+          /nix/var/nix/profiles \
+          /nix/var/nix/db \
+          /nix/var/log/nix/drvs \
+          /nix/var/nix/channel-cache \
+          /nix/var/nix/chroots
+        mkdir -m 1777 -p \
+          /nix/var/nix/gcroots/per-user \
+          /nix/var/nix/profiles/per-user \
+          /nix/var/nix/gcroots/tmp
+
+        ln -sf /nix/var/nix/profiles /nix/var/nix/gcroots/
+        ln -sf /nix/var/nix/manifests /nix/var/nix/gcroots/
+      '';
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/nix-gc.nix b/nixos/modules/services/misc/nix-gc.nix
new file mode 100644
index 00000000000..dfdc4db65d5
--- /dev/null
+++ b/nixos/modules/services/misc/nix-gc.nix
@@ -0,0 +1,61 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+  cfg = config.nix.gc;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    nix.gc = {
+
+      automatic = mkOption {
+        default = false;
+        type = types.bool;
+        description = "Automatically run the garbage collector at a specific time.";
+      };
+
+      dates = mkOption {
+        default = "03:15";
+        type = types.uniq types.string;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>5</manvolnum></citerefentry>) of the time at
+          which the garbage collector will run.
+        '';
+      };
+
+      options = mkOption {
+        default = "";
+        example = "--max-freed $((64 * 1024**3))";
+        type = types.uniq types.string;
+        description = ''
+          Options given to <filename>nix-collect-garbage</filename> when the
+          garbage collector is run automatically.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    systemd.services.nix-gc =
+      { description = "Nix Garbage Collector";
+        serviceConfig.ExecStart = "${config.environment.nix}/bin/nix-collect-garbage ${cfg.options}";
+        startAt = optionalString cfg.automatic cfg.dates;
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/nixos-manual.nix b/nixos/modules/services/misc/nixos-manual.nix
new file mode 100644
index 00000000000..38f1917a46a
--- /dev/null
+++ b/nixos/modules/services/misc/nixos-manual.nix
@@ -0,0 +1,116 @@
+# This module includes the NixOS man-pages in the system environment,
+# and optionally starts a browser that shows the NixOS manual on one
+# of the virtual consoles.  The latter is useful for the installation
+# CD.
+
+{ config, pkgs, options, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.nixosManual;
+
+  manual = import ../../../doc/manual {
+    inherit (cfg) revision;
+    inherit pkgs options;
+  };
+
+  entry = "${manual.manual}/share/doc/nixos/manual.html";
+
+  help = pkgs.writeScriptBin "nixos-help"
+    ''
+      #! ${pkgs.stdenv.shell} -e
+      browser="$BROWSER"
+      if [ -z "$browser" ]; then
+        browser="$(type -P xdg-open || true)"
+        if [ -z "$browser" ]; then
+          browser="$(type -P w3m || true)"
+          if [ -z "$browser" ]; then
+            echo "$0: unable to start a web browser; please set \$BROWSER"
+            exit 1
+          fi
+        fi
+      fi
+      exec "$browser" ${entry}
+    '';
+
+in
+
+{
+
+  options = {
+
+    services.nixosManual.enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = ''
+        Whether to build the NixOS manual pages.
+      '';
+    };
+
+    services.nixosManual.showManual = mkOption {
+      default = false;
+      description = ''
+        Whether to show the NixOS manual on one of the virtual
+        consoles.
+      '';
+    };
+
+    services.nixosManual.ttyNumber = mkOption {
+      default = "8";
+      description = ''
+        Virtual console on which to show the manual.
+      '';
+    };
+
+    services.nixosManual.browser = mkOption {
+      default = "${pkgs.w3m}/bin/w3m";
+      description = ''
+        Browser used to show the manual.
+      '';
+    };
+
+    services.nixosManual.revision = mkOption {
+      default = "local";
+      type = types.uniq types.string;
+      description = ''
+        Revision of the targeted source file.  This value can either be
+        <literal>"local"</literal>, <literal>"HEAD"</literal> or any
+        revision number embedded in a string.
+      '';
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    system.build.manual = manual;
+
+    environment.systemPackages = [ manual.manpages help ];
+
+    boot.extraTTYs = mkIf cfg.showManual ["tty${cfg.ttyNumber}"];
+
+    systemd.services = optionalAttrs cfg.showManual
+      { "nixos-manual" =
+        { description = "NixOS Manual";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig =
+            { ExecStart = "${cfg.browser} ${entry}";
+              StandardInput = "tty";
+              StandardOutput = "tty";
+              TTYPath = "/dev/tty${cfg.ttyNumber}";
+              TTYReset = true;
+              TTYVTDisallocate = true;
+              Restart = "always";
+            };
+        };
+      };
+
+    services.mingetty.helpLine = mkIf cfg.showManual
+      "\nPress <Alt-F${toString cfg.ttyNumber}> for the NixOS manual.";
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/rogue.nix b/nixos/modules/services/misc/rogue.nix
new file mode 100644
index 00000000000..94fa8850750
--- /dev/null
+++ b/nixos/modules/services/misc/rogue.nix
@@ -0,0 +1,59 @@
+# Execute the game `rogue' on tty 9.  Mostly used by the NixOS
+# installation CD.
+
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.rogue;
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.rogue.enable = mkOption {
+      default = false;
+      description = ''
+        Whether to enable the Rogue game on one of the virtual
+        consoles.
+      '';
+    };
+
+    services.rogue.tty = mkOption {
+      default = "tty9";
+      description = ''
+        Virtual console on which to run Rogue.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    boot.extraTTYs = [ cfg.tty ];
+
+    systemd.services.rogue =
+      { description = "Rogue dungeon crawling game";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig =
+          { ExecStart = "${pkgs.rogue}/bin/rogue";
+            StandardInput = "tty";
+            StandardOutput = "tty";
+            TTYPath = "/dev/${cfg.tty}";
+            TTYReset = true;
+            TTYVTDisallocate = true;
+            Restart = "always";
+          };
+      };
+
+  };
+
+}
diff --git a/nixos/modules/services/misc/svnserve.nix b/nixos/modules/services/misc/svnserve.nix
new file mode 100644
index 00000000000..b0806d14738
--- /dev/null
+++ b/nixos/modules/services/misc/svnserve.nix
@@ -0,0 +1,46 @@
+# SVN server
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfg = config.services.svnserve;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.svnserve = {
+
+      enable = mkOption {
+        default = false;
+        description = "Whether to enable svnserve to serve Subversion repositories through the SVN protocol.";
+      };
+
+      svnBaseDir = mkOption {
+        default = "/repos";
+        description = "Base directory from which Subversion repositories are accessed.";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    jobs.svnserve = {
+      startOn = "started network-interfaces";
+      stopOn = "stopping network-interfaces";
+
+      preStart = "mkdir -p ${cfg.svnBaseDir}";
+
+      exec = "${pkgs.subversion}/bin/svnserve -r ${cfg.svnBaseDir} -d --foreground --pid-file=/var/run/svnserve.pid";
+    };
+  };
+}
diff --git a/nixos/modules/services/misc/synergy.nix b/nixos/modules/services/misc/synergy.nix
new file mode 100644
index 00000000000..91c0acb0bc2
--- /dev/null
+++ b/nixos/modules/services/misc/synergy.nix
@@ -0,0 +1,131 @@
+{ config, pkgs, ... }:
+
+with pkgs.lib;
+
+let
+
+  cfgC = config.services.synergy.client;
+  cfgS = config.services.synergy.server;
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.synergy = {
+
+      # !!! All these option descriptions needs to be cleaned up.
+
+      client = {
+        enable = mkOption {
+          default = false;
+          description = "
+            Whether to enable the synergy client (receive keyboard and mouse events from a synergy server)
+          ";
+        };
+        screenName = mkOption {
+          default = "";
+          description = "
+            Use screen-name instead of the hostname to identify
+            ourselves to the server.
+            ";
+        };
+        serverAddress = mkOption {
+          description = "
+            The server address is of the form: [hostname][:port].  The
+            hostname must be the address or hostname of the server.  The
+            port overrides the default port, 24800.
+          ";
+        };
+        autoStart = mkOption {
+          default = true;
+          type = types.bool;
+          description = "Whether synergy-client should be started automatically.";
+        };
+      };
+
+      server = {
+        enable = mkOption {
+          default = false;
+          description = "
+            Whether to enable the synergy server (send keyboard and mouse events)
+          ";
+        };
+        configFile = mkOption {
+          default = "/etc/synergy-server.conf";
+          description = "
+            The Synergy server configuration file; see the commented example at the end of this module.
+          ";
+        };
+        screenName = mkOption {
+          default = "";
+          description = "
+            Use screen-name instead of the hostname to identify
+            this screen in the configuration.
+            ";
+        };
+        address = mkOption {
+          default = "";
+          description = "listen for clients on the given address";
+        };
+        autoStart = mkOption {
+          default = true;
+          type = types.bool;
+          description = "Whether synergy-server should be started automatically.";
+        };
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    systemd.services."synergy-client" = mkIf cfgC.enable {
+      after = [ "network.target" ];
+      description = "Synergy client";
+      wantedBy = optional cfgC.autoStart "multi-user.target";
+      path = [ pkgs.synergy ];
+      serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergyc -f ${optionalString (cfgC.screenName != "") "-n ${cfgC.screenName}"} ${cfgC.serverAddress}'';
+    };
+
+    systemd.services."synergy-server" = mkIf cfgS.enable {
+      after = [ "network.target" ];
+      description = "Synergy server";
+      wantedBy = optional cfgS.autoStart "multi-user.target";
+      path = [ pkgs.synergy ];
+      serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergys -c ${cfgS.configFile} -f ${optionalString (cfgS.address != "") "-a ${cfgS.address}"} ${optionalString (cfgS.screenName != "") "-n ${cfgS.screenName}" }'';
+    };
+
+  };
+
+}
+
+/* SYNERGY SERVER example configuration file
+section: screens
+  laptop:
+  dm:
+  win:
+end
+section: aliases
+    laptop:
+      192.168.5.5
+    dm:
+      192.168.5.78
+    win:
+      192.168.5.54
+end
+section: links
+   laptop:
+       left = dm
+   dm:
+       right = laptop
+       left = win
+  win:
+      right = dm
+end
+*/