Diffstat (limited to 'nixos/modules/hardware/video')
-rw-r--r--  nixos/modules/hardware/video/amdgpu-pro.nix                      70
-rw-r--r--  nixos/modules/hardware/video/bumblebee.nix                       93
-rw-r--r--  nixos/modules/hardware/video/capture/mwprocapture.nix            56
-rw-r--r--  nixos/modules/hardware/video/displaylink.nix                     76
-rw-r--r--  nixos/modules/hardware/video/hidpi.nix                           16
-rw-r--r--  nixos/modules/hardware/video/nvidia.nix                         391
-rw-r--r--  nixos/modules/hardware/video/radeon.nix                           3
-rw-r--r--  nixos/modules/hardware/video/switcheroo-control.nix              18
-rw-r--r--  nixos/modules/hardware/video/uvcvideo/default.nix                64
-rw-r--r--  nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix  45
-rw-r--r--  nixos/modules/hardware/video/webcam/facetimehd.nix               44
11 files changed, 876 insertions(+), 0 deletions(-)
diff --git a/nixos/modules/hardware/video/amdgpu-pro.nix b/nixos/modules/hardware/video/amdgpu-pro.nix
new file mode 100644
index 00000000000..d784befc9b8
--- /dev/null
+++ b/nixos/modules/hardware/video/amdgpu-pro.nix
@@ -0,0 +1,70 @@
+# This module provides the proprietary AMDGPU-PRO drivers.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  drivers = config.services.xserver.videoDrivers;
+
+  enabled = elem "amdgpu-pro" drivers;
+
+  package = config.boot.kernelPackages.amdgpu-pro;
+  package32 = pkgs.pkgsi686Linux.linuxPackages.amdgpu-pro.override { kernel = null; };
+
+  opengl = config.hardware.opengl;
+
+in
+
+{
+
+  config = mkIf enabled {
+
+    nixpkgs.config.xorg.abiCompat = "1.20";
+
+    services.xserver.drivers = singleton
+      { name = "amdgpu"; modules = [ package ]; display = true; };
+
+    hardware.opengl.package = package;
+    hardware.opengl.package32 = package32;
+    hardware.opengl.setLdLibraryPath = true;
+
+    boot.extraModulePackages = [ package.kmod ];
+
+    boot.kernelPackages = pkgs.linuxKernel.packagesFor
+      (pkgs.linuxKernel.kernels.linux_5_10.override {
+        structuredExtraConfig = {
+          DEVICE_PRIVATE = kernel.yes;
+          KALLSYMS_ALL = kernel.yes;
+        };
+      });
+
+    hardware.firmware = [ package.fw ];
+
+    system.activationScripts.setup-amdgpu-pro = ''
+      ln -sfn ${package}/opt/amdgpu{,-pro} /run
+    '';
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "DEVICE_PRIVATE")
+      (isYes "KALLSYMS_ALL")
+    ];
+
+    boot.initrd.extraUdevRulesCommands = ''
+      cp -v ${package}/etc/udev/rules.d/*.rules $out/
+    '';
+
+    environment.systemPackages =
+      [ package.vulkan ] ++
+      # this isn't really DRI, but we'll reuse this option for now
+      optional config.hardware.opengl.driSupport32Bit package32.vulkan;
+
+    environment.etc = {
+      "modprobe.d/blacklist-radeon.conf".source = package + "/etc/modprobe.d/blacklist-radeon.conf";
+      amd.source = package + "/etc/amd";
+    };
+
+  };
+
+}
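
The module above is activated purely through the X driver list. A minimal host configuration that would pull it in might look like the following sketch (illustrative only, not part of the diff; note that the module itself pins boot.kernelPackages to a 5.10 kernel with the required config options):

    { config, pkgs, ... }:
    {
      services.xserver.enable = true;
      services.xserver.videoDrivers = [ "amdgpu-pro" ];

      # Optional: 32-bit OpenGL/Vulkan userland (e.g. for Steam/Wine);
      # this also pulls in package32.vulkan as wired up above.
      hardware.opengl.driSupport32Bit = true;
    }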
diff --git a/nixos/modules/hardware/video/bumblebee.nix b/nixos/modules/hardware/video/bumblebee.nix
new file mode 100644
index 00000000000..b6af4f80445
--- /dev/null
+++ b/nixos/modules/hardware/video/bumblebee.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.bumblebee;
+
+  kernel = config.boot.kernelPackages;
+
+  useNvidia = cfg.driver == "nvidia";
+
+  bumblebee = pkgs.bumblebee.override {
+    inherit useNvidia;
+    useDisplayDevice = cfg.connectDisplay;
+  };
+
+  useBbswitch = cfg.pmMethod == "bbswitch" || cfg.pmMethod == "auto" && useNvidia;
+
+  primus = pkgs.primus.override {
+    inherit useNvidia;
+  };
+
+in
+
+{
+
+  options = {
+    hardware.bumblebee = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Enable the bumblebee daemon to manage Optimus hybrid video cards.
+          This should power off the secondary GPU until its use is requested
+          by running an application with optirun.
+        '';
+      };
+
+      group = mkOption {
+        default = "wheel";
+        example = "video";
+        type = types.str;
+        description = "Group for bumblebee socket";
+      };
+
+      connectDisplay = mkOption {
+        default = false;
+        type = types.bool;
+        description = ''
+          Set to true if you intend to connect your discrete card to a
+          monitor. This option will set up your Nvidia card for EDID
+          discovery and to turn on the monitor signal.
+
+          Only the nvidia driver is supported so far.
+        '';
+      };
+
+      driver = mkOption {
+        default = "nvidia";
+        type = types.enum [ "nvidia" "nouveau" ];
+        description = ''
+          Set the driver used by bumblebeed. Supported drivers are nouveau and nvidia.
+        '';
+      };
+
+      pmMethod = mkOption {
+        default = "auto";
+        type = types.enum [ "auto" "bbswitch" "switcheroo" "none" ];
+        description = ''
+          Set the preferred power management method for the unused card.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.blacklistedKernelModules = [ "nvidia-drm" "nvidia" "nouveau" ];
+    boot.kernelModules = optional useBbswitch "bbswitch";
+    boot.extraModulePackages = optional useBbswitch kernel.bbswitch ++ optional useNvidia kernel.nvidia_x11.bin;
+
+    environment.systemPackages = [ bumblebee primus ];
+
+    systemd.services.bumblebeed = {
+      description = "Bumblebee Hybrid Graphics Switcher";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "display-manager.service" ];
+      serviceConfig = {
+        ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver} --pm-method ${cfg.pmMethod}";
+      };
+    };
+  };
+}
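
A sketch of how a laptop configuration might use this module (the group and pmMethod values are only illustrative):

    { ... }:
    {
      hardware.bumblebee = {
        enable = true;
        group = "video";        # members of this group may use the bumblebee socket
        driver = "nvidia";      # or "nouveau"
        pmMethod = "bbswitch";  # power the discrete GPU down while idle
      };
    }

Applications are then started on the discrete GPU with e.g. `optirun glxgears`, or `primusrun glxgears` through the bundled primus.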
diff --git a/nixos/modules/hardware/video/capture/mwprocapture.nix b/nixos/modules/hardware/video/capture/mwprocapture.nix
new file mode 100644
index 00000000000..76cb4c6ee9b
--- /dev/null
+++ b/nixos/modules/hardware/video/capture/mwprocapture.nix
@@ -0,0 +1,56 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.mwProCapture;
+
+  kernelPackages = config.boot.kernelPackages;
+
+in
+
+{
+
+  options.hardware.mwProCapture.enable = mkEnableOption "Magewell Pro Capture family kernel module";
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "ProCapture" ];
+
+    environment.systemPackages = [ kernelPackages.mwprocapture ];
+
+    boot.extraModulePackages = [ kernelPackages.mwprocapture ];
+
+    boot.extraModprobeConfig = ''
+      # Set the png picture to be displayed when no input signal is detected.
+      options ProCapture nosignal_file=${kernelPackages.mwprocapture}/res/NoSignal.png
+
+      # Set the png picture to be displayed when an unsupported input signal is detected.
+      options ProCapture unsupported_file=${kernelPackages.mwprocapture}/res/Unsupported.png
+
+      # Set the png picture to be displayed when a locking input signal is detected.
+      options ProCapture locking_file=${kernelPackages.mwprocapture}/res/Locking.png
+
+      # Message signaled interrupts switch
+      #options ProCapture disable_msi=0
+
+      # Set the debug level
+      #options ProCapture debug_level=0
+
+      # Force init switch eeprom
+      #options ProCapture init_switch_eeprom=0
+
+      # Min frame interval for VIDIOC_ENUM_FRAMEINTERVALS (default: 166666, in units of 100 ns)
+      #options ProCapture enum_frameinterval_min=166666
+
+      # VIDIOC_ENUM_FRAMESIZES type (1: DISCRETE; 2: STEPWISE; otherwise: CONTINUOUS )
+      #options ProCapture enum_framesizes_type=0
+
+      # Parameters for internal usage
+      #options ProCapture internal_params=""
+    '';
+
+  };
+
+}
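
Enabling the module from a host configuration is a one-liner. Because boot.extraModprobeConfig is a merged lines option, additional driver parameters from the commented list above can still be layered on top; the debug_level value below is only an illustration:

    { ... }:
    {
      hardware.mwProCapture.enable = true;

      # Example of tweaking one of the optional driver parameters.
      boot.extraModprobeConfig = ''
        options ProCapture debug_level=1
      '';
    }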
diff --git a/nixos/modules/hardware/video/displaylink.nix b/nixos/modules/hardware/video/displaylink.nix
new file mode 100644
index 00000000000..912f53da836
--- /dev/null
+++ b/nixos/modules/hardware/video/displaylink.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  enabled = elem "displaylink" config.services.xserver.videoDrivers;
+
+  evdi = config.boot.kernelPackages.evdi;
+
+  displaylink = pkgs.displaylink.override {
+    inherit evdi;
+  };
+
+in
+
+{
+
+  config = mkIf enabled {
+
+    boot.extraModulePackages = [ evdi ];
+    boot.kernelModules = [ "evdi" ];
+
+    environment.etc."X11/xorg.conf.d/40-displaylink.conf".text = ''
+      Section "OutputClass"
+        Identifier  "DisplayLink"
+        MatchDriver "evdi"
+        Driver      "modesetting"
+        Option      "AccelMethod" "none"
+      EndSection
+    '';
+
+    # make the device available
+    services.xserver.displayManager.sessionCommands = ''
+      ${lib.getBin pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource 1 0
+    '';
+
+    # These are taken from displaylink-installer.sh and from the Arch Linux AUR package.
+
+    services.udev.packages = [ displaylink ];
+
+    powerManagement.powerDownCommands = ''
+      # flush any bytes in the pipe
+      while read -n 1 -t 1 SUSPEND_RESULT < /tmp/PmMessagesPort_out; do : ; done;
+
+      # suspend DisplayLinkManager
+      echo "S" > /tmp/PmMessagesPort_in
+
+      # wait until DisplayLinkManager has finished suspending
+      if [ -f /tmp/PmMessagesPort_out ]; then
+        read -n 1 -t 10 SUSPEND_RESULT < /tmp/PmMessagesPort_out
+      fi
+    '';
+
+    powerManagement.resumeCommands = ''
+      #resume DisplayLinkManager
+      echo "R" > /tmp/PmMessagesPort_in
+    '';
+
+    systemd.services.dlm = {
+      description = "DisplayLink Manager Service";
+      after = [ "display-manager.service" ];
+      conflicts = [ "getty@tty7.service" ];
+
+      serviceConfig = {
+        ExecStart = "${displaylink}/bin/DisplayLinkManager";
+        Restart = "always";
+        RestartSec = 5;
+        LogsDirectory = "displaylink";
+      };
+    };
+
+  };
+
+}
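
As with amdgpu-pro, this module is selected through the X driver list. A sketch (note that pkgs.displaylink is unfree and, in many nixpkgs revisions, has to be fetched into the store manually before it can be built):

    { ... }:
    {
      services.xserver.enable = true;
      # "displaylink" activates this module; "modesetting" drives the primary GPU.
      services.xserver.videoDrivers = [ "displaylink" "modesetting" ];
    }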
diff --git a/nixos/modules/hardware/video/hidpi.nix b/nixos/modules/hardware/video/hidpi.nix
new file mode 100644
index 00000000000..ac72b652504
--- /dev/null
+++ b/nixos/modules/hardware/video/hidpi.nix
@@ -0,0 +1,16 @@
+{ lib, pkgs, config, ...}:
+with lib;
+
+{
+  options.hardware.video.hidpi.enable = mkEnableOption "Font/DPI configuration optimized for HiDPI displays";
+
+  config = mkIf config.hardware.video.hidpi.enable {
+    console.font = lib.mkDefault "${pkgs.terminus_font}/share/consolefonts/ter-v32n.psf.gz";
+
+    # Needed when typing in passwords for full disk encryption
+    console.earlySetup = mkDefault true;
+    boot.loader.systemd-boot.consoleMode = mkDefault "1";
+
+    # TODO Find reasonable defaults X11 & wayland
+  };
+}
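
Everything this module sets uses mkDefault, so individual settings can still be overridden from the host configuration. A sketch (the alternative Terminus size is just an example):

    { pkgs, ... }:
    {
      hardware.video.hidpi.enable = true;

      # Override the default console font chosen by the module.
      console.font = "${pkgs.terminus_font}/share/consolefonts/ter-v24n.psf.gz";
    }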
diff --git a/nixos/modules/hardware/video/nvidia.nix b/nixos/modules/hardware/video/nvidia.nix
new file mode 100644
index 00000000000..a81220a92a1
--- /dev/null
+++ b/nixos/modules/hardware/video/nvidia.nix
@@ -0,0 +1,391 @@
+# This module provides the proprietary NVIDIA X11 / OpenGL drivers.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  nvidia_x11 = let
+    drivers = config.services.xserver.videoDrivers;
+    isDeprecated = str: (hasPrefix "nvidia" str) && (str != "nvidia");
+    hasDeprecated = drivers: any isDeprecated drivers;
+  in if (hasDeprecated drivers) then
+    throw ''
+      Selecting an NVIDIA driver version via services.xserver.videoDrivers has changed in NixOS 19.03: the version is now set using `hardware.nvidia.package`.
+    ''
+  else if (elem "nvidia" drivers) then cfg.package else null;
+
+  enabled = nvidia_x11 != null;
+  cfg = config.hardware.nvidia;
+
+  pCfg = cfg.prime;
+  syncCfg = pCfg.sync;
+  offloadCfg = pCfg.offload;
+  primeEnabled = syncCfg.enable || offloadCfg.enable;
+  nvidiaPersistencedEnabled =  cfg.nvidiaPersistenced;
+  nvidiaSettings = cfg.nvidiaSettings;
+in
+
+{
+  imports =
+    [
+      (mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "enable" ] [ "hardware" "nvidia" "prime" "sync" "enable" ])
+      (mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "allowExternalGpu" ] [ "hardware" "nvidia" "prime" "sync" "allowExternalGpu" ])
+      (mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "nvidiaBusId" ] [ "hardware" "nvidia" "prime" "nvidiaBusId" ])
+      (mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "intelBusId" ] [ "hardware" "nvidia" "prime" "intelBusId" ])
+    ];
+
+  options = {
+    hardware.nvidia.powerManagement.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Experimental power management through systemd. For more information, see
+        the NVIDIA docs, Chapter 21 ("Configuring Power Management Support").
+      '';
+    };
+
+    hardware.nvidia.powerManagement.finegrained = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Experimental power management of PRIME offload. For more information, see
+        the NVIDIA docs, Chapter 22 ("PCI-Express Runtime Power Management").
+      '';
+    };
+
+    hardware.nvidia.modesetting.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable kernel modesetting when using the NVIDIA proprietary driver.
+
+        Enabling this fixes screen tearing when using Optimus via PRIME (see
+        <option>hardware.nvidia.prime.sync.enable</option>). This is not enabled
+        by default because it is not officially supported by NVIDIA and would not
+        work with SLI.
+      '';
+    };
+
+    hardware.nvidia.prime.nvidiaBusId = mkOption {
+      type = types.str;
+      default = "";
+      example = "PCI:1:0:0";
+      description = ''
+        Bus ID of the NVIDIA GPU. You can find it using lspci; for example if lspci
+        shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
+      '';
+    };
+
+    hardware.nvidia.prime.intelBusId = mkOption {
+      type = types.str;
+      default = "";
+      example = "PCI:0:2:0";
+      description = ''
+        Bus ID of the Intel GPU. You can find it using lspci; for example if lspci
+        shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
+      '';
+    };
+
+    hardware.nvidia.prime.amdgpuBusId = mkOption {
+      type = types.str;
+      default = "";
+      example = "PCI:4:0:0";
+      description = ''
+        Bus ID of the AMD APU. You can find it using lspci; for example if lspci
+        shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
+      '';
+    };
+
+    hardware.nvidia.prime.sync.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
+        If enabled, the NVIDIA GPU will be always on and used for all rendering,
+        while enabling output to displays attached only to the integrated Intel GPU
+        without a multiplexer.
+
+        Note that this option only has any effect if the "nvidia" driver is specified
+        in <option>services.xserver.videoDrivers</option>, and it should preferably
+        be the only driver there.
+
+        If this is enabled, then the bus IDs of the NVIDIA and Intel GPUs have to be
+        specified (<option>hardware.nvidia.prime.nvidiaBusId</option> and
+        <option>hardware.nvidia.prime.intelBusId</option>).
+
+        If you enable this, you may want to also enable kernel modesetting for the
+        NVIDIA driver (<option>hardware.nvidia.modesetting.enable</option>) in order
+        to prevent tearing.
+
+        Note that this configuration only works with display managers that
+        support the <option>services.xserver.displayManager.setupCommands</option>
+        option.
+      '';
+    };
+
+    hardware.nvidia.prime.sync.allowExternalGpu = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Configure X to allow external NVIDIA GPUs when using Optimus.
+      '';
+    };
+
+    hardware.nvidia.prime.offload.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable render offload support using the NVIDIA proprietary driver via PRIME.
+
+        If this is enabled, then the bus IDs of the NVIDIA and Intel GPUs have to be
+        specified (<option>hardware.nvidia.prime.nvidiaBusId</option> and
+        <option>hardware.nvidia.prime.intelBusId</option>).
+      '';
+    };
+
+    hardware.nvidia.nvidiaSettings = mkOption {
+      default = true;
+      type = types.bool;
+      description = ''
+        Whether to add nvidia-settings, NVIDIA's GUI configuration tool, to
+        systemPackages.
+      '';
+    };
+
+    hardware.nvidia.nvidiaPersistenced = mkOption {
+      default = false;
+      type = types.bool;
+      description = ''
+        Whether to run nvidia-persistenced for NVIDIA GPU headless mode. It
+        ensures all GPUs stay awake even during headless operation.
+      '';
+    };
+
+    hardware.nvidia.package = lib.mkOption {
+      type = lib.types.package;
+      default = config.boot.kernelPackages.nvidiaPackages.stable;
+      defaultText = literalExpression "config.boot.kernelPackages.nvidiaPackages.stable";
+      description = ''
+        The NVIDIA X11 derivation to use.
+      '';
+      example = literalExpression "config.boot.kernelPackages.nvidiaPackages.legacy_340";
+    };
+  };
+
+  config = let
+      igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
+      igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
+  in mkIf enabled {
+    assertions = [
+      {
+        assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
+        message = ''
+          You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.
+        '';
+      }
+
+      {
+        assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
+        message = ''
+          When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.
+        '';
+      }
+
+      {
+        assertion = offloadCfg.enable -> versionAtLeast nvidia_x11.version "435.21";
+        message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
+      }
+
+      {
+        assertion = !(syncCfg.enable && offloadCfg.enable);
+        message = "Only one NVIDIA PRIME solution may be used at a time.";
+      }
+
+      {
+        assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
+        message = "Sync precludes powering down the NVIDIA GPU.";
+      }
+
+      {
+        assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
+        message = "Fine-grained power management requires offload to be enabled.";
+      }
+
+      {
+        assertion = cfg.powerManagement.enable -> (
+          builtins.pathExists (cfg.package.out + "/bin/nvidia-sleep.sh") &&
+          builtins.pathExists (cfg.package.out + "/lib/systemd/system-sleep/nvidia")
+        );
+        message = "Required files for driver based power management don't exist.";
+      }
+    ];
+
+    # If Optimus/PRIME is enabled, we:
+    # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+    #   "nvidia" driver.
+    # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+    #   "nvidia" driver, in order to allow the X server to start without any outputs.
+    # - Add a separate Device section for the Intel GPU, using the "modesetting"
+    #   driver and with the configured BusID.
+    # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+    #   driver and with the configured BusID.
+    # - Reference that Device section from the ServerLayout section as an inactive
+    #   device.
+    # - Configure the display manager to run specific `xrandr` commands which will
+    #   configure/enable displays connected to the Intel iGPU / AMD APU.
+
+    services.xserver.useGlamor = mkDefault offloadCfg.enable;
+
+    services.xserver.drivers = optional primeEnabled {
+      name = igpuDriver;
+      display = offloadCfg.enable;
+      modules = optionals (igpuDriver == "amdgpu") [ pkgs.xorg.xf86videoamdgpu ];
+      deviceSection = ''
+        BusID "${igpuBusId}"
+        ${optionalString syncCfg.enable ''Option "AccelMethod" "none"''}
+      '';
+    } ++ singleton {
+      name = "nvidia";
+      modules = [ nvidia_x11.bin ];
+      display = !offloadCfg.enable;
+      deviceSection = optionalString primeEnabled
+        ''
+          BusID "${pCfg.nvidiaBusId}"
+          ${optionalString syncCfg.allowExternalGpu "Option \"AllowExternalGpus\""}
+          ${optionalString cfg.powerManagement.finegrained "Option \"NVreg_DynamicPowerManagement=0x02\""}
+        '';
+      screenSection =
+        ''
+          Option "RandRRotation" "on"
+          ${optionalString syncCfg.enable "Option \"AllowEmptyInitialConfiguration\""}
+        '';
+    };
+
+    services.xserver.serverLayoutSection = optionalString syncCfg.enable ''
+      Inactive "Device-${igpuDriver}[0]"
+    '' + optionalString offloadCfg.enable ''
+      Option "AllowNVIDIAGPUScreens"
+    '';
+
+    services.xserver.displayManager.setupCommands = optionalString syncCfg.enable ''
+      # Added by nvidia configuration module for Optimus/PRIME.
+      ${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource ${igpuDriver} NVIDIA-0
+      ${pkgs.xorg.xrandr}/bin/xrandr --auto
+    '';
+
+    environment.etc."nvidia/nvidia-application-profiles-rc" = mkIf nvidia_x11.useProfiles {
+      source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
+    };
+
+    # 'nvidia_x11' installs its files to /run/opengl-driver/...
+    environment.etc."egl/egl_external_platform.d".source =
+      "/run/opengl-driver/share/egl/egl_external_platform.d/";
+
+    hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
+    hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
+    hardware.opengl.extraPackages = [
+      pkgs.nvidia-vaapi-driver
+    ] ++ optional offloadCfg.enable nvidia_x11.out;
+    hardware.opengl.extraPackages32 = [
+      pkgs.pkgsi686Linux.nvidia-vaapi-driver
+    ] ++ optional offloadCfg.enable nvidia_x11.lib32;
+
+    environment.systemPackages = [ nvidia_x11.bin ]
+      ++ optionals cfg.nvidiaSettings [ nvidia_x11.settings ]
+      ++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ];
+
+    systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;
+
+    systemd.services = let
+      baseNvidiaService = state: {
+        description = "NVIDIA system ${state} actions";
+
+        path = with pkgs; [ kbd ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
+        };
+      };
+
+      nvidiaService = sleepState: (baseNvidiaService sleepState) // {
+        before = [ "systemd-${sleepState}.service" ];
+        requiredBy = [ "systemd-${sleepState}.service" ];
+      };
+
+      services = (builtins.listToAttrs (map (t: nameValuePair "nvidia-${t}" (nvidiaService t)) ["hibernate" "suspend"]))
+        // {
+          nvidia-resume = (baseNvidiaService "resume") // {
+            after = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+            requiredBy = [ "systemd-suspend.service" "systemd-hibernate.service" ];
+          };
+        };
+    in optionalAttrs cfg.powerManagement.enable services
+      // optionalAttrs nvidiaPersistencedEnabled {
+        "nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
+          description = "NVIDIA Persistence Daemon";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            Type = "forking";
+            Restart = "always";
+            PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+            ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
+            ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+          };
+        };
+      };
+
+    systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
+        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
+      ++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+        "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+    boot.extraModulePackages = [ nvidia_x11.bin ];
+
+    # nvidia-uvm is required by CUDA applications.
+    boot.kernelModules = [ "nvidia-uvm" ] ++
+      optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
+
+    # If requested enable modesetting via kernel parameter.
+    boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+      ++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
+
+    services.udev.extraRules =
+      ''
+        # Create the NVIDIA device nodes (nvidiactl, nvidia-modeset, nvidia-uvm, ...) when the corresponding kernel modules are loaded.
+        KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 255'"
+        KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) 254'"
+        KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \  -f 1) %n'"
+        KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+        KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+      '' + optionalString cfg.powerManagement.finegrained ''
+        # Remove NVIDIA USB xHCI Host Controller devices, if present
+        ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
+
+        # Remove NVIDIA USB Type-C UCSI devices, if present
+        ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
+
+        # Remove NVIDIA Audio devices, if present
+        ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
+
+        # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
+        ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
+        ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
+
+        # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
+        ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
+        ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
+      '';
+
+    boot.extraModprobeConfig = mkIf cfg.powerManagement.finegrained ''
+      options nvidia "NVreg_DynamicPowerManagement=0x02"
+    '';
+
+    boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];
+
+    services.acpid.enable = true;
+
+  };
+
+}
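
Putting the pieces together, a hypothetical Optimus laptop configuration using PRIME render offload with fine-grained power management could look like this (the bus IDs are examples and must be read from lspci, as described in the option descriptions above):

    { config, pkgs, ... }:
    {
      services.xserver.enable = true;
      services.xserver.videoDrivers = [ "nvidia" ];

      hardware.nvidia = {
        modesetting.enable = true;            # recommended; fixes tearing with PRIME sync
        prime = {
          offload.enable = true;
          # From lspci: "00:02.0" -> "PCI:0:2:0", "01:00.0" -> "PCI:1:0:0".
          intelBusId  = "PCI:0:2:0";
          nvidiaBusId = "PCI:1:0:0";
        };
        powerManagement.finegrained = true;   # requires offload.enable (asserted above)
      };
    }

With offload mode, individual programs are pushed onto the discrete GPU by setting the usual NVIDIA offload environment variables, e.g. `__NV_PRIME_RENDER_OFFLOAD=1 __GLX_VENDOR_LIBRARY_NAME=nvidia glxinfo`.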
diff --git a/nixos/modules/hardware/video/radeon.nix b/nixos/modules/hardware/video/radeon.nix
new file mode 100644
index 00000000000..c92b7a0509d
--- /dev/null
+++ b/nixos/modules/hardware/video/radeon.nix
@@ -0,0 +1,3 @@
+{
+  hardware.enableRedistributableFirmware = true;
+}
diff --git a/nixos/modules/hardware/video/switcheroo-control.nix b/nixos/modules/hardware/video/switcheroo-control.nix
new file mode 100644
index 00000000000..199adb2ad8f
--- /dev/null
+++ b/nixos/modules/hardware/video/switcheroo-control.nix
@@ -0,0 +1,18 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  pkg = [ pkgs.switcheroo-control ];
+  cfg = config.services.switcherooControl;
+in {
+  options.services.switcherooControl = {
+    enable = mkEnableOption "switcheroo-control, a D-Bus service to check the availability of dual-GPU";
+  };
+
+  config = mkIf cfg.enable {
+    services.dbus.packages = pkg;
+    environment.systemPackages = pkg;
+    systemd.packages = pkg;
+    systemd.targets.multi-user.wants = [ "switcheroo-control.service" ];
+  };
+}
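
Enabling it is a single option; a sketch:

    { ... }:
    {
      services.switcherooControl.enable = true;
    }

Desktops such as GNOME consume the D-Bus service to offer a "Launch using Discrete Graphics Card" action; recent versions of the package also ship a switcherooctl command-line tool for listing the detected GPUs and launching programs on a chosen one.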
diff --git a/nixos/modules/hardware/video/uvcvideo/default.nix b/nixos/modules/hardware/video/uvcvideo/default.nix
new file mode 100644
index 00000000000..338062cf69b
--- /dev/null
+++ b/nixos/modules/hardware/video/uvcvideo/default.nix
@@ -0,0 +1,64 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.uvcvideo;
+
+  uvcdynctrl-udev-rules = packages: pkgs.callPackage ./uvcdynctrl-udev-rules.nix {
+    drivers = packages;
+    udevDebug = false;
+  };
+
+in
+
+{
+
+  options = {
+    services.uvcvideo.dynctrl = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable <command>uvcvideo</command> dynamic controls.
+
+          Note that enabling this brings the <command>uvcdynctrl</command> tool
+          into your environment and registers all dynamic controls from the
+          specified <command>packages</command> with the <command>uvcvideo</command> driver.
+        '';
+      };
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        example = literalExpression "[ pkgs.tiscamera ]";
+        description = ''
+          List of packages containing <command>uvcvideo</command> dynamic controls
+          rules. All files found in
+          <filename><replaceable>pkg</replaceable>/share/uvcdynctrl/data</filename>
+          will be included.
+
+          Note that these will serve as input to the <command>libwebcam</command>
+          package which, through its own <command>udev</command> rule, will register
+          the dynamic controls from the specified packages with the <command>uvcvideo</command>
+          driver.
+        '';
+        apply = map getBin;
+      };
+    };
+  };
+
+  config = mkIf cfg.dynctrl.enable {
+
+    services.udev.packages = [
+      (uvcdynctrl-udev-rules cfg.dynctrl.packages)
+    ];
+
+    environment.systemPackages = [
+      pkgs.libwebcam
+    ];
+
+  };
+}
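
A sketch of a configuration using the dynctrl support, reusing the tiscamera example from the option description (any package that ships files under share/uvcdynctrl/data would work):

    { pkgs, ... }:
    {
      services.uvcvideo.dynctrl = {
        enable = true;
        packages = [ pkgs.tiscamera ];
      };
    }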
diff --git a/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
new file mode 100644
index 00000000000..a808429c999
--- /dev/null
+++ b/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
@@ -0,0 +1,45 @@
+{ buildEnv
+, libwebcam
+, makeWrapper
+, runCommand
+, drivers ? []
+, udevDebug ? false
+}:
+
+let
+  version = "0.0.0";
+
+  dataPath = buildEnv {
+    name = "uvcdynctrl-with-drivers-data-path";
+    paths = drivers ++ [ libwebcam ];
+    pathsToLink = [ "/share/uvcdynctrl/data" ];
+    ignoreCollisions = false;
+  };
+
+  dataDir = "${dataPath}/share/uvcdynctrl/data";
+  udevDebugVarValue = if udevDebug then "1" else "0";
+in
+
+runCommand "uvcdynctrl-udev-rules-${version}"
+{
+  inherit dataPath;
+  buildInputs = [
+    makeWrapper
+    libwebcam
+  ];
+  dontPatchELF = true;
+  dontStrip = true;
+  preferLocalBuild = true;
+}
+''
+  mkdir -p "$out/lib/udev"
+  makeWrapper "${libwebcam}/lib/udev/uvcdynctrl" "$out/lib/udev/uvcdynctrl" \
+    --set NIX_UVCDYNCTRL_DATA_DIR "${dataDir}" \
+    --set NIX_UVCDYNCTRL_UDEV_DEBUG "${udevDebugVarValue}"
+
+  mkdir -p "$out/lib/udev/rules.d"
+  cat "${libwebcam}/lib/udev/rules.d/80-uvcdynctrl.rules" | \
+    sed -r "s#RUN\+\=\"([^\"]+)\"#RUN\+\=\"$out/lib/udev/uvcdynctrl\"#g" > \
+    "$out/lib/udev/rules.d/80-uvcdynctrl.rules"
+''
+
diff --git a/nixos/modules/hardware/video/webcam/facetimehd.nix b/nixos/modules/hardware/video/webcam/facetimehd.nix
new file mode 100644
index 00000000000..d311f600c31
--- /dev/null
+++ b/nixos/modules/hardware/video/webcam/facetimehd.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.facetimehd;
+
+  kernelPackages = config.boot.kernelPackages;
+
+in
+
+{
+
+  options.hardware.facetimehd.enable = mkEnableOption "facetimehd kernel module";
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "3.19";
+      message = "facetimehd is not supported for kernels older than 3.19";
+    };
+
+    boot.kernelModules = [ "facetimehd" ];
+
+    boot.blacklistedKernelModules = [ "bdc_pci" ];
+
+    boot.extraModulePackages = [ kernelPackages.facetimehd ];
+
+    hardware.firmware = [ pkgs.facetimehd-firmware ];
+
+    # unload module during suspend/hibernate as it crashes the whole system
+    powerManagement.powerDownCommands = ''
+      ${pkgs.kmod}/bin/lsmod | ${pkgs.gnugrep}/bin/grep -q "^facetimehd" && ${pkgs.kmod}/bin/rmmod -f -v facetimehd
+    '';
+
+    # and load it back on resume
+    powerManagement.resumeCommands = ''
+      ${pkgs.kmod}/bin/modprobe -v facetimehd
+    '';
+
+  };
+
+}
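
A sketch of enabling the webcam on a supported MacBook (the firmware package is unfree, so allowing unfree packages, shown here with the global switch, is typically needed as well):

    { ... }:
    {
      hardware.facetimehd.enable = true;

      # pkgs.facetimehd-firmware is unfree.
      nixpkgs.config.allowUnfree = true;
    }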