author     Alyssa Ross <hi@alyssa.is>  2022-05-31 09:59:33 +0000
committer  Alyssa Ross <hi@alyssa.is>  2022-05-31 09:59:57 +0000
commit     9ff36293d1e428cd7bf03e8d4b03611b6d361c28 (patch)
tree       1ab51a42b868c55b83f6ccdb80371b9888739dd9 /nixos/modules/virtualisation
parent     1c4fcd0d4b0541e674ee56ace1053e23e562cc80 (diff)
parent     ddc3c396a51918043bb0faa6f676abd9562be62c (diff)
Last good Nixpkgs for Weston+nouveau?
I gave this commit hash to terwiz[m] on IRC, who is trying to figure out
the last version of Spectrum that worked on their NUC with Nvidia
graphics.
Diffstat (limited to 'nixos/modules/virtualisation')
-rw-r--r--  nixos/modules/virtualisation/amazon-ec2-amis.nix                  444
-rw-r--r--  nixos/modules/virtualisation/amazon-image.nix                     180
-rw-r--r--  nixos/modules/virtualisation/amazon-init.nix                       87
-rw-r--r--  nixos/modules/virtualisation/amazon-options.nix                    74
-rw-r--r--  nixos/modules/virtualisation/anbox.nix                            138
-rw-r--r--  nixos/modules/virtualisation/azure-agent-entropy.patch             17
-rw-r--r--  nixos/modules/virtualisation/azure-agent.nix                      198
-rw-r--r--  nixos/modules/virtualisation/azure-bootstrap-blobs.nix              3
-rw-r--r--  nixos/modules/virtualisation/azure-common.nix                      66
-rw-r--r--  nixos/modules/virtualisation/azure-config-user.nix                 12
-rw-r--r--  nixos/modules/virtualisation/azure-config.nix                       5
-rw-r--r--  nixos/modules/virtualisation/azure-image.nix                       71
-rw-r--r--  nixos/modules/virtualisation/azure-images.nix                       5
-rw-r--r--  nixos/modules/virtualisation/brightbox-config.nix                   5
-rw-r--r--  nixos/modules/virtualisation/brightbox-image.nix                  166
-rw-r--r--  nixos/modules/virtualisation/build-vm.nix                          58
-rw-r--r--  nixos/modules/virtualisation/cloudstack-config.nix                 40
-rw-r--r--  nixos/modules/virtualisation/container-config.nix                  31
-rw-r--r--  nixos/modules/virtualisation/containerd.nix                       101
-rw-r--r--  nixos/modules/virtualisation/containers.nix                       156
-rw-r--r--  nixos/modules/virtualisation/cri-o.nix                            163
-rw-r--r--  nixos/modules/virtualisation/digital-ocean-config.nix             197
-rw-r--r--  nixos/modules/virtualisation/digital-ocean-image.nix               70
-rw-r--r--  nixos/modules/virtualisation/digital-ocean-init.nix                95
-rw-r--r--  nixos/modules/virtualisation/docker-image.nix                      57
-rw-r--r--  nixos/modules/virtualisation/docker-rootless.nix                  102
-rw-r--r--  nixos/modules/virtualisation/docker.nix                           252
-rw-r--r--  nixos/modules/virtualisation/ec2-amis.nix                           9
-rw-r--r--  nixos/modules/virtualisation/ec2-data.nix                          91
-rw-r--r--  nixos/modules/virtualisation/ec2-metadata-fetcher.nix              77
-rw-r--r--  nixos/modules/virtualisation/ecs-agent.nix                         45
-rw-r--r--  nixos/modules/virtualisation/gce-images.nix                        17
-rw-r--r--  nixos/modules/virtualisation/google-compute-config.nix            102
-rw-r--r--  nixos/modules/virtualisation/google-compute-image.nix              71
-rw-r--r--  nixos/modules/virtualisation/grow-partition.nix                     3
-rw-r--r--  nixos/modules/virtualisation/hyperv-guest.nix                      66
-rw-r--r--  nixos/modules/virtualisation/hyperv-image.nix                      71
-rw-r--r--  nixos/modules/virtualisation/kubevirt.nix                          30
-rw-r--r--  nixos/modules/virtualisation/kvmgt.nix                             86
-rw-r--r--  nixos/modules/virtualisation/libvirtd.nix                         392
-rw-r--r--  nixos/modules/virtualisation/lxc-container.nix                    174
-rw-r--r--  nixos/modules/virtualisation/lxc.nix                               86
-rw-r--r--  nixos/modules/virtualisation/lxcfs.nix                             45
-rw-r--r--  nixos/modules/virtualisation/lxd.nix                              182
-rw-r--r--  nixos/modules/virtualisation/nixos-containers.nix                 866
-rw-r--r--  nixos/modules/virtualisation/oci-containers.nix                   369
-rw-r--r--  nixos/modules/virtualisation/openstack-config.nix                  58
-rw-r--r--  nixos/modules/virtualisation/openstack-metadata-fetcher.nix        22
-rw-r--r--  nixos/modules/virtualisation/openvswitch.nix                      145
-rw-r--r--  nixos/modules/virtualisation/parallels-guest.nix                  155
-rw-r--r--  nixos/modules/virtualisation/podman/default.nix                   184
-rw-r--r--  nixos/modules/virtualisation/podman/dnsname.nix                    36
-rw-r--r--  nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix  34
-rw-r--r--  nixos/modules/virtualisation/podman/network-socket.nix             95
-rw-r--r--  nixos/modules/virtualisation/proxmox-image.nix                    169
-rw-r--r--  nixos/modules/virtualisation/proxmox-lxc.nix                       64
-rw-r--r--  nixos/modules/virtualisation/qemu-guest-agent.nix                  45
-rw-r--r--  nixos/modules/virtualisation/qemu-vm.nix                         1016
-rw-r--r--  nixos/modules/virtualisation/railcar.nix                          124
-rw-r--r--  nixos/modules/virtualisation/spice-usb-redirection.nix             26
-rw-r--r--  nixos/modules/virtualisation/vagrant-guest.nix                     58
-rw-r--r--  nixos/modules/virtualisation/vagrant-virtualbox-image.nix          60
-rw-r--r--  nixos/modules/virtualisation/virtualbox-guest.nix                  93
-rw-r--r--  nixos/modules/virtualisation/virtualbox-host.nix                  168
-rw-r--r--  nixos/modules/virtualisation/virtualbox-image.nix                 215
-rw-r--r--  nixos/modules/virtualisation/vmware-guest.nix                      86
-rw-r--r--  nixos/modules/virtualisation/vmware-image.nix                      91
-rw-r--r--  nixos/modules/virtualisation/waydroid.nix                          73
-rw-r--r--  nixos/modules/virtualisation/xe-guest-utilities.nix                52
-rw-r--r--  nixos/modules/virtualisation/xen-dom0.nix                         453
-rw-r--r--  nixos/modules/virtualisation/xen-domU.nix                          19
71 files changed, 9116 insertions, 0 deletions
diff --git a/nixos/modules/virtualisation/amazon-ec2-amis.nix b/nixos/modules/virtualisation/amazon-ec2-amis.nix
new file mode 100644
index 00000000000..91b5237e337
--- /dev/null
+++ b/nixos/modules/virtualisation/amazon-ec2-amis.nix
@@ -0,0 +1,444 @@
+let self = {
+  "14.04".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-71c6f470";
+  "14.04".ap-northeast-1.x86_64-linux.pv-ebs = "ami-4dcbf84c";
+  "14.04".ap-northeast-1.x86_64-linux.pv-s3 = "ami-8fc4f68e";
+  "14.04".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-da280888";
+  "14.04".ap-southeast-1.x86_64-linux.pv-ebs = "ami-7a9dbc28";
+  "14.04".ap-southeast-1.x86_64-linux.pv-s3 = "ami-c4290996";
+  "14.04".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-ab523e91";
+  "14.04".ap-southeast-2.x86_64-linux.pv-ebs = "ami-6769055d";
+  "14.04".ap-southeast-2.x86_64-linux.pv-s3 = "ami-15533f2f";
+  "14.04".eu-central-1.x86_64-linux.hvm-ebs = "ami-ba0234a7";
+  "14.04".eu-west-1.x86_64-linux.hvm-ebs = "ami-96cb63e1";
+  "14.04".eu-west-1.x86_64-linux.pv-ebs = "ami-b48c25c3";
+  "14.04".eu-west-1.x86_64-linux.pv-s3 = "ami-06cd6571";
+  "14.04".sa-east-1.x86_64-linux.hvm-ebs = "ami-01b90e1c";
+  "14.04".sa-east-1.x86_64-linux.pv-ebs = "ami-69e35474";
+  "14.04".sa-east-1.x86_64-linux.pv-s3 = "ami-61b90e7c";
+  "14.04".us-east-1.x86_64-linux.hvm-ebs = "ami-58ba3a30";
+  "14.04".us-east-1.x86_64-linux.pv-ebs = "ami-9e0583f6";
+  "14.04".us-east-1.x86_64-linux.pv-s3 = "ami-9cbe3ef4";
+  "14.04".us-west-1.x86_64-linux.hvm-ebs = "ami-0bc3d74e";
+  "14.04".us-west-1.x86_64-linux.pv-ebs = "ami-8b1703ce";
+  "14.04".us-west-1.x86_64-linux.pv-s3 = "ami-27ccd862";
+  "14.04".us-west-2.x86_64-linux.hvm-ebs = "ami-3bf1bf0b";
+  "14.04".us-west-2.x86_64-linux.pv-ebs = "ami-259bd515";
+  "14.04".us-west-2.x86_64-linux.pv-s3 = "ami-07094037";
+
+  "14.12".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-24435f25";
+  "14.12".ap-northeast-1.x86_64-linux.pv-ebs = "ami-b0425eb1";
+  "14.12".ap-northeast-1.x86_64-linux.pv-s3 = "ami-fed3c6ff";
+  "14.12".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-6c765d3e";
+  "14.12".ap-southeast-1.x86_64-linux.pv-ebs = "ami-6a765d38";
+  "14.12".ap-southeast-1.x86_64-linux.pv-s3 = "ami-d1bf9183";
+  "14.12".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-af86f395";
+  "14.12".ap-southeast-2.x86_64-linux.pv-ebs = "ami-b386f389";
+  "14.12".ap-southeast-2.x86_64-linux.pv-s3 = "ami-69c5ae53";
+  "14.12".eu-central-1.x86_64-linux.hvm-ebs = "ami-4a497a57";
+  "14.12".eu-central-1.x86_64-linux.pv-ebs = "ami-4c497a51";
+  "14.12".eu-central-1.x86_64-linux.pv-s3 = "ami-60f2c27d";
+  "14.12".eu-west-1.x86_64-linux.hvm-ebs = "ami-d126a5a6";
+  "14.12".eu-west-1.x86_64-linux.pv-ebs = "ami-0126a576";
+  "14.12".eu-west-1.x86_64-linux.pv-s3 = "ami-deda5fa9";
+  "14.12".sa-east-1.x86_64-linux.hvm-ebs = "ami-2d239e30";
+  "14.12".sa-east-1.x86_64-linux.pv-ebs = "ami-35239e28";
+  "14.12".sa-east-1.x86_64-linux.pv-s3 = "ami-81e3519c";
+  "14.12".us-east-1.x86_64-linux.hvm-ebs = "ami-0c463a64";
+  "14.12".us-east-1.x86_64-linux.pv-ebs = "ami-ac473bc4";
+  "14.12".us-east-1.x86_64-linux.pv-s3 = "ami-00e18a68";
+  "14.12".us-west-1.x86_64-linux.hvm-ebs = "ami-ca534a8f";
+  "14.12".us-west-1.x86_64-linux.pv-ebs = "ami-3e534a7b";
+  "14.12".us-west-1.x86_64-linux.pv-s3 = "ami-2905196c";
+  "14.12".us-west-2.x86_64-linux.hvm-ebs = "ami-fb9dc3cb";
+  "14.12".us-west-2.x86_64-linux.pv-ebs = "ami-899dc3b9";
+  "14.12".us-west-2.x86_64-linux.pv-s3 = "ami-cb7f2dfb";
+
+  "15.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-58cac236";
+  "15.09".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-39c8c057";
+  "15.09".ap-northeast-1.x86_64-linux.pv-ebs = "ami-5ac9c134";
+  "15.09".ap-northeast-1.x86_64-linux.pv-s3 = "ami-03cec66d";
+  "15.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-2fc2094c";
+  "15.09".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-9ec308fd";
+  "15.09".ap-southeast-1.x86_64-linux.pv-ebs = "ami-95c00bf6";
+  "15.09".ap-southeast-1.x86_64-linux.pv-s3 = "ami-bfc00bdc";
+  "15.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-996c4cfa";
+  "15.09".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-3f6e4e5c";
+  "15.09".ap-southeast-2.x86_64-linux.pv-ebs = "ami-066d4d65";
+  "15.09".ap-southeast-2.x86_64-linux.pv-s3 = "ami-cc6e4eaf";
+  "15.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-3f8c6b50";
+  "15.09".eu-central-1.x86_64-linux.hvm-s3 = "ami-5b836434";
+  "15.09".eu-central-1.x86_64-linux.pv-ebs = "ami-118c6b7e";
+  "15.09".eu-central-1.x86_64-linux.pv-s3 = "ami-2c977043";
+  "15.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-9cf04aef";
+  "15.09".eu-west-1.x86_64-linux.hvm-s3 = "ami-2bea5058";
+  "15.09".eu-west-1.x86_64-linux.pv-ebs = "ami-c9e852ba";
+  "15.09".eu-west-1.x86_64-linux.pv-s3 = "ami-c6f64cb5";
+  "15.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-6e52df02";
+  "15.09".sa-east-1.x86_64-linux.hvm-s3 = "ami-1852df74";
+  "15.09".sa-east-1.x86_64-linux.pv-ebs = "ami-4368e52f";
+  "15.09".sa-east-1.x86_64-linux.pv-s3 = "ami-f15ad79d";
+  "15.09".us-east-1.x86_64-linux.hvm-ebs = "ami-84a6a0ee";
+  "15.09".us-east-1.x86_64-linux.hvm-s3 = "ami-06a7a16c";
+  "15.09".us-east-1.x86_64-linux.pv-ebs = "ami-a4a1a7ce";
+  "15.09".us-east-1.x86_64-linux.pv-s3 = "ami-5ba8ae31";
+  "15.09".us-west-1.x86_64-linux.hvm-ebs = "ami-22c8bb42";
+  "15.09".us-west-1.x86_64-linux.hvm-s3 = "ami-a2ccbfc2";
+  "15.09".us-west-1.x86_64-linux.pv-ebs = "ami-10cebd70";
+  "15.09".us-west-1.x86_64-linux.pv-s3 = "ami-fa30429a";
+  "15.09".us-west-2.x86_64-linux.hvm-ebs = "ami-ce57b9ae";
+  "15.09".us-west-2.x86_64-linux.hvm-s3 = "ami-2956b849";
+  "15.09".us-west-2.x86_64-linux.pv-ebs = "ami-005fb160";
+  "15.09".us-west-2.x86_64-linux.pv-s3 = "ami-cd55bbad";
+
+  "16.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-40619d21";
+  "16.03".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-ce629eaf";
+  "16.03".ap-northeast-1.x86_64-linux.pv-ebs = "ami-ef639f8e";
+  "16.03".ap-northeast-1.x86_64-linux.pv-s3 = "ami-a1609cc0";
+  "16.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-deca00b0";
+  "16.03".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-a3b77dcd";
+  "16.03".ap-northeast-2.x86_64-linux.pv-ebs = "ami-7bcb0115";
+  "16.03".ap-northeast-2.x86_64-linux.pv-s3 = "ami-a2b77dcc";
+  "16.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0dff9562";
+  "16.03".ap-south-1.x86_64-linux.hvm-s3 = "ami-13f69c7c";
+  "16.03".ap-south-1.x86_64-linux.pv-ebs = "ami-0ef39961";
+  "16.03".ap-south-1.x86_64-linux.pv-s3 = "ami-e0c8a28f";
+  "16.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-5e964a3d";
+  "16.03".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-4d964a2e";
+  "16.03".ap-southeast-1.x86_64-linux.pv-ebs = "ami-ec9b478f";
+  "16.03".ap-southeast-1.x86_64-linux.pv-s3 = "ami-999b47fa";
+  "16.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-9f7359fc";
+  "16.03".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-987359fb";
+  "16.03".ap-southeast-2.x86_64-linux.pv-ebs = "ami-a2705ac1";
+  "16.03".ap-southeast-2.x86_64-linux.pv-s3 = "ami-a3705ac0";
+  "16.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-17a45178";
+  "16.03".eu-central-1.x86_64-linux.hvm-s3 = "ami-f9a55096";
+  "16.03".eu-central-1.x86_64-linux.pv-ebs = "ami-c8a550a7";
+  "16.03".eu-central-1.x86_64-linux.pv-s3 = "ami-6ea45101";
+  "16.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-b5b3d5c6";
+  "16.03".eu-west-1.x86_64-linux.hvm-s3 = "ami-c986e0ba";
+  "16.03".eu-west-1.x86_64-linux.pv-ebs = "ami-b083e5c3";
+  "16.03".eu-west-1.x86_64-linux.pv-s3 = "ami-3c83e54f";
+  "16.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-f6eb7f9a";
+  "16.03".sa-east-1.x86_64-linux.hvm-s3 = "ami-93e773ff";
+  "16.03".sa-east-1.x86_64-linux.pv-ebs = "ami-cbb82ca7";
+  "16.03".sa-east-1.x86_64-linux.pv-s3 = "ami-abb82cc7";
+  "16.03".us-east-1.x86_64-linux.hvm-ebs = "ami-c123a3d6";
+  "16.03".us-east-1.x86_64-linux.hvm-s3 = "ami-bc25a5ab";
+  "16.03".us-east-1.x86_64-linux.pv-ebs = "ami-bd25a5aa";
+  "16.03".us-east-1.x86_64-linux.pv-s3 = "ami-a325a5b4";
+  "16.03".us-west-1.x86_64-linux.hvm-ebs = "ami-748bcd14";
+  "16.03".us-west-1.x86_64-linux.hvm-s3 = "ami-a68dcbc6";
+  "16.03".us-west-1.x86_64-linux.pv-ebs = "ami-048acc64";
+  "16.03".us-west-1.x86_64-linux.pv-s3 = "ami-208dcb40";
+  "16.03".us-west-2.x86_64-linux.hvm-ebs = "ami-8263a0e2";
+  "16.03".us-west-2.x86_64-linux.hvm-s3 = "ami-925c9ff2";
+  "16.03".us-west-2.x86_64-linux.pv-ebs = "ami-5e61a23e";
+  "16.03".us-west-2.x86_64-linux.pv-s3 = "ami-734c8f13";
+
+  # 16.09.1508.3909827
+  "16.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-68453b0f";
+  "16.09".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-f9bec09e";
+  "16.09".ap-northeast-1.x86_64-linux.pv-ebs = "ami-254a3442";
+  "16.09".ap-northeast-1.x86_64-linux.pv-s3 = "ami-ef473988";
+  "16.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-18ae7f76";
+  "16.09".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-9eac7df0";
+  "16.09".ap-northeast-2.x86_64-linux.pv-ebs = "ami-57aa7b39";
+  "16.09".ap-northeast-2.x86_64-linux.pv-s3 = "ami-5cae7f32";
+  "16.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-b3f98fdc";
+  "16.09".ap-south-1.x86_64-linux.hvm-s3 = "ami-98e690f7";
+  "16.09".ap-south-1.x86_64-linux.pv-ebs = "ami-aef98fc1";
+  "16.09".ap-south-1.x86_64-linux.pv-s3 = "ami-caf88ea5";
+  "16.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-80fb51e3";
+  "16.09".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-2df3594e";
+  "16.09".ap-southeast-1.x86_64-linux.pv-ebs = "ami-37f05a54";
+  "16.09".ap-southeast-1.x86_64-linux.pv-s3 = "ami-27f35944";
+  "16.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-57ece834";
+  "16.09".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-87f4f0e4";
+  "16.09".ap-southeast-2.x86_64-linux.pv-ebs = "ami-d8ede9bb";
+  "16.09".ap-southeast-2.x86_64-linux.pv-s3 = "ami-a6ebefc5";
+  "16.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-9f863bfb";
+  "16.09".ca-central-1.x86_64-linux.hvm-s3 = "ami-ea85388e";
+  "16.09".ca-central-1.x86_64-linux.pv-ebs = "ami-ce8a37aa";
+  "16.09".ca-central-1.x86_64-linux.pv-s3 = "ami-448a3720";
+  "16.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-1b884774";
+  "16.09".eu-central-1.x86_64-linux.hvm-s3 = "ami-b08c43df";
+  "16.09".eu-central-1.x86_64-linux.pv-ebs = "ami-888946e7";
+  "16.09".eu-central-1.x86_64-linux.pv-s3 = "ami-06874869";
+  "16.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-1ed3e76d";
+  "16.09".eu-west-1.x86_64-linux.hvm-s3 = "ami-73d1e500";
+  "16.09".eu-west-1.x86_64-linux.pv-ebs = "ami-44c0f437";
+  "16.09".eu-west-1.x86_64-linux.pv-s3 = "ami-f3d8ec80";
+  "16.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-2c9c9648";
+  "16.09".eu-west-2.x86_64-linux.hvm-s3 = "ami-6b9e940f";
+  "16.09".eu-west-2.x86_64-linux.pv-ebs = "ami-f1999395";
+  "16.09".eu-west-2.x86_64-linux.pv-s3 = "ami-bb9f95df";
+  "16.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-a11882cd";
+  "16.09".sa-east-1.x86_64-linux.hvm-s3 = "ami-7726bc1b";
+  "16.09".sa-east-1.x86_64-linux.pv-ebs = "ami-9725bffb";
+  "16.09".sa-east-1.x86_64-linux.pv-s3 = "ami-b027bddc";
+  "16.09".us-east-1.x86_64-linux.hvm-ebs = "ami-854ca593";
+  "16.09".us-east-1.x86_64-linux.hvm-s3 = "ami-2241a834";
+  "16.09".us-east-1.x86_64-linux.pv-ebs = "ami-a441a8b2";
+  "16.09".us-east-1.x86_64-linux.pv-s3 = "ami-e841a8fe";
+  "16.09".us-east-2.x86_64-linux.hvm-ebs = "ami-3f41645a";
+  "16.09".us-east-2.x86_64-linux.hvm-s3 = "ami-804065e5";
+  "16.09".us-east-2.x86_64-linux.pv-ebs = "ami-f1466394";
+  "16.09".us-east-2.x86_64-linux.pv-s3 = "ami-05426760";
+  "16.09".us-west-1.x86_64-linux.hvm-ebs = "ami-c2efbca2";
+  "16.09".us-west-1.x86_64-linux.hvm-s3 = "ami-d71042b7";
+  "16.09".us-west-1.x86_64-linux.pv-ebs = "ami-04e8bb64";
+  "16.09".us-west-1.x86_64-linux.pv-s3 = "ami-31e9ba51";
+  "16.09".us-west-2.x86_64-linux.hvm-ebs = "ami-6449f504";
+  "16.09".us-west-2.x86_64-linux.hvm-s3 = "ami-344af654";
+  "16.09".us-west-2.x86_64-linux.pv-ebs = "ami-6d4af60d";
+  "16.09".us-west-2.x86_64-linux.pv-s3 = "ami-de48f4be";
+
+  # 17.03.885.6024dd4067
+  "17.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-dbd0f7bc";
+  "17.03".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-7cdff81b";
+  "17.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-c59a48ab";
+  "17.03".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-0b944665";
+  "17.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-4f413220";
+  "17.03".ap-south-1.x86_64-linux.hvm-s3 = "ami-864033e9";
+  "17.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-e08c3383";
+  "17.03".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-c28f30a1";
+  "17.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-fca9a69f";
+  "17.03".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-3daaa55e";
+  "17.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-9b00bdff";
+  "17.03".ca-central-1.x86_64-linux.hvm-s3 = "ami-e800bd8c";
+  "17.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-5450803b";
+  "17.03".eu-central-1.x86_64-linux.hvm-s3 = "ami-6e2efe01";
+  "17.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-10754c76";
+  "17.03".eu-west-1.x86_64-linux.hvm-s3 = "ami-11734a77";
+  "17.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-ff1d099b";
+  "17.03".eu-west-2.x86_64-linux.hvm-s3 = "ami-fe1d099a";
+  "17.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-d95d3eb5";
+  "17.03".sa-east-1.x86_64-linux.hvm-s3 = "ami-fca2c190";
+  "17.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0940c61f";
+  "17.03".us-east-1.x86_64-linux.hvm-s3 = "ami-674fc971";
+  "17.03".us-east-2.x86_64-linux.hvm-ebs = "ami-afc2e6ca";
+  "17.03".us-east-2.x86_64-linux.hvm-s3 = "ami-a1cde9c4";
+  "17.03".us-west-1.x86_64-linux.hvm-ebs = "ami-587b2138";
+  "17.03".us-west-1.x86_64-linux.hvm-s3 = "ami-70411b10";
+  "17.03".us-west-2.x86_64-linux.hvm-ebs = "ami-a93daac9";
+  "17.03".us-west-2.x86_64-linux.hvm-s3 = "ami-5139ae31";
+
+  # 17.09.2681.59661f21be6
+  "17.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-a30192da";
+  "17.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-295a414d";
+  "17.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-8c0eb9f1";
+  "17.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-266cfe49";
+  "17.09".us-east-1.x86_64-linux.hvm-ebs = "ami-40bee63a";
+  "17.09".us-east-2.x86_64-linux.hvm-ebs = "ami-9d84aff8";
+  "17.09".us-west-1.x86_64-linux.hvm-ebs = "ami-d14142b1";
+  "17.09".us-west-2.x86_64-linux.hvm-ebs = "ami-3eb40346";
+  "17.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-ca8207ae";
+  "17.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-84bccff8";
+  "17.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0dc5386f";
+  "17.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-89b921ef";
+  "17.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-179b3b79";
+  "17.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-4762202b";
+  "17.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-4e376021";
+
+  # 18.03.132946.1caae7247b8
+  "18.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-065c46ec";
+  "18.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-64f31903";
+  "18.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-5a8d3d27";
+  "18.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-09faf9e2";
+  "18.03".us-east-1.x86_64-linux.hvm-ebs = "ami-8b3538f4";
+  "18.03".us-east-2.x86_64-linux.hvm-ebs = "ami-150b3170";
+  "18.03".us-west-1.x86_64-linux.hvm-ebs = "ami-ce06ebad";
+  "18.03".us-west-2.x86_64-linux.hvm-ebs = "ami-586c3520";
+  "18.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-aca72ac8";
+  "18.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-aa0b4d40";
+  "18.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-d0f254b2";
+  "18.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-456511a8";
+  "18.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-3366d15d";
+  "18.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-163e1f7a";
+  "18.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-6a390b05";
+
+  # 18.09.910.c15e342304a
+  "18.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-0f412186fb8a0ec97";
+  "18.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0dada3805ce43c55e";
+  "18.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-074df85565f2e02e2";
+  "18.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-07c9b884e679df4f8";
+  "18.09".us-east-1.x86_64-linux.hvm-ebs = "ami-009c9c3f1af480ff3";
+  "18.09".us-east-2.x86_64-linux.hvm-ebs = "ami-08199961085ea8bc6";
+  "18.09".us-west-1.x86_64-linux.hvm-ebs = "ami-07aa7f56d612ddd38";
+  "18.09".us-west-2.x86_64-linux.hvm-ebs = "ami-01c84b7c368ac24d1";
+  "18.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-04f66113f76198f6c";
+  "18.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0892c7e24ebf2194f";
+  "18.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-010730f36424b0a2c";
+  "18.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0cdba8e998f076547";
+  "18.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0400a698e6a9f4a15";
+  "18.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-0e4a8a47fd6db6112";
+  "18.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-0880a678d3f555313";
+
+  # 19.03.172286.8ea36d73256
+  "19.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-0fe40176548ff0940";
+  "19.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-03a40fd3a02fe95ba";
+  "19.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-0436f9da0f20a638e";
+  "19.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-0022b8ea9efde5de4";
+  "19.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0efc58fb70ae9a217";
+  "19.03".us-east-2.x86_64-linux.hvm-ebs = "ami-0abf711b1b34da1af";
+  "19.03".us-west-1.x86_64-linux.hvm-ebs = "ami-07d126e8838c40ec5";
+  "19.03".us-west-2.x86_64-linux.hvm-ebs = "ami-03f8a737546e47fb0";
+  "19.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-03f9fd0ef2e035ede";
+  "19.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0cff66114c652c262";
+  "19.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-054c73a7f8d773ea9";
+  "19.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-00db62688900456a4";
+  "19.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0485cdd1a5fdd2117";
+  "19.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
+  "19.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0303deb1b5890f878";
+
+  # 19.09.2243.84af403f54f
+  "19.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-071082f0fa035374f";
+  "19.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0d9dc33c54d1dc4c3";
+  "19.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-09566799591d1bfed";
+  "19.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-015f8efc2be419b79";
+  "19.09".eu-north-1.x86_64-linux.hvm-ebs = "ami-07fc0a32d885e01ed";
+  "19.09".us-east-1.x86_64-linux.hvm-ebs = "ami-03330d8b51287412f";
+  "19.09".us-east-2.x86_64-linux.hvm-ebs = "ami-0518b4c84972e967f";
+  "19.09".us-west-1.x86_64-linux.hvm-ebs = "ami-06ad07e61a353b4a6";
+  "19.09".us-west-2.x86_64-linux.hvm-ebs = "ami-0e31e30925cf3ce4e";
+  "19.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-07df50fc76702a36d";
+  "19.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0f71ae5d4b0b78d95";
+  "19.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-057bbf2b4bd62d210";
+  "19.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-02a62555ca182fb5b";
+  "19.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0219dde0e6b7b7b93";
+  "19.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-066f7f2a895c821a1";
+  "19.09".ap-east-1.x86_64-linux.hvm-ebs = "ami-055b2348db2827ff1";
+  "19.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-018aab68377227e06";
+
+  # 20.03.1554.94e39623a49
+  "20.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-02c34db5766cc7013";
+  "20.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-0e32bd8c7853883f1";
+  "20.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-061edb1356c1d69fd";
+  "20.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-0a1a94722dcbff94c";
+  "20.03".eu-north-1.x86_64-linux.hvm-ebs = "ami-02699abfacbb6464b";
+  "20.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0c5e7760748b74e85";
+  "20.03".us-east-2.x86_64-linux.hvm-ebs = "ami-030296bb256764655";
+  "20.03".us-west-1.x86_64-linux.hvm-ebs = "ami-050be818e0266b741";
+  "20.03".us-west-2.x86_64-linux.hvm-ebs = "ami-06562f78dca68eda2";
+  "20.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-02365684a173255c7";
+  "20.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0dbf353e168d155f7";
+  "20.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-04c0f3a75f63daddd";
+  "20.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-093d9cc49c191eb6c";
+  "20.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0087df91a7b6ebd45";
+  "20.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0a1a6b569af04af9d";
+  "20.03".ap-east-1.x86_64-linux.hvm-ebs = "ami-0d18fdd309cdefa86";
+  "20.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-09859378158ae971d";
+  # 20.03.2351.f8248ab6d9e-aarch64-linux
+  "20.03".eu-west-1.aarch64-linux.hvm-ebs = "ami-0a4c46dfdfe921aab";
+  "20.03".eu-west-2.aarch64-linux.hvm-ebs = "ami-0b47871912b7d36f9";
+  "20.03".eu-west-3.aarch64-linux.hvm-ebs = "ami-01031e1aa505b8935";
+  "20.03".eu-central-1.aarch64-linux.hvm-ebs = "ami-0bb4669de1f477fd1";
+  # missing "20.03".eu-north-1.aarch64-linux.hvm-ebs = "ami-";
+  "20.03".us-east-1.aarch64-linux.hvm-ebs = "ami-01d2de16a1878271c";
+  "20.03".us-east-2.aarch64-linux.hvm-ebs = "ami-0eade0158b1ff49c0";
+  "20.03".us-west-1.aarch64-linux.hvm-ebs = "ami-0913bf30cb9a764a4";
+  "20.03".us-west-2.aarch64-linux.hvm-ebs = "ami-073449580ff8e82b5";
+  "20.03".ca-central-1.aarch64-linux.hvm-ebs = "ami-050f2e923c4d703c0";
+  "20.03".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-0d11ef6705a9a11a7";
+  "20.03".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-05446a2f818cd3263";
+  "20.03".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-0c057f010065d2453";
+  "20.03".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-0e90eda7f24eb33ab";
+  "20.03".ap-south-1.aarch64-linux.hvm-ebs = "ami-03ba7e9f093f568bc";
+  "20.03".sa-east-1.aarch64-linux.hvm-ebs = "ami-0a8344c6ce6d0c902";
+
+  # 20.09.2016.19db3e5ea27
+  "20.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-0057cb7d614329fa2";
+  "20.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0d46f16e0bb0ec8fd";
+  "20.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-0e8985c3ea42f87fe";
+  "20.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-0eed77c38432886d2";
+  "20.09".eu-north-1.x86_64-linux.hvm-ebs = "ami-0be5bcadd632bea14";
+  "20.09".us-east-1.x86_64-linux.hvm-ebs = "ami-0a2cce52b42daccc8";
+  "20.09".us-east-2.x86_64-linux.hvm-ebs = "ami-09378bf487b07a4d8";
+  "20.09".us-west-1.x86_64-linux.hvm-ebs = "ami-09b4337b2a9e77485";
+  "20.09".us-west-2.x86_64-linux.hvm-ebs = "ami-081d3bb5fbee0a1ac";
+  "20.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-020c24c6c607e7ac7";
+  "20.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-08f648d5db009e67d";
+  "20.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0be390efaccbd40f9";
+  "20.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0c3311601cbe8f927";
+  "20.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0020146701f4d56cf";
+  "20.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-0117e2bd876bb40d1";
+  "20.09".ap-east-1.x86_64-linux.hvm-ebs = "ami-0c42f97e5b1fda92f";
+  "20.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-021637976b094959d";
+  # 20.09.2016.19db3e5ea27-aarch64-linux
+  "20.09".eu-west-1.aarch64-linux.hvm-ebs = "ami-00a02608ff45ff8f9";
+  "20.09".eu-west-2.aarch64-linux.hvm-ebs = "ami-0e991d0f8dca21e20";
+  "20.09".eu-west-3.aarch64-linux.hvm-ebs = "ami-0d18eec4dc48c6f3b";
+  "20.09".eu-central-1.aarch64-linux.hvm-ebs = "ami-01691f25d08f48c9e";
+  "20.09".eu-north-1.aarch64-linux.hvm-ebs = "ami-09bb5aabe567ec6f4";
+  "20.09".us-east-1.aarch64-linux.hvm-ebs = "ami-0504bd006f9eaae42";
+  "20.09".us-east-2.aarch64-linux.hvm-ebs = "ami-00f0f8f2ab2d695ad";
+  "20.09".us-west-1.aarch64-linux.hvm-ebs = "ami-02d147d2cb992f878";
+  "20.09".us-west-2.aarch64-linux.hvm-ebs = "ami-07f40006cf4d4820e";
+  "20.09".ca-central-1.aarch64-linux.hvm-ebs = "ami-0e5f563919a987894";
+  "20.09".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-083e35d1acecae5c1";
+  "20.09".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-052cdc008b245b067";
+  "20.09".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-05e137f373bd72c0c";
+  "20.09".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-020791fe4c32f851a";
+  "20.09".ap-south-1.aarch64-linux.hvm-ebs = "ami-0285bb96a0f2c3955";
+  "20.09".sa-east-1.aarch64-linux.hvm-ebs = "ami-0a55ab650c32be058";
+
+
+  # 21.05.740.aa576357673
+  "21.05".eu-west-1.x86_64-linux.hvm-ebs = "ami-048dbc738074a3083";
+  "21.05".eu-west-2.x86_64-linux.hvm-ebs = "ami-0234cf81fec68315d";
+  "21.05".eu-west-3.x86_64-linux.hvm-ebs = "ami-020e459baf709107d";
+  "21.05".eu-central-1.x86_64-linux.hvm-ebs = "ami-0857d5d1309ab8b77";
+  "21.05".eu-north-1.x86_64-linux.hvm-ebs = "ami-05403e3ae53d3716f";
+  "21.05".us-east-1.x86_64-linux.hvm-ebs = "ami-0d3002ba40b5b9897";
+  "21.05".us-east-2.x86_64-linux.hvm-ebs = "ami-069a0ca1bde6dea52";
+  "21.05".us-west-1.x86_64-linux.hvm-ebs = "ami-0b415460a84bcf9bc";
+  "21.05".us-west-2.x86_64-linux.hvm-ebs = "ami-093cba49754abd7f8";
+  "21.05".ca-central-1.x86_64-linux.hvm-ebs = "ami-065c13e1d52d60b33";
+  "21.05".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-04f570c70ff9b665e";
+  "21.05".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-02a3d1df595df5ef6";
+  "21.05".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-027836fddb5c56012";
+  "21.05".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0edacd41dc7700c39";
+  "21.05".ap-south-1.x86_64-linux.hvm-ebs = "ami-0b279b5bb55288059";
+  "21.05".ap-east-1.x86_64-linux.hvm-ebs = "ami-06dc98082bc55c1fc";
+  "21.05".sa-east-1.x86_64-linux.hvm-ebs = "ami-04737dd49b98936c6";
+
+  # 21.11.333823.96b4157790f-x86_64-linux
+  "21.11".eu-west-1.x86_64-linux.hvm-ebs = "ami-01d0304a712f2f3f0";
+  "21.11".eu-west-2.x86_64-linux.hvm-ebs = "ami-00e828bfc1e5d09ac";
+  "21.11".eu-west-3.x86_64-linux.hvm-ebs = "ami-0e1ea64430d8103f2";
+  "21.11".eu-central-1.x86_64-linux.hvm-ebs = "ami-0fcf28c07e86142c5";
+  "21.11".eu-north-1.x86_64-linux.hvm-ebs = "ami-0ee83a3c6590fd6b1";
+  "21.11".us-east-1.x86_64-linux.hvm-ebs = "ami-099756bfda4540da0";
+  "21.11".us-east-2.x86_64-linux.hvm-ebs = "ami-0b20a80b82052d23f";
+  "21.11".us-west-1.x86_64-linux.hvm-ebs = "ami-088ea590004b01752";
+  "21.11".us-west-2.x86_64-linux.hvm-ebs = "ami-0025b9d4831b911a7";
+  "21.11".ca-central-1.x86_64-linux.hvm-ebs = "ami-0e67089f898e74443";
+  "21.11".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0dc8d718279d3402d";
+  "21.11".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0155e842329970187";
+  "21.11".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-07c95eda953bf5435";
+  "21.11".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-04167df3cd952b3bd";
+  "21.11".ap-south-1.x86_64-linux.hvm-ebs = "ami-0680e05531b3db677";
+  "21.11".ap-east-1.x86_64-linux.hvm-ebs = "ami-0835a3e481dc240f9";
+  "21.11".sa-east-1.x86_64-linux.hvm-ebs = "ami-0f7c354c421348e51";
+
+  # 21.11.333823.96b4157790f-aarch64-linux
+  "21.11".eu-west-1.aarch64-linux.hvm-ebs = "ami-048f3eea6a12c4b3b";
+  "21.11".eu-west-2.aarch64-linux.hvm-ebs = "ami-0e6f18f2009806add";
+  "21.11".eu-west-3.aarch64-linux.hvm-ebs = "ami-0a28d593f5e938d80";
+  "21.11".eu-central-1.aarch64-linux.hvm-ebs = "ami-0b9c95d926ab9474c";
+  "21.11".eu-north-1.aarch64-linux.hvm-ebs = "ami-0f2d400b4a2368a1a";
+  "21.11".us-east-1.aarch64-linux.hvm-ebs = "ami-05afb75585567d386";
+  "21.11".us-east-2.aarch64-linux.hvm-ebs = "ami-07f360673c2fccf8d";
+  "21.11".us-west-1.aarch64-linux.hvm-ebs = "ami-0a6892c61d85774db";
+  "21.11".us-west-2.aarch64-linux.hvm-ebs = "ami-04eaf20283432e852";
+  "21.11".ca-central-1.aarch64-linux.hvm-ebs = "ami-036b69828502e7fdf";
+  "21.11".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-0d52e51e68b6954ef";
+  "21.11".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-000a3019e003f4fb9";
+  "21.11".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-09b0c7928780e25b6";
+  "21.11".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-05f80f3c83083ff62";
+  "21.11".ap-south-1.aarch64-linux.hvm-ebs = "ami-05b2a3ff8489c3f59";
+  "21.11".ap-east-1.aarch64-linux.hvm-ebs = "ami-0aa3b50a4f2822a00";
+  "21.11".sa-east-1.aarch64-linux.hvm-ebs = "ami-00f68eff453d3fe69";
+
+  latest = self."21.11";
+}; in self
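
The file above is an ordinary nested Nix attribute set, keyed by release, then region, then system, then root-volume/virtualisation type, with "latest" aliasing the newest release. A minimal sketch of how a consumer could look up an AMI from it (the regions and releases here are chosen only for illustration):

    let amis = import ./amazon-ec2-amis.nix;
    in {
      # Newest release; resolves via the `latest` alias to the 21.11 entry.
      current = amis.latest.us-east-1.x86_64-linux.hvm-ebs;
      # A pinned release/region/architecture.
      pinned = amis."20.09".eu-west-1.aarch64-linux.hvm-ebs;
    }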
diff --git a/nixos/modules/virtualisation/amazon-image.nix b/nixos/modules/virtualisation/amazon-image.nix
new file mode 100644
index 00000000000..9a56b695015
--- /dev/null
+++ b/nixos/modules/virtualisation/amazon-image.nix
@@ -0,0 +1,180 @@
+# Configuration for Amazon EC2 instances. (Note that this file is a
+# misnomer - it should be "amazon-config.nix" or so, not
+# "amazon-image.nix", since it's used not only to build images but
+# also to reconfigure instances. However, we can't rename it because
+# existing "configuration.nix" files on EC2 instances refer to it.)
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.ec2;
+  metadataFetcher = import ./ec2-metadata-fetcher.nix {
+    inherit (pkgs) curl;
+    targetRoot = "$targetRoot/";
+    wgetExtraOptions = "-q";
+  };
+in
+
+{
+  imports = [
+    ../profiles/headless.nix
+    # Note: While we do use the headless profile, we also explicitly
+    # turn on the serial console on ttyS0 below. This is because
+    # AWS does support accessing the serial console:
+    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+
+    assertions = [
+      { assertion = cfg.hvm;
+        message = "Paravirtualized EC2 instances are no longer supported.";
+      }
+      { assertion = cfg.efi -> cfg.hvm;
+        message = "EC2 instances using EFI must be HVM instances.";
+      }
+      { assertion = versionOlder config.boot.kernelPackages.kernel.version "5.15";
+        message = "ENA driver fails to build with kernel >= 5.15";
+      }
+    ];
+
+    boot.kernelPackages = pkgs.linuxKernel.packages.linux_5_10;
+
+    boot.growPartition = cfg.hvm;
+
+    fileSystems."/" = mkIf (!cfg.zfs.enable) {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) {
+      # The ZFS image uses a partition labeled ESP whether or not we're
+      # booting with EFI.
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    services.zfs.expandOnBoot = mkIf cfg.zfs.enable "all";
+
+    boot.zfs.devNodes = mkIf cfg.zfs.enable "/dev/";
+
+    boot.extraModulePackages = [
+      config.boot.kernelPackages.ena
+    ];
+    boot.initrd.kernelModules = [ "xen-blkfront" "xen-netfront" ];
+    boot.initrd.availableKernelModules = [ "ixgbevf" "ena" "nvme" ];
+    boot.kernelParams = mkIf cfg.hvm [ "console=ttyS0,115200n8" "random.trust_cpu=on" ];
+
+    # Prevent the nouveau kernel module from being loaded, as it
+    # interferes with the nvidia/nvidia-uvm modules needed for CUDA.
+    # Also blacklist xen_fbfront to prevent a 30 second delay during
+    # boot.
+    boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
+
+    # Generate a GRUB menu.  Amazon's pv-grub uses this to boot our kernel/initrd.
+    boot.loader.grub.version = if cfg.hvm then 2 else 1;
+    boot.loader.grub.device = if (cfg.hvm && !cfg.efi) then "/dev/xvda" else "nodev";
+    boot.loader.grub.extraPerEntryConfig = mkIf (!cfg.hvm) "root (hd0)";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
+    boot.loader.timeout = 1;
+    boot.loader.grub.extraConfig = ''
+      serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+      terminal_output console serial
+      terminal_input console serial
+    '';
+
+    boot.initrd.network.enable = true;
+
+    # Mount all formatted ephemeral disks and activate all swap devices.
+    # We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
+    # because the set of devices is dependent on the instance type
+    # (e.g. "m1.small" has one ephemeral filesystem and one swap device,
+    # while "m1.large" has two ephemeral filesystems and no swap
+    # devices).  Also, put /tmp and /var on /disk0, since it has a lot
+    # more space than the root device.  Similarly, "move" /nix to /disk0
+    # by layering a unionfs-fuse mount on top of it so we have a lot more space for
+    # Nix operations.
+    boot.initrd.postMountCommands =
+      ''
+        ${metadataFetcher}
+
+        diskNr=0
+        diskForUnionfs=
+        for device in /dev/xvd[abcde]*; do
+            if [ "$device" = /dev/xvda -o "$device" = /dev/xvda1 ]; then continue; fi
+            fsType=$(blkid -o value -s TYPE "$device" || true)
+            if [ "$fsType" = swap ]; then
+                echo "activating swap device $device..."
+                swapon "$device" || true
+            elif [ "$fsType" = ext3 ]; then
+                mp="/disk$diskNr"
+                diskNr=$((diskNr + 1))
+                if mountFS "$device" "$mp" "" ext3; then
+                    if [ -z "$diskForUnionfs" ]; then diskForUnionfs="$mp"; fi
+                fi
+            else
+                echo "skipping unknown device type $device"
+            fi
+        done
+
+        if [ -n "$diskForUnionfs" ]; then
+            mkdir -m 755 -p $targetRoot/$diskForUnionfs/root
+
+            mkdir -m 1777 -p $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+            mount --bind $targetRoot/$diskForUnionfs/root/tmp $targetRoot/tmp
+
+            if [ "$(cat "$metaDir/ami-manifest-path")" != "(unknown)" ]; then
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+                mount --bind $targetRoot/$diskForUnionfs/root/var $targetRoot/var
+
+                mkdir -p /unionfs-chroot/ro-nix
+                mount --rbind $targetRoot/nix /unionfs-chroot/ro-nix
+
+                mkdir -m 755 -p $targetRoot/$diskForUnionfs/root/nix
+                mkdir -p /unionfs-chroot/rw-nix
+                mount --rbind $targetRoot/$diskForUnionfs/root/nix /unionfs-chroot/rw-nix
+
+                unionfs -o allow_other,cow,nonempty,chroot=/unionfs-chroot,max_files=32768 /rw-nix=RW:/ro-nix=RO $targetRoot/nix
+            fi
+        fi
+      '';
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need swapon in the initrd.
+        copy_bin_and_libs ${pkgs.util-linux}/sbin/swapon
+      '';
+
+    # Allow root logins only using the SSH key that the user specified
+    # at instance creation time.
+    services.openssh.enable = true;
+    services.openssh.permitRootLogin = "prohibit-password";
+
+    # Enable the serial console on ttyS0
+    systemd.services."serial-getty@ttyS0".enable = true;
+
+    # Creates symlinks for block device names.
+    services.udev.packages = [ pkgs.amazon-ec2-utils ];
+
+    # Force getting the hostname from EC2.
+    networking.hostName = mkDefault "";
+
+    # Always include cryptsetup so that Charon can use it.
+    environment.systemPackages = [ pkgs.cryptsetup ];
+
+    boot.initrd.supportedFilesystems = [ "unionfs-fuse" ];
+
+    # EC2 has its own NTP server provided by the hypervisor
+    networking.timeServers = [ "169.254.169.123" ];
+
+    # udisks has become too bloated to have in a headless system
+    # (e.g. it depends on GTK).
+    services.udisks2.enable = false;
+  };
+}
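
As the header comment notes, this module doubles as runtime configuration for existing EC2 instances. A minimal sketch of the kind of /etc/nixos/configuration.nix the comment refers to (the settings are illustrative; ec2.hvm is defined in amazon-options.nix further down):

    # /etc/nixos/configuration.nix on an EC2 instance (sketch)
    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
      # Required by the assertion above: paravirtualized instances are unsupported.
      ec2.hvm = true;
    }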
diff --git a/nixos/modules/virtualisation/amazon-init.nix b/nixos/modules/virtualisation/amazon-init.nix
new file mode 100644
index 00000000000..4f2f8df90eb
--- /dev/null
+++ b/nixos/modules/virtualisation/amazon-init.nix
@@ -0,0 +1,87 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.amazon-init;
+
+  script = ''
+    #!${pkgs.runtimeShell} -eu
+
+    echo "attempting to fetch configuration from EC2 user data..."
+
+    export HOME=/root
+    export PATH=${pkgs.lib.makeBinPath [ config.nix.package pkgs.systemd pkgs.gnugrep pkgs.git pkgs.gnutar pkgs.gzip pkgs.gnused pkgs.xz config.system.build.nixos-rebuild]}:$PATH
+    export NIX_PATH=nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
+
+    userData=/etc/ec2-metadata/user-data
+
+    # Check if user-data looks like a shell script and execute it with the
+    # runtime shell if it does. Otherwise, treat it as a NixOS configuration
+    # expression.
+    if IFS= LC_ALL=C read -rN2 shebang < $userData && [ "$shebang" = '#!' ]; then
+      # NB: we cannot chmod the $userData file, this is why we execute it via
+      # `pkgs.runtimeShell`. This means we have only limited support for shell
+      # scripts compatible with the `pkgs.runtimeShell`.
+      exec ${pkgs.runtimeShell} $userData
+    fi
+
+    if [ -s "$userData" ]; then
+      # If the user-data looks like it could be a nix expression,
+      # copy it over. Also, look for a magic three-hash comment and set
+      # that as the channel.
+      if sed '/^\(#\|SSH_HOST_.*\)/d' < "$userData" | grep -q '\S'; then
+        channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+        while IFS= read -r channel; do
+          echo "writing channel: $channel"
+        done < <(printf "%s\n" "$channels")
+
+        if [[ -n "$channels" ]]; then
+          printf "%s" "$channels" > /root/.nix-channels
+          nix-channel --update
+        fi
+
+        echo "setting configuration from EC2 user data"
+        cp "$userData" /etc/nixos/configuration.nix
+      else
+        echo "user data does not appear to be a Nix expression; ignoring"
+        exit
+      fi
+    else
+      echo "no user data is available"
+      exit
+    fi
+
+    nixos-rebuild switch
+  '';
+in {
+
+  options.virtualisation.amazon-init = {
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = ''
+        Enable or disable the amazon-init service.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.amazon-init = {
+      inherit script;
+      description = "Reconfigure the system from EC2 userdata on startup";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
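
Per the parsing logic above, user data beginning with "#!" is executed as a script; anything else is treated as a NixOS configuration, with leading three-hash comment lines stripped into /root/.nix-channels (one "url name" pair per line). A hypothetical user-data payload exercising both the channel pinning and the configuration copy (the channel URL and option are illustrative):

    ### https://nixos.org/channels/nixos-21.11 nixos
    { ... }:
    {
      services.openssh.enable = true;
    }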
diff --git a/nixos/modules/virtualisation/amazon-options.nix b/nixos/modules/virtualisation/amazon-options.nix
new file mode 100644
index 00000000000..0465571ca92
--- /dev/null
+++ b/nixos/modules/virtualisation/amazon-options.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) literalExpression types;
+in {
+  options = {
+    ec2 = {
+      zfs = {
+        enable = lib.mkOption {
+          default = false;
+          internal = true;
+          description = ''
+            Whether the EC2 instance uses a ZFS root.
+          '';
+        };
+
+        datasets = lib.mkOption {
+          description = ''
+            Datasets to create under the `tank` and `boot` zpools.
+
+            **NOTE:** This option is used only at image creation time, and
+            does not attempt to declaratively create or manage datasets
+            on an existing system.
+          '';
+
+          default = {};
+
+          type = types.attrsOf (types.submodule {
+            options = {
+              mount = lib.mkOption {
+                description = "Where to mount this dataset.";
+                type = types.nullOr types.string;
+                default = null;
+              };
+
+              properties = lib.mkOption {
+                description = "Properties to set on this dataset.";
+                type = types.attrsOf types.string;
+                default = {};
+              };
+            };
+          });
+        };
+      };
+      hvm = lib.mkOption {
+        default = lib.versionAtLeast config.system.stateVersion "17.03";
+        internal = true;
+        description = ''
+          Whether the EC2 instance is a HVM instance.
+        '';
+      };
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        defaultText = literalExpression "pkgs.stdenv.hostPlatform.isAarch64";
+        internal = true;
+        description = ''
+          Whether the EC2 instance is using EFI.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf config.ec2.zfs.enable {
+    networking.hostId = lib.mkDefault "00000000";
+
+    fileSystems = let
+      mountable = lib.filterAttrs (_: value: ((value.mount or null) != null)) config.ec2.zfs.datasets;
+    in lib.mapAttrs'
+      (dataset: opts: lib.nameValuePair opts.mount {
+        device = dataset;
+        fsType = "zfs";
+      })
+      mountable;
+  };
+}
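
For illustration, a sketch of what an image-build configuration using these (internal, image-creation-time) options might look like; the dataset name and property are assumptions, not defaults:

    {
      ec2.zfs.enable = true;
      ec2.zfs.datasets."tank/home" = {
        mount = "/home";
        # Example ZFS property; any attrset of property names to values is accepted.
        properties.compression = "zstd";
      };
    }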
diff --git a/nixos/modules/virtualisation/anbox.nix b/nixos/modules/virtualisation/anbox.nix
new file mode 100644
index 00000000000..a4da62eb5f7
--- /dev/null
+++ b/nixos/modules/virtualisation/anbox.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.anbox;
+  kernelPackages = config.boot.kernelPackages;
+  addrOpts = v: addr: pref: name: {
+    address = mkOption {
+      default = addr;
+      type = types.str;
+      description = ''
+        IPv${toString v} ${name} address.
+      '';
+    };
+
+    prefixLength = mkOption {
+      default = pref;
+      type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+      description = ''
+        Subnet mask of the ${name} address, specified as the number of
+        bits in the prefix (<literal>${if v == 4 then "24" else "64"}</literal>).
+      '';
+    };
+  };
+
+in
+
+{
+
+  options.virtualisation.anbox = {
+
+    enable = mkEnableOption "Anbox";
+
+    image = mkOption {
+      default = pkgs.anbox.image;
+      defaultText = literalExpression "pkgs.anbox.image";
+      type = types.package;
+      description = ''
+        Base android image for Anbox.
+      '';
+    };
+
+    extraInit = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra shell commands to be run inside the container image during init.
+      '';
+    };
+
+    ipv4 = {
+      container = addrOpts 4 "192.168.250.2" 24 "Container";
+      gateway = addrOpts 4 "192.168.250.1" 24 "Host";
+
+      dns = mkOption {
+        default = "1.1.1.1";
+        type = types.str;
+        description = ''
+          Container DNS server.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
+      message = "Anbox needs user namespace support to work properly";
+    };
+
+    environment.systemPackages = with pkgs; [ anbox ];
+
+    boot.kernelModules = [ "ashmem_linux" "binder_linux" ];
+    boot.extraModulePackages = [ kernelPackages.anbox ];
+
+    services.udev.extraRules = ''
+      KERNEL=="ashmem", NAME="%k", MODE="0666"
+      KERNEL=="binder*", NAME="%k", MODE="0666"
+    '';
+
+    virtualisation.lxc.enable = true;
+    networking.bridges.anbox0.interfaces = [];
+    networking.interfaces.anbox0.ipv4.addresses = [ cfg.ipv4.gateway ];
+
+    networking.nat = {
+      enable = true;
+      internalInterfaces = [ "anbox0" ];
+    };
+
+    systemd.services.anbox-container-manager = let
+      anboxloc = "/var/lib/anbox";
+    in {
+      description = "Anbox Container Management Daemon";
+
+      environment.XDG_RUNTIME_DIR="${anboxloc}";
+
+      wantedBy = [ "multi-user.target" ];
+      preStart = let
+        initsh = pkgs.writeText "nixos-init" (''
+          #!/system/bin/sh
+          setprop nixos.version ${config.system.nixos.version}
+
+          # we don't have radio
+          setprop ro.radio.noril yes
+          stop ril-daemon
+
+          # speed up boot
+          setprop debug.sf.nobootanimation 1
+        '' + cfg.extraInit);
+        initshloc = "${anboxloc}/rootfs-overlay/system/etc/init.goldfish.sh";
+      in ''
+        mkdir -p ${anboxloc}
+        mkdir -p $(dirname ${initshloc})
+        [ -f ${initshloc} ] && rm ${initshloc}
+        cp ${initsh} ${initshloc}
+        chown 100000:100000 ${initshloc}
+        chmod +x ${initshloc}
+      '';
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.anbox}/bin/anbox container-manager \
+            --data-path=${anboxloc} \
+            --android-image=${cfg.image} \
+            --container-network-address=${cfg.ipv4.container.address} \
+            --container-network-gateway=${cfg.ipv4.gateway.address} \
+            --container-network-dns-servers=${cfg.ipv4.dns} \
+            --use-rootfs-overlay \
+            --privileged
+        '';
+      };
+    };
+  };
+
+}
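
A minimal sketch of enabling this module, with the option surface shown above (the DNS override and the extra init command are illustrative, not defaults):

    {
      virtualisation.anbox = {
        enable = true;
        ipv4.dns = "9.9.9.9";
        extraInit = ''
          # hypothetical Android property, appended to the generated init script
          setprop debug.example.flag 1
        '';
      };
    }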
diff --git a/nixos/modules/virtualisation/azure-agent-entropy.patch b/nixos/modules/virtualisation/azure-agent-entropy.patch
new file mode 100644
index 00000000000..2a7ad08a4af
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-agent-entropy.patch
@@ -0,0 +1,17 @@
+--- a/waagent	2016-03-12 09:58:15.728088851 +0200
++++ a/waagent	2016-03-12 09:58:43.572680025 +0200
+@@ -6173,10 +6173,10 @@
+             Log("MAC  address: " + ":".join(["%02X" % Ord(a) for a in mac]))
+         
+         # Consume Entropy in ACPI table provided by Hyper-V
+-        try:
+-            SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
+-        except:
+-            pass
++        #try:
++        #    SetFileContents("/dev/random", GetFileContents("/sys/firmware/acpi/tables/OEM0"))
++        #except:
++        #    pass
+ 
+         Log("Probing for Azure environment.")
+         self.Endpoint = self.DoDhcpWork()
diff --git a/nixos/modules/virtualisation/azure-agent.nix b/nixos/modules/virtualisation/azure-agent.nix
new file mode 100644
index 00000000000..bd8c7f8c1ee
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-agent.nix
@@ -0,0 +1,198 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.azure.agent;
+
+  waagent = with pkgs; stdenv.mkDerivation rec {
+    name = "waagent-2.0";
+    src = pkgs.fetchFromGitHub {
+      owner = "Azure";
+      repo = "WALinuxAgent";
+      rev = "1b3a8407a95344d9d12a2a377f64140975f1e8e4";
+      sha256 = "10byzvmpgrmr4d5mdn2kq04aapqb3sgr1admk13wjmy5cd6bwd2x";
+    };
+
+    patches = [ ./azure-agent-entropy.patch ];
+
+    buildInputs = [ makeWrapper python pythonPackages.wrapPython ];
+    runtimeDeps = [ findutils gnugrep gawk coreutils openssl openssh
+                    nettools # for hostname
+                    procps # for pidof
+                    shadow # for useradd, usermod
+                    util-linux # for (u)mount, fdisk, sfdisk, mkswap
+                    parted
+                  ];
+    pythonPath = [ pythonPackages.pyasn1 ];
+
+    configurePhase = false;
+    buildPhase = false;
+
+    installPhase = ''
+      substituteInPlace config/99-azure-product-uuid.rules \
+          --replace /bin/chmod "${coreutils}/bin/chmod"
+      mkdir -p $out/lib/udev/rules.d
+      cp config/*.rules $out/lib/udev/rules.d
+
+      mkdir -p $out/bin
+      cp waagent $out/bin/
+      chmod +x $out/bin/waagent
+
+      wrapProgram "$out/bin/waagent" \
+          --prefix PYTHONPATH : $PYTHONPATH \
+          --prefix PATH : "${makeBinPath runtimeDeps}"
+    '';
+  };
+
+  provisionedHook = pkgs.writeScript "provisioned-hook" ''
+    #!${pkgs.runtimeShell}
+    /run/current-system/systemd/bin/systemctl start provisioned.target
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.azure.agent = {
+    enable = mkOption {
+      default = false;
+      description = "Whether to enable the Windows Azure Linux Agent.";
+    };
+    verboseLogging = mkOption {
+      default = false;
+      description = "Whether to enable verbose logging.";
+    };
+    mountResourceDisk = mkOption {
+      default = true;
+      description = "Whether the agent should format (ext4) and mount the resource disk to /mnt/resource.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.hostPlatform.isx86;
+      message = "Azure not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.networking.networkmanager.enable == false;
+      message = "Windows Azure Linux Agent is not compatible with NetworkManager";
+    } ];
+
+    boot.initrd.kernelModules = [ "ata_piix" ];
+    networking.firewall.allowedUDPPorts = [ 68 ];
+
+
+    environment.etc."waagent.conf".text = ''
+        #
+        # Windows Azure Linux Agent Configuration
+        #
+
+        Role.StateConsumer=${provisionedHook}
+
+        # Enable instance creation
+        Provisioning.Enabled=y
+
+        # Password authentication for root account will be unavailable.
+        Provisioning.DeleteRootPassword=n
+
+        # Generate fresh host key pair.
+        Provisioning.RegenerateSshHostKeyPair=n
+
+        # Supported values are "rsa", "dsa" and "ecdsa".
+        Provisioning.SshHostKeyPairType=ed25519
+
+        # Monitor host name changes and publish changes via DHCP requests.
+        Provisioning.MonitorHostName=y
+
+        # Decode CustomData from Base64.
+        Provisioning.DecodeCustomData=n
+
+        # Execute CustomData after provisioning.
+        Provisioning.ExecuteCustomData=n
+
+        # Format if unformatted. If 'n', resource disk will not be mounted.
+        ResourceDisk.Format=${if cfg.mountResourceDisk then "y" else "n"}
+
+        # File system on the resource disk
+        # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
+        ResourceDisk.Filesystem=ext4
+
+        # Mount point for the resource disk
+        ResourceDisk.MountPoint=/mnt/resource
+
+        # Respond to load balancer probes if requested by Windows Azure.
+        LBProbeResponder=y
+
+        # Enable logging to serial console (y|n)
+        # When stdout is not enough...
+        # 'y' if not set
+        Logs.Console=y
+
+        # Enable verbose logging (y|n)
+        Logs.Verbose=${if cfg.verboseLogging then "y" else "n"}
+
+        # Root device timeout in seconds.
+        OS.RootDeviceScsiTimeout=300
+    '';
+
+    services.udev.packages = [ waagent ];
+
+    networking.dhcpcd.persistent = true;
+
+    services.logrotate = {
+      enable = true;
+      extraConfig = ''
+        /var/log/waagent.log {
+            compress
+            monthly
+            rotate 6
+            notifempty
+            missingok
+        }
+      '';
+    };
+
+    systemd.targets.provisioned = {
+      description = "Services Requiring Azure VM provisioning to have finished";
+    };
+
+  systemd.services.consume-hypervisor-entropy =
+    { description = "Consume entropy in ACPI table provided by Hyper-V";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          echo "Fetching entropy..."
+          cat /sys/firmware/acpi/tables/OEM0 > /dev/random
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+     };
+
+    systemd.services.waagent = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "sshd.service" ];
+      wants = [ "network-online.target" ];
+
+      path = [ pkgs.e2fsprogs pkgs.bash ];
+      description = "Windows Azure Agent Service";
+      unitConfig.ConditionPathExists = "/etc/waagent.conf";
+      serviceConfig = {
+        ExecStart = "${waagent}/bin/waagent -daemon";
+        Type = "simple";
+      };
+    };
+
+  };
+
+}
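
The module's three options map directly onto the generated waagent.conf above; a minimal sketch of using them:

    {
      virtualisation.azure.agent = {
        enable = true;
        verboseLogging = true;      # emits Logs.Verbose=y
        mountResourceDisk = false;  # emits ResourceDisk.Format=n; resource disk left unmounted
      };
    }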
diff --git a/nixos/modules/virtualisation/azure-bootstrap-blobs.nix b/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
new file mode 100644
index 00000000000..281be9a1231
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
@@ -0,0 +1,3 @@
+{
+    "16.03" = "https://nixos.blob.core.windows.net/images/nixos-image-16.03.847.8688c17-x86_64-linux.vhd";
+}
diff --git a/nixos/modules/virtualisation/azure-common.nix b/nixos/modules/virtualisation/azure-common.nix
new file mode 100644
index 00000000000..8efa177e30d
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-common.nix
@@ -0,0 +1,66 @@
+{ lib, pkgs, ... }:
+
+with lib;
+{
+  imports = [ ../profiles/headless.nix ];
+
+  require = [ ./azure-agent.nix ];
+  virtualisation.azure.agent.enable = true;
+
+  boot.kernelParams = [ "console=ttyS0" "earlyprintk=ttyS0" "rootdelay=300" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "hv_vmbus" "hv_netvsc" "hv_utils" "hv_storvsc" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.grub.version = 2;
+  boot.loader.timeout = 0;
+
+  boot.growPartition = true;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time, and ping client connections to avoid timeouts.
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.extraConfig = ''
+    ClientAliveInterval 180
+  '';
+
+  # Force getting the hostname from Azure
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  # sg_scan is needed to finalize disk removal on older kernels
+  environment.systemPackages = [ pkgs.cryptsetup pkgs.sg3_utils ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  services.udev.extraRules = ''
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:0", ATTR{removable}=="0", SYMLINK+="disk/by-lun/0"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:1", ATTR{removable}=="0", SYMLINK+="disk/by-lun/1"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:2", ATTR{removable}=="0", SYMLINK+="disk/by-lun/2"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:3", ATTR{removable}=="0", SYMLINK+="disk/by-lun/3"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:4", ATTR{removable}=="0", SYMLINK+="disk/by-lun/4"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:5", ATTR{removable}=="0", SYMLINK+="disk/by-lun/5"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:6", ATTR{removable}=="0", SYMLINK+="disk/by-lun/6"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:7", ATTR{removable}=="0", SYMLINK+="disk/by-lun/7"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:8", ATTR{removable}=="0", SYMLINK+="disk/by-lun/8"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:9", ATTR{removable}=="0", SYMLINK+="disk/by-lun/9"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:10", ATTR{removable}=="0", SYMLINK+="disk/by-lun/10"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:11", ATTR{removable}=="0", SYMLINK+="disk/by-lun/11"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:12", ATTR{removable}=="0", SYMLINK+="disk/by-lun/12"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:13", ATTR{removable}=="0", SYMLINK+="disk/by-lun/13"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:14", ATTR{removable}=="0", SYMLINK+="disk/by-lun/14"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda", SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:15", ATTR{removable}=="0", SYMLINK+="disk/by-lun/15"
+  '';
+
+}
diff --git a/nixos/modules/virtualisation/azure-config-user.nix b/nixos/modules/virtualisation/azure-config-user.nix
new file mode 100644
index 00000000000..267ba50ae02
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the nixos channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/azure-common.nix" ];
+}
diff --git a/nixos/modules/virtualisation/azure-config.nix b/nixos/modules/virtualisation/azure-config.nix
new file mode 100644
index 00000000000..780bd1b78dc
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
+}
diff --git a/nixos/modules/virtualisation/azure-image.nix b/nixos/modules/virtualisation/azure-image.nix
new file mode 100644
index 00000000000..03dd3c05130
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-image.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.azureImage;
+in
+{
+  imports = [ ./azure-common.nix ];
+
+  options = {
+    virtualisation.azureImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 2048;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+  };
+  config = {
+    system.build.azureImage = import ../../lib/make-disk-image.nix {
+      name = "azure-image";
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=fixed,force_size -O vpc $diskImage $out/disk.vhd
+        rm $diskImage
+      '';
+      configFile = ./azure-config-user.nix;
+      format = "raw";
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+    # Azure metadata is available as a CD-ROM drive.
+    fileSystems."/metadata".device = "/dev/sr0";
+
+    systemd.services.fetch-ssh-keys = {
+      description = "Fetch host keys and authorized_keys for root user";
+
+      wantedBy = [ "sshd.service" "waagent.service" ];
+      before = [ "sshd.service" "waagent.service" ];
+
+      path  = [ pkgs.coreutils ];
+      script =
+        ''
+          eval "$(cat /metadata/CustomData.bin)"
+          if ! [ -z "$ssh_host_ecdsa_key" ]; then
+            echo "downloaded ssh_host_ecdsa_key"
+            echo "$ssh_host_ecdsa_key" > /etc/ssh/ssh_host_ed25519_key
+            chmod 600 /etc/ssh/ssh_host_ed25519_key
+          fi
+
+          if ! [ -z "$ssh_host_ecdsa_key_pub" ]; then
+            echo "downloaded ssh_host_ecdsa_key_pub"
+            echo "$ssh_host_ecdsa_key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+            chmod 644 /etc/ssh/ssh_host_ed25519_key.pub
+          fi
+
+          if ! [ -z "$ssh_root_auth_key" ]; then
+            echo "downloaded ssh_root_auth_key"
+            mkdir -m 0700 -p /root/.ssh
+            echo "$ssh_root_auth_key" > /root/.ssh/authorized_keys
+            chmod 600 /root/.ssh/authorized_keys
+          fi
+        '';
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+      serviceConfig.StandardError = "journal+console";
+      serviceConfig.StandardOutput = "journal+console";
+    };
+  };
+}
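+
+# A build sketch (the attribute path follows from system.build.azureImage
+# above; the flags are illustrative):
+#
+#   $ nix-build '<nixpkgs/nixos>' \
+#       -A config.system.build.azureImage \
+#       -I nixos-config=./configuration.nix
+#   $ ls result/disk.vhd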
diff --git a/nixos/modules/virtualisation/azure-images.nix b/nixos/modules/virtualisation/azure-images.nix
new file mode 100644
index 00000000000..22c82fc14f6
--- /dev/null
+++ b/nixos/modules/virtualisation/azure-images.nix
@@ -0,0 +1,5 @@
+let self = {
+  "16.09" = "https://nixos.blob.core.windows.net/images/nixos-image-16.09.1694.019dcc3-x86_64-linux.vhd";
+
+  latest = self."16.09";
+}; in self
diff --git a/nixos/modules/virtualisation/brightbox-config.nix b/nixos/modules/virtualisation/brightbox-config.nix
new file mode 100644
index 00000000000..0a018e4cd69
--- /dev/null
+++ b/nixos/modules/virtualisation/brightbox-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/brightbox-image.nix" ];
+}
diff --git a/nixos/modules/virtualisation/brightbox-image.nix b/nixos/modules/virtualisation/brightbox-image.nix
new file mode 100644
index 00000000000..9641b693f18
--- /dev/null
+++ b/nixos/modules/virtualisation/brightbox-image.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  diskSize = "20G";
+in
+{
+  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
+
+  system.build.brightboxImage =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "brightbox-image"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/$diskImageBase
+              truncate $diskImage --size ${diskSize}
+              mv closure xchg/
+            '';
+
+          postVM =
+            ''
+              PATH=$PATH:${lib.makeBinPath [ pkgs.gnutar pkgs.gzip ]}
+              pushd $out
+              ${pkgs.qemu_kvm}/bin/qemu-img convert -c -O qcow2 $diskImageBase nixos.qcow2
+              rm $diskImageBase
+              popd
+            '';
+          diskImageBase = "nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw";
+          buildInputs = [ pkgs.util-linux pkgs.perl ];
+          exportReferencesGraph =
+            [ "closure" config.system.build.toplevel ];
+        }
+        ''
+          # Create partition table
+          ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+          ${pkgs.parted}/sbin/parted --script /dev/vda mkpart primary ext4 1 ${diskSize}
+          ${pkgs.parted}/sbin/parted --script /dev/vda print
+          . /sys/class/block/vda1/uevent
+          mknod /dev/vda1 b $MAJOR $MINOR
+
+          # Create an empty filesystem and mount it.
+          ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
+          ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+
+          mkdir /mnt
+          mount /dev/vda1 /mnt
+
+          # The initrd expects these directories to exist.
+          mkdir /mnt/dev /mnt/proc /mnt/sys
+
+          mount --bind /proc /mnt/proc
+          mount --bind /dev /mnt/dev
+          mount --bind /sys /mnt/sys
+
+          # Copy all paths in the closure to the filesystem.
+          storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+
+          mkdir -p /mnt/nix/store
+          echo "copying everything (will take a while)..."
+          cp -prd $storePaths /mnt/nix/store/
+
+          # Register the paths in the Nix database.
+          printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+              chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
+
+          # Create the system profile to allow nixos-rebuild to work.
+          chroot /mnt ${config.nix.package.out}/bin/nix-env \
+              -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel} \
+              --option build-users-group ""
+
+          # `nixos-rebuild' requires an /etc/NIXOS.
+          mkdir -p /mnt/etc
+          touch /mnt/etc/NIXOS
+
+          # `switch-to-configuration' requires a /bin/sh
+          mkdir -p /mnt/bin
+          ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+          # Install a configuration.nix.
+          mkdir -p /mnt/etc/nixos /mnt/boot/grub
+          cp ${./brightbox-config.nix} /mnt/etc/nixos/configuration.nix
+
+          # Generate the GRUB menu.
+          ln -s vda /dev/sda
+          chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /mnt/proc /mnt/dev /mnt/sys
+          umount /mnt
+        ''
+    );
+
+  fileSystems."/".label = "nixos";
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/vda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time.
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+
+  # Force getting the hostname from the Brightbox metadata service.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  systemd.services.fetch-ec2-data =
+    { description = "Fetch EC2 Data";
+
+      wantedBy = [ "multi-user.target" "sshd.service" ];
+      before = [ "sshd.service" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      path = [ pkgs.wget pkgs.iproute2 ];
+
+      script =
+        ''
+          wget="wget -q --retry-connrefused -O -"
+
+          ${optionalString (config.networking.hostName == "") ''
+            echo "setting host name..."
+            ${pkgs.nettools}/bin/hostname $($wget http://169.254.169.254/latest/meta-data/hostname)
+          ''}
+
+          # Don't download the SSH key if it has already been injected
+          # into the image (a Nova feature).
+          if ! [ -e /root/.ssh/authorized_keys ]; then
+              echo "obtaining SSH key..."
+              mkdir -m 0700 -p /root/.ssh
+              $wget http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > /root/key.pub
+              if [ $? -eq 0 ] && [ -e /root/key.pub ]; then
+                  if ! grep -q -f /root/key.pub /root/.ssh/authorized_keys; then
+                      cat /root/key.pub >> /root/.ssh/authorized_keys
+                      echo "new key added to authorized_keys"
+                  fi
+                  chmod 600 /root/.ssh/authorized_keys
+                  rm -f /root/key.pub
+              fi
+          fi
+
+          # Extract the intended SSH host key for this machine from
+          # the supplied user data, if available.  Otherwise sshd will
+          # generate one normally.
+          $wget http://169.254.169.254/2011-01-01/user-data > /root/user-data || true
+          key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' /root/user-data)"
+          key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' /root/user-data)"
+          if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+              mkdir -m 0755 -p /etc/ssh
+              (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+              echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+          fi
+        '';
+
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+    };
+
+}
diff --git a/nixos/modules/virtualisation/build-vm.nix b/nixos/modules/virtualisation/build-vm.nix
new file mode 100644
index 00000000000..4a4694950f9
--- /dev/null
+++ b/nixos/modules/virtualisation/build-vm.nix
@@ -0,0 +1,58 @@
+{ config, extendModules, lib, ... }:
+let
+
+  inherit (lib)
+    mkOption
+    ;
+
+  vmVariant = extendModules {
+    modules = [ ./qemu-vm.nix ];
+  };
+
+  vmVariantWithBootLoader = vmVariant.extendModules {
+    modules = [
+      ({ config, ... }: {
+        _file = "nixos/default.nix##vmWithBootLoader";
+        virtualisation.useBootLoader = true;
+        virtualisation.useEFIBoot =
+          config.boot.loader.systemd-boot.enable ||
+          config.boot.loader.efi.canTouchEfiVariables;
+      })
+    ];
+  };
+in
+{
+  options = {
+
+    virtualisation.vmVariant = mkOption {
+      description = ''
+        Machine configuration to be added for the vm script produced by <literal>nixos-rebuild build-vm</literal>.
+      '';
+      inherit (vmVariant) type;
+      default = {};
+      visible = "shallow";
+    };
+
+    virtualisation.vmVariantWithBootLoader = mkOption {
+      description = ''
+        Machine configuration to be added for the vm script produced by <literal>nixos-rebuild build-vm-with-bootloader</literal>.
+      '';
+      inherit (vmVariantWithBootLoader) type;
+      default = {};
+      visible = "shallow";
+    };
+
+  };
+
+  config = {
+
+    system.build = {
+      vm = lib.mkDefault config.virtualisation.vmVariant.system.build.vm;
+      vmWithBootLoader = lib.mkDefault config.virtualisation.vmVariantWithBootLoader.system.build.vm;
+    };
+
+  };
+
+  # uses extendModules
+  meta.buildDocsInSandbox = false;
+}
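+
+# A minimal usage sketch (memorySize and graphics are options defined in
+# qemu-vm.nix; the values here are illustrative):
+#
+#   virtualisation.vmVariant.virtualisation = {
+#     memorySize = 4096; # MB
+#     graphics = false;
+#   };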
diff --git a/nixos/modules/virtualisation/cloudstack-config.nix b/nixos/modules/virtualisation/cloudstack-config.nix
new file mode 100644
index 00000000000..78afebdc5dd
--- /dev/null
+++ b/nixos/modules/virtualisation/cloudstack-config.nix
@@ -0,0 +1,40 @@
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+    };
+
+    # Cloud-init configuration.
+    services.cloud-init.enable = true;
+    # Wget is needed for setting the password. This is of little use, as
+    # root password login is disabled above.
+    environment.systemPackages = [ pkgs.wget ];
+    # Only enable CloudStack datasource for faster boot speed.
+    environment.etc."cloud/cloud.cfg.d/99_cloudstack.cfg".text = ''
+      datasource:
+        CloudStack: {}
+        None: {}
+      datasource_list: ["CloudStack"]
+    '';
+  };
+}
diff --git a/nixos/modules/virtualisation/container-config.nix b/nixos/modules/virtualisation/container-config.nix
new file mode 100644
index 00000000000..0966ef84827
--- /dev/null
+++ b/nixos/modules/virtualisation/container-config.nix
@@ -0,0 +1,31 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  config = mkIf config.boot.isContainer {
+
+    # Disable some features that are not useful in a container.
+    nix.optimise.automatic = mkDefault false; # the store is host managed
+    services.udisks2.enable = mkDefault false;
+    powerManagement.enable = mkDefault false;
+    documentation.nixos.enable = mkDefault false;
+
+    networking.useHostResolvConf = mkDefault true;
+
+    # Containers should be light-weight, so start sshd on demand.
+    services.openssh.startWhenNeeded = mkDefault true;
+
+    # Shut up warnings about not having a boot loader.
+    system.build.installBootLoader = lib.mkDefault "${pkgs.coreutils}/bin/true";
+
+    # Not supported in systemd-nspawn containers.
+    security.audit.enable = false;
+
+    # Use the host's nix-daemon.
+    environment.variables.NIX_REMOTE = "daemon";
+
+  };
+
+}
diff --git a/nixos/modules/virtualisation/containerd.nix b/nixos/modules/virtualisation/containerd.nix
new file mode 100644
index 00000000000..ea89a994b17
--- /dev/null
+++ b/nixos/modules/virtualisation/containerd.nix
@@ -0,0 +1,101 @@
+{ pkgs, lib, config, ... }:
+let
+  cfg = config.virtualisation.containerd;
+
+  configFile = if cfg.configFile == null then
+    settingsFormat.generate "containerd.toml" cfg.settings
+  else
+    cfg.configFile;
+
+  containerdConfigChecked = pkgs.runCommand "containerd-config-checked.toml" {
+    nativeBuildInputs = [ pkgs.containerd ];
+  } ''
+    containerd -c ${configFile} config dump >/dev/null
+    ln -s ${configFile} $out
+  '';
+
+  settingsFormat = pkgs.formats.toml {};
+in
+{
+
+  options.virtualisation.containerd = with lib.types; {
+    enable = lib.mkEnableOption "containerd container runtime";
+
+    configFile = lib.mkOption {
+      default = null;
+      description = ''
+        Path to the containerd config file.
+        Setting this option will override any configuration applied by the settings option.
+      '';
+      type = nullOr path;
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = ''
+        Configuration to write to containerd.toml, serialized as TOML.
+      '';
+    };
+
+    args = lib.mkOption {
+      default = {};
+      description = "extra args to append to the containerd cmdline";
+      type = attrsOf str;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    warnings = lib.optional (cfg.configFile != null) ''
+      `virtualisation.containerd.configFile` is deprecated. Use `virtualisation.containerd.settings` instead.
+    '';
+
+    virtualisation.containerd = {
+      args.config = toString containerdConfigChecked;
+      settings = {
+        version = 2;
+        plugins."io.containerd.grpc.v1.cri" = {
+         containerd.snapshotter =
+           lib.mkIf config.boot.zfs.enabled (lib.mkOptionDefault "zfs");
+         cni.bin_dir = lib.mkOptionDefault "${pkgs.cni-plugins}/bin";
+        };
+      };
+    };
+
+    environment.systemPackages = [ pkgs.containerd ];
+
+    systemd.services.containerd = {
+      description = "containerd - container runtime";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [
+        containerd
+        runc
+        iptables
+      ] ++ lib.optional config.boot.zfs.enabled config.boot.zfs.package;
+      serviceConfig = {
+        ExecStart = ''${pkgs.containerd}/bin/containerd ${lib.concatStringsSep " " (lib.cli.toGNUCommandLine {} cfg.args)}'';
+        Delegate = "yes";
+        KillMode = "process";
+        Type = "notify";
+        Restart = "always";
+        RestartSec = "10";
+
+        # "limits" defined below are adopted from upstream: https://github.com/containerd/containerd/blob/master/containerd.service
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        LimitNOFILE = "infinity";
+        TasksMax = "infinity";
+        OOMScoreAdjust = "-999";
+
+        StateDirectory = "containerd";
+        RuntimeDirectory = "containerd";
+        RuntimeDirectoryPreserve = "yes";
+      };
+      unitConfig = {
+        StartLimitBurst = "16";
+        StartLimitIntervalSec = "120s";
+      };
+    };
+  };
+}
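+
+# A minimal usage sketch (the snapshotter value is illustrative;
+# "overlayfs" is containerd's usual default):
+#
+#   virtualisation.containerd = {
+#     enable = true;
+#     settings.plugins."io.containerd.grpc.v1.cri".containerd.snapshotter =
+#       "overlayfs";
+#   };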
diff --git a/nixos/modules/virtualisation/containers.nix b/nixos/modules/virtualisation/containers.nix
new file mode 100644
index 00000000000..cea3d51d3ae
--- /dev/null
+++ b/nixos/modules/virtualisation/containers.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, utils, ... }:
+let
+  cfg = config.virtualisation.containers;
+
+  inherit (lib) literalExpression mkOption types;
+
+  toml = pkgs.formats.toml { };
+in
+{
+  meta = {
+    maintainers = [] ++ lib.teams.podman.members;
+  };
+
+
+  imports = [
+    (
+      lib.mkRemovedOptionModule
+      [ "virtualisation" "containers" "users" ]
+      "All users with `isNormalUser = true` set now get appropriate subuid/subgid mappings."
+    )
+    (
+      lib.mkRemovedOptionModule
+      [ "virtualisation" "containers" "containersConf" "extraConfig" ]
+      "Use virtualisation.containers.containersConf.settings instead."
+    )
+  ];
+
+  options.virtualisation.containers = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables the common /etc/containers configuration module.
+        '';
+      };
+
+    ociSeccompBpfHook.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable the OCI seccomp BPF hook";
+    };
+
+    containersConf.settings = mkOption {
+      type = toml.type;
+      default = { };
+      description = "containers.conf configuration";
+    };
+
+    containersConf.cniPlugins = mkOption {
+      type = types.listOf types.package;
+      defaultText = literalExpression ''
+        [
+          pkgs.cni-plugins
+        ]
+      '';
+      example = literalExpression ''
+        [
+          pkgs.cniPlugins.dnsname
+        ]
+      '';
+      description = ''
+        CNI plugins to install on the system.
+      '';
+    };
+
+    storage.settings = mkOption {
+      type = toml.type;
+      default = {
+        storage = {
+          driver = "overlay";
+          graphroot = "/var/lib/containers/storage";
+          runroot = "/run/containers/storage";
+        };
+      };
+      description = "storage.conf configuration";
+    };
+
+    registries = {
+      search = mkOption {
+        type = types.listOf types.str;
+        default = [ "docker.io" "quay.io" ];
+        description = ''
+          List of repositories to search.
+        '';
+      };
+
+      insecure = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of insecure repositories.
+        '';
+      };
+
+      block = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = ''
+          List of blocked repositories.
+        '';
+      };
+    };
+
+    policy = mkOption {
+      default = {};
+      type = types.attrs;
+      example = literalExpression ''
+        {
+          default = [ { type = "insecureAcceptAnything"; } ];
+          transports = {
+            docker-daemon = {
+              "" = [ { type = "insecureAcceptAnything"; } ];
+            };
+          };
+        }
+      '';
+      description = ''
+        Signature verification policy file.
+        If this option is empty the default policy file from
+        <literal>skopeo</literal> will be used.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    virtualisation.containers.containersConf.cniPlugins = [ pkgs.cni-plugins ];
+
+    virtualisation.containers.containersConf.settings = {
+      network.cni_plugin_dirs = map (p: "${lib.getBin p}/bin") cfg.containersConf.cniPlugins;
+      engine = {
+        init_path = "${pkgs.catatonit}/bin/catatonit";
+      } // lib.optionalAttrs cfg.ociSeccompBpfHook.enable {
+        hooks_dir = [ config.boot.kernelPackages.oci-seccomp-bpf-hook ];
+      };
+    };
+
+    environment.etc."containers/containers.conf".source =
+      toml.generate "containers.conf" cfg.containersConf.settings;
+
+    environment.etc."containers/storage.conf".source =
+      toml.generate "storage.conf" cfg.storage.settings;
+
+    environment.etc."containers/registries.conf".source = toml.generate "registries.conf" {
+      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
+    };
+
+    environment.etc."containers/policy.json".source =
+      if cfg.policy != {} then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+      else utils.copyFile "${pkgs.skopeo.src}/default-policy.json";
+  };
+
+}
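+
+# A minimal usage sketch (the insecure registry name is illustrative):
+#
+#   virtualisation.containers = {
+#     enable = true;
+#     registries.search = [ "docker.io" "quay.io" ];
+#     registries.insecure = [ "registry.local:5000" ];
+#   };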
diff --git a/nixos/modules/virtualisation/cri-o.nix b/nixos/modules/virtualisation/cri-o.nix
new file mode 100644
index 00000000000..cf511000150
--- /dev/null
+++ b/nixos/modules/virtualisation/cri-o.nix
@@ -0,0 +1,163 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.cri-o;
+
+  crioPackage = (pkgs.cri-o.override { inherit (cfg) extraPackages; });
+
+  format = pkgs.formats.toml { };
+
+  cfgFile = format.generate "00-default.conf" cfg.settings;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "virtualisation" "cri-o" "registries" ] [ "virtualisation" "containers" "registries" "search" ])
+  ];
+
+  meta = {
+    maintainers = teams.podman.members;
+  };
+
+  options.virtualisation.cri-o = {
+    enable = mkEnableOption "Container Runtime Interface for OCI (CRI-O)";
+
+    storageDriver = mkOption {
+      type = types.enum [ "btrfs" "overlay" "vfs" ];
+      default = "overlay";
+      description = "Storage driver to be used";
+    };
+
+    logLevel = mkOption {
+      type = types.enum [ "trace" "debug" "info" "warn" "error" "fatal" ];
+      default = "info";
+      description = "Log level to be used";
+    };
+
+    pauseImage = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default pause image for pod sandboxes";
+      example = "k8s.gcr.io/pause:3.2";
+    };
+
+    pauseCommand = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default pause command";
+      example = "/pause";
+    };
+
+    runtime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = "Override the default runtime";
+      example = "crun";
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = literalExpression ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = ''
+        Extra packages to be installed in the CRI-O wrapper.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = crioPackage;
+      defaultText = literalDocBook ''
+        <literal>pkgs.cri-o</literal> built with
+        <literal>config.${opt.extraPackages}</literal>.
+      '';
+      internal = true;
+      description = ''
+        The final CRI-O package (including extra packages).
+      '';
+    };
+
+    networkDir = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = "Override the network_dir option.";
+      internal = true;
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = ''
+        Configuration for cri-o, see
+        <link xlink:href="https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md"/>.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package pkgs.cri-tools ];
+
+    environment.etc."crictl.yaml".source = utils.copyFile "${pkgs.cri-o-unwrapped.src}/crictl.yaml";
+
+    virtualisation.cri-o.settings.crio = {
+      storage_driver = cfg.storageDriver;
+
+      image = {
+        pause_image = mkIf (cfg.pauseImage != null) cfg.pauseImage;
+        pause_command = mkIf (cfg.pauseCommand != null) cfg.pauseCommand;
+      };
+
+      network = {
+        plugin_dirs = [ "${pkgs.cni-plugins}/bin" ];
+        network_dir = mkIf (cfg.networkDir != null) cfg.networkDir;
+      };
+
+      runtime = {
+        cgroup_manager = "systemd";
+        log_level = cfg.logLevel;
+        manage_ns_lifecycle = true;
+        pinns_path = "${cfg.package}/bin/pinns";
+        hooks_dir =
+          optional (config.virtualisation.containers.ociSeccompBpfHook.enable)
+            config.boot.kernelPackages.oci-seccomp-bpf-hook;
+
+        default_runtime = mkIf (cfg.runtime != null) cfg.runtime;
+        runtimes = mkIf (cfg.runtime != null) {
+          "${cfg.runtime}" = { };
+        };
+      };
+    };
+
+    environment.etc."cni/net.d/10-crio-bridge.conf".source = utils.copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/10-crio-bridge.conf";
+    environment.etc."cni/net.d/99-loopback.conf".source = utils.copyFile "${pkgs.cri-o-unwrapped.src}/contrib/cni/99-loopback.conf";
+    environment.etc."crio/crio.conf.d/00-default.conf".source = cfgFile;
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
+
+    systemd.services.crio = {
+      description = "Container Runtime Interface for OCI (CRI-O)";
+      documentation = [ "https://github.com/cri-o/cri-o" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/crio";
+        ExecReload = "/bin/kill -s HUP $MAINPID";
+        TasksMax = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "1048576";
+        LimitCORE = "infinity";
+        OOMScoreAdjust = "-999";
+        TimeoutStartSec = "0";
+        Restart = "on-abnormal";
+      };
+      restartTriggers = [ cfgFile ];
+    };
+  };
+}
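+
+# A minimal usage sketch (assumes crun as the alternative runtime; the
+# package choice is illustrative):
+#
+#   virtualisation.cri-o = {
+#     enable = true;
+#     runtime = "crun";
+#     extraPackages = [ pkgs.crun ];
+#   };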
diff --git a/nixos/modules/virtualisation/digital-ocean-config.nix b/nixos/modules/virtualisation/digital-ocean-config.nix
new file mode 100644
index 00000000000..88cb0cd450e
--- /dev/null
+++ b/nixos/modules/virtualisation/digital-ocean-config.nix
@@ -0,0 +1,197 @@
+{ config, pkgs, lib, modulesPath, ... }:
+with lib;
+{
+  imports = [
+    (modulesPath + "/profiles/qemu-guest.nix")
+    (modulesPath + "/virtualisation/digital-ocean-init.nix")
+  ];
+  options.virtualisation.digitalOcean = with types; {
+    setRootPassword = mkOption {
+      type = bool;
+      default = false;
+      example = true;
+      description = "Whether to set the root password from the Digital Ocean metadata";
+    };
+    setSshKeys = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = "Whether to fetch ssh keys from Digital Ocean";
+    };
+    seedEntropy = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = "Whether to run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+    };
+  };
+  config =
+    let
+      cfg = config.virtualisation.digitalOcean;
+      hostName = config.networking.hostName;
+      doMetadataFile = "/run/do-metadata/v1.json";
+    in mkMerge [{
+      fileSystems."/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      boot = {
+        growPartition = true;
+        kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+        initrd.kernelModules = [ "virtio_scsi" ];
+        kernelModules = [ "virtio_pci" "virtio_net" ];
+        loader = {
+          grub.device = "/dev/vda";
+          timeout = 0;
+          grub.configurationLimit = 0;
+        };
+      };
+      services.openssh = {
+        enable = mkDefault true;
+        passwordAuthentication = mkDefault false;
+      };
+      services.do-agent.enable = mkDefault true;
+      networking = {
+        hostName = mkDefault ""; # use Digital Ocean metadata server
+      };
+
+      /* Check for and wait for the metadata server to become reachable.
+       * This serves as a dependency for all the other metadata services. */
+      systemd.services.digitalocean-metadata = {
+        path = [ pkgs.curl ];
+        description = "Get host metadata provided by Digitalocean";
+        script = ''
+          set -eu
+          DO_DELAY_ATTEMPTS=0
+          while ! curl -fsSL -o $RUNTIME_DIRECTORY/v1.json http://169.254.169.254/metadata/v1.json; do
+            DO_DELAY_ATTEMPTS=$((DO_DELAY_ATTEMPTS + 1))
+            if (( $DO_DELAY_ATTEMPTS >= $DO_DELAY_ATTEMPTS_MAX )); then
+              echo "giving up"
+              exit 1
+            fi
+
+            echo "metadata unavailable, trying again in 1s..."
+            sleep 1
+          done
+          chmod 600 $RUNTIME_DIRECTORY/v1.json
+          '';
+        environment = {
+          DO_DELAY_ATTEMPTS_MAX = "10";
+        };
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          RuntimeDirectory = "do-metadata";
+          RuntimeDirectoryPreserve = "yes";
+        };
+        unitConfig = {
+          ConditionPathExists = "!${doMetadataFile}";
+          After = [ "network-pre.target" ] ++
+            optional config.networking.dhcpcd.enable "dhcpcd.service" ++
+            optional config.systemd.network.enable "systemd-networkd.service";
+        };
+      };
+
+      /* Fetch the root password from the digital ocean metadata.
+       * There is no specific route for this, so we use jq to get
+       * it from the One Big JSON metadata blob */
+      systemd.services.digitalocean-set-root-password = mkIf cfg.setRootPassword {
+        path = [ pkgs.shadow pkgs.jq ];
+        description = "Set root password provided by Digitalocean";
+        wantedBy = [ "multi-user.target" ];
+        script = ''
+          set -eo pipefail
+          ROOT_PASSWORD=$(jq -er '.auth_key' ${doMetadataFile})
+          echo "root:$ROOT_PASSWORD" | chpasswd
+          mkdir -p /etc/do-metadata/set-root-password
+          '';
+        unitConfig = {
+          ConditionPathExists = "!/etc/do-metadata/set-root-password";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Set the hostname from Digital Ocean, unless the user configured it in
+       * the NixOS configuration. The cached metadata file isn't used here
+       * because the hostname is a mutable part of the droplet. */
+      systemd.services.digitalocean-set-hostname = mkIf (hostName == "") {
+        path = [ pkgs.curl pkgs.nettools ];
+        description = "Set hostname provided by Digitalocean";
+        wantedBy = [ "network.target" ];
+        script = ''
+          set -e
+          DIGITALOCEAN_HOSTNAME=$(curl -fsSL http://169.254.169.254/metadata/v1/hostname)
+          hostname "$DIGITALOCEAN_HOSTNAME"
+          if [[ ! -e /etc/hostname || -w /etc/hostname ]]; then
+            printf "%s\n" "$DIGITALOCEAN_HOSTNAME" > /etc/hostname
+          fi
+        '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Wants = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Fetch the ssh keys for root from Digital Ocean */
+      systemd.services.digitalocean-ssh-keys = mkIf cfg.setSshKeys {
+        description = "Set root ssh keys provided by Digital Ocean";
+        wantedBy = [ "multi-user.target" ];
+        path = [ pkgs.jq ];
+        script = ''
+          set -e
+          mkdir -m 0700 -p /root/.ssh
+          jq -er '.public_keys[]' ${doMetadataFile} > /root/.ssh/authorized_keys
+          chmod 600 /root/.ssh/authorized_keys
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+        unitConfig = {
+          ConditionPathExists = "!/root/.ssh/authorized_keys";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+      };
+
+      /* Initialize the RNG by running the entropy-seed script from the
+       * Digital Ocean metadata
+       */
+      systemd.services.digitalocean-entropy-seed = mkIf cfg.seedEntropy {
+        description = "Run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+        wantedBy = [ "network.target" ];
+        path = [ pkgs.jq pkgs.mpack ];
+        script = ''
+          set -eo pipefail
+          TEMPDIR=$(mktemp -d)
+          jq -er '.vendor_data' ${doMetadataFile} | munpack -tC $TEMPDIR
+          ENTROPY_SEED=$(grep -rl "DigitalOcean Entropy Seed script" $TEMPDIR)
+          ${pkgs.runtimeShell} $ENTROPY_SEED
+          rm -rf $TEMPDIR
+          '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+    }
+  ];
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
+
diff --git a/nixos/modules/virtualisation/digital-ocean-image.nix b/nixos/modules/virtualisation/digital-ocean-image.nix
new file mode 100644
index 00000000000..0ff2ee591f2
--- /dev/null
+++ b/nixos/modules/virtualisation/digital-ocean-image.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.digitalOceanImage;
+in
+{
+
+  imports = [ ./digital-ocean-config.nix ];
+
+  options = {
+    virtualisation.digitalOceanImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 4096;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at
+        <literal>/etc/nixos/configuration.nix</literal> and be used when switching
+        to a new configuration. If set to <literal>null</literal>, a default
+        configuration is used that imports
+        <literal>(modulesPath + "/virtualisation/digital-ocean-config.nix")</literal>.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.compressionMethod = mkOption {
+      type = types.enum [ "gzip" "bzip2" ];
+      default = "gzip";
+      example = "bzip2";
+      description = ''
+        Disk image compression method. Choose bzip2 to generate smaller images
+        that take longer to build but consume less metered storage space on
+        your Digital Ocean account.
+      '';
+    };
+  };
+
+  #### implementation
+  config = {
+
+    system.build.digitalOceanImage = import ../../lib/make-disk-image.nix {
+      name = "digital-ocean-image";
+      format = "qcow2";
+      postVM = let
+        compress = {
+          "gzip" = "${pkgs.gzip}/bin/gzip";
+          "bzip2" = "${pkgs.bzip2}/bin/bzip2";
+        }.${cfg.compressionMethod};
+      in ''
+        ${compress} $diskImage
+      '';
+      configFile = if cfg.configFile == null
+        then config.virtualisation.digitalOcean.defaultConfigFile
+        else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+
+}
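+
+# A build sketch (the attribute path follows from
+# system.build.digitalOceanImage above; the flags are illustrative):
+#
+#   $ nix-build '<nixpkgs/nixos>' \
+#       -A config.system.build.digitalOceanImage \
+#       -I nixos-config=./configuration.nix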
diff --git a/nixos/modules/virtualisation/digital-ocean-init.nix b/nixos/modules/virtualisation/digital-ocean-init.nix
new file mode 100644
index 00000000000..4339d91de16
--- /dev/null
+++ b/nixos/modules/virtualisation/digital-ocean-init.nix
@@ -0,0 +1,95 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.virtualisation.digitalOcean;
+  defaultConfigFile = pkgs.writeText "digitalocean-configuration.nix" ''
+    { modulesPath, lib, ... }:
+    {
+      imports = lib.optional (builtins.pathExists ./do-userdata.nix) ./do-userdata.nix ++ [
+        (modulesPath + "/virtualisation/digital-ocean-config.nix")
+      ];
+    }
+  '';
+in {
+  options.virtualisation.digitalOcean.rebuildFromUserData = mkOption {
+    type = types.bool;
+    default = true;
+    example = true;
+    description = "Whether to reconfigure the system from Digital Ocean user data";
+  };
+  options.virtualisation.digitalOcean.defaultConfigFile = mkOption {
+    type = types.path;
+    default = defaultConfigFile;
+    defaultText = literalDocBook ''
+      The default configuration imports user-data if applicable and
+      <literal>(modulesPath + "/virtualisation/digital-ocean-config.nix")</literal>.
+    '';
+    description = ''
+      A path to a configuration file which will be placed at
+      <literal>/etc/nixos/configuration.nix</literal> and be used when switching to
+      a new configuration.
+    '';
+  };
+
+  config = {
+    systemd.services.digitalocean-init = mkIf cfg.rebuildFromUserData {
+      description = "Reconfigure the system from Digital Ocean userdata on startup";
+      wantedBy = [ "network-online.target" ];
+      unitConfig = {
+        ConditionPathExists = "!/etc/nixos/do-userdata.nix";
+        After = [ "digitalocean-metadata.service" "network-online.target" ];
+        Requires = [ "digitalocean-metadata.service" ];
+        X-StopOnRemoval = false;
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      restartIfChanged = false;
+      path = [ pkgs.jq pkgs.gnused pkgs.gnugrep pkgs.systemd config.nix.package config.system.build.nixos-rebuild ];
+      environment = {
+        HOME = "/root";
+        NIX_PATH = concatStringsSep ":" [
+          "/nix/var/nix/profiles/per-user/root/channels/nixos"
+          "nixos-config=/etc/nixos/configuration.nix"
+          "/nix/var/nix/profiles/per-user/root/channels"
+        ];
+      };
+      script = ''
+        set -e
+        echo "attempting to fetch configuration from Digital Ocean user data..."
+        userData=$(mktemp)
+        if jq -er '.user_data' /run/do-metadata/v1.json > $userData; then
+          # If the user-data looks like it could be a nix expression,
+          # copy it over. Also, look for a magic three-hash comment and set
+          # that as the channel.
+          if nix-instantiate --parse $userData > /dev/null; then
+            channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+            printf "%s" "$channels" | while read channel; do
+              echo "writing channel: $channel"
+            done
+
+            if [[ -n "$channels" ]]; then
+              printf "%s" "$channels" > /root/.nix-channels
+              nix-channel --update
+            fi
+
+            echo "setting configuration from Digital Ocean user data"
+            cp "$userData" /etc/nixos/do-userdata.nix
+            if [[ ! -e /etc/nixos/configuration.nix ]]; then
+              install -m0644 ${cfg.defaultConfigFile} /etc/nixos/configuration.nix
+            fi
+          else
+            echo "user data does not appear to be a Nix expression; ignoring"
+            exit
+          fi
+
+          nixos-rebuild switch
+        else
+          echo "no user data is available"
+        fi
+        '';
+    };
+  };
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
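+
+# A sketch of user data this service accepts (the channel URL is
+# illustrative): a Nix module, optionally preceded by "###" lines in the
+# .nix-channels "<url> <name>" format:
+#
+#   ### https://nixos.org/channels/nixos-unstable nixos
+#   { pkgs, ... }:
+#   {
+#     environment.systemPackages = [ pkgs.htop ];
+#   }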
diff --git a/nixos/modules/virtualisation/docker-image.nix b/nixos/modules/virtualisation/docker-image.nix
new file mode 100644
index 00000000000..baac3a35a78
--- /dev/null
+++ b/nixos/modules/virtualisation/docker-image.nix
@@ -0,0 +1,57 @@
+{ ... }:
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  boot.postBootCommands =
+    ''
+      # Set virtualisation to docker
+      echo "docker" > /run/systemd/container
+    '';
+
+  # iptables does not work inside Docker containers.
+  networking.firewall.enable = false;
+
+  # Socket-activated ssh presents problems in Docker.
+  services.openssh.startWhenNeeded = false;
+}
+
+# Example usage:
+#
+## default.nix
+# let
+#   nixos = import <nixpkgs/nixos> {
+#     configuration = ./configuration.nix;
+#     system = "x86_64-linux";
+#   };
+# in
+# nixos.config.system.build.tarball
+#
+## configuration.nix
+# { pkgs, config, lib, ... }:
+# {
+#   imports = [
+#     <nixpkgs/nixos/modules/virtualisation/docker-image.nix>
+#     <nixpkgs/nixos/modules/installer/cd-dvd/channel.nix>
+#   ];
+#
+#   documentation.doc.enable = false;
+#
+#   environment.systemPackages = with pkgs; [
+#     bashInteractive
+#     cacert
+#     nix
+#   ];
+# }
+#
+## Run
+# Build the tarball:
+# $ nix-build default.nix
+# Load into docker:
+# $ docker import result/tarball/nixos-system-*.tar.xz nixos-docker
+# Boot into systemd:
+# $ docker run --privileged -it nixos-docker /init
+# Log into the container:
+# $ docker exec -it <container-name> /run/current-system/sw/bin/bash
diff --git a/nixos/modules/virtualisation/docker-rootless.nix b/nixos/modules/virtualisation/docker-rootless.nix
new file mode 100644
index 00000000000..d371f67ecdc
--- /dev/null
+++ b/nixos/modules/virtualisation/docker-rootless.nix
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker.rootless;
+  proxy_env = config.networking.proxy.envVars;
+  settingsFormat = pkgs.formats.json {};
+  daemonSettingsFile = settingsFormat.generate "daemon.json" cfg.daemon.settings;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker.rootless = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        This option enables docker in rootless mode: a per-user daemon that
+        manages linux containers. To interact with the daemon, one needs to set
+        <command>DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock</command>.
+      '';
+    };
+
+    setSocketVariable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Point <command>DOCKER_HOST</command> to rootless Docker instance for
+        normal users by default.
+      '';
+    };
+
+    daemon.settings = mkOption {
+      type = settingsFormat.type;
+      default = { };
+      example = {
+        ipv6 = true;
+        "fixed-cidr-v6" = "fd00::/80";
+      };
+      description = ''
+        Configuration for the docker daemon. The attributes are serialized to JSON and used as daemon.json.
+        See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      defaultText = literalExpression "pkgs.docker";
+      type = types.package;
+      example = literalExpression "pkgs.docker-edge";
+      description = ''
+        Docker package to be used in the module.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    environment.extraInit = optionalString cfg.setSocketVariable ''
+      if [ -z "$DOCKER_HOST" -a -n "$XDG_RUNTIME_DIR" ]; then
+        export DOCKER_HOST="unix://$XDG_RUNTIME_DIR/docker.sock"
+      fi
+    '';
+
+    # Taken from https://github.com/moby/moby/blob/master/contrib/dockerd-rootless-setuptool.sh
+    systemd.user.services.docker = {
+      wantedBy = [ "default.target" ];
+      description = "Docker Application Container Engine (Rootless)";
+      # needs newuidmap from pkgs.shadow
+      path = [ "/run/wrappers" ];
+      environment = proxy_env;
+      unitConfig = {
+        # docker-rootless doesn't support running as root.
+        ConditionUser = "!root";
+        StartLimitInterval = "60s";
+      };
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/dockerd-rootless --config-file=${daemonSettingsFile}";
+        ExecReload = "${pkgs.procps}/bin/kill -s HUP $MAINPID";
+        TimeoutSec = 0;
+        RestartSec = 2;
+        Restart = "always";
+        StartLimitBurst = 3;
+        LimitNOFILE = "infinity";
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        Delegate = true;
+        NotifyAccess = "all";
+        KillMode = "mixed";
+      };
+    };
+  };
+
+}
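+
+# A minimal usage sketch (values illustrative):
+#
+#   virtualisation.docker.rootless = {
+#     enable = true;
+#     setSocketVariable = true;
+#   };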
diff --git a/nixos/modules/virtualisation/docker.nix b/nixos/modules/virtualisation/docker.nix
new file mode 100644
index 00000000000..a69cbe55c78
--- /dev/null
+++ b/nixos/modules/virtualisation/docker.nix
@@ -0,0 +1,252 @@
+# Systemd services for docker.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker;
+  proxy_env = config.networking.proxy.envVars;
+  settingsFormat = pkgs.formats.json {};
+  daemonSettingsFile = settingsFormat.generate "daemon.json" cfg.daemon.settings;
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This option enables docker, a daemon that manages
+            linux containers. Users in the "docker" group can interact with
+            the daemon (e.g. to start or stop containers) using the
+            <command>docker</command> command line tool.
+          '';
+      };
+
+    listenOptions =
+      mkOption {
+        type = types.listOf types.str;
+        default = ["/run/docker.sock"];
+        description =
+          ''
+            A list of unix and tcp addresses the docker daemon should listen
+            on. The format follows ListenStream as described in
+            systemd.socket(5).
+          '';
+      };
+
+    enableOnBoot =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            When enabled dockerd is started on boot. This is required for
+            containers which are created with the
+            <literal>--restart=always</literal> flag to work. If this option is
+            disabled, docker might be started on demand by socket activation.
+          '';
+      };
+
+    daemon.settings =
+      mkOption {
+        type = settingsFormat.type;
+        default = { };
+        example = {
+          ipv6 = true;
+          "fixed-cidr-v6" = "fd00::/80";
+        };
+        description = ''
+          Configuration for the docker daemon. The attributes are serialized to JSON and used as daemon.json.
+          See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+        '';
+      };
+
+    enableNvidia =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
+        '';
+      };
+
+    liveRestore =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Allow dockerd to be restarted without affecting running containers.
+            This option is incompatible with docker swarm.
+          '';
+      };
+
+    storageDriver =
+      mkOption {
+        type = types.nullOr (types.enum ["aufs" "btrfs" "devicemapper" "overlay" "overlay2" "zfs"]);
+        default = null;
+        description =
+          ''
+            This option determines which Docker storage driver to use. By default
+            it lets docker automatically choose the preferred storage driver.
+          '';
+      };
+
+    logDriver =
+      mkOption {
+        type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"];
+        default = "journald";
+        description =
+          ''
+            This option determines which Docker log driver to use.
+          '';
+      };
+
+    extraOptions =
+      mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description =
+          ''
+            The extra command-line options to pass to
+            <command>docker</command> daemon.
+          '';
+      };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to periodically prune Docker resources. If enabled, a
+          systemd timer will run <literal>docker system prune -f</literal>
+          as specified by the <literal>dates</literal> option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = ''
+          Any additional flags passed to <command>docker system prune</command>.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = ''
+          Specification (in the format described by
+          <citerefentry><refentrytitle>systemd.time</refentrytitle>
+          <manvolnum>7</manvolnum></citerefentry>) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      defaultText = literalExpression "pkgs.docker";
+      type = types.package;
+      example = literalExpression "pkgs.docker-edge";
+      description = ''
+        Docker package to be used in the module.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+      boot.kernelModules = [ "bridge" "veth" ];
+      boot.kernel.sysctl = {
+        "net.ipv4.conf.all.forwarding" = mkOverride 98 true;
+        "net.ipv4.conf.default.forwarding" = mkOverride 98 true;
+      };
+      environment.systemPackages = [ cfg.package ]
+        ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      users.groups.docker.gid = config.ids.gids.docker;
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.docker = {
+        wantedBy = optional cfg.enableOnBoot "multi-user.target";
+        after = [ "network.target" "docker.socket" ];
+        requires = [ "docker.socket" ];
+        environment = proxy_env;
+        serviceConfig = {
+          Type = "notify";
+          ExecStart = [
+            ""
+            ''
+              ${cfg.package}/bin/dockerd \
+                --config-file=${daemonSettingsFile} \
+                ${cfg.extraOptions}
+            ''];
+          ExecReload=[
+            ""
+            "${pkgs.procps}/bin/kill -s HUP $MAINPID"
+          ];
+        };
+
+        path = [ pkgs.kmod ] ++ optional (cfg.storageDriver == "zfs") pkgs.zfs
+          ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      };
+
+      systemd.sockets.docker = {
+        description = "Docker Socket for the API";
+        wantedBy = [ "sockets.target" ];
+        socketConfig = {
+          ListenStream = cfg.listenOptions;
+          SocketMode = "0660";
+          SocketUser = "root";
+          SocketGroup = "docker";
+        };
+      };
+
+      systemd.services.docker-prune = {
+        description = "Prune docker resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${cfg.package}/bin/docker system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
+      };
+
+      assertions = [
+        { assertion = cfg.enableNvidia -> config.hardware.opengl.driSupport32Bit or false;
+          message = "Option enableNvidia requires 32bit support libraries";
+        }];
+
+      virtualisation.docker.daemon.settings = {
+        group = "docker";
+        hosts = [ "fd://" ];
+        log-driver = mkDefault cfg.logDriver;
+        storage-driver = mkIf (cfg.storageDriver != null) (mkDefault cfg.storageDriver);
+        live-restore = mkDefault cfg.liveRestore;
+        runtimes = mkIf cfg.enableNvidia {
+          nvidia = {
+            path = "${pkgs.nvidia-docker}/bin/nvidia-container-runtime";
+          };
+        };
+      };
+    }
+  ]);
+
+  imports = [
+    (mkRemovedOptionModule ["virtualisation" "docker" "socketActivation"] "This option was removed and socket activation is now always active")
+  ];
+
+}
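+
+# A minimal usage sketch for the options above (values are illustrative,
+# not recommendations):
+#
+#   virtualisation.docker = {
+#     enable = true;
+#     logDriver = "json-file";     # overrides the "journald" default
+#     autoPrune.enable = true;     # weekly `docker system prune -f`
+#     autoPrune.flags = [ "--all" ];
+#   };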
diff --git a/nixos/modules/virtualisation/ec2-amis.nix b/nixos/modules/virtualisation/ec2-amis.nix
new file mode 100644
index 00000000000..1ffb326ba7a
--- /dev/null
+++ b/nixos/modules/virtualisation/ec2-amis.nix
@@ -0,0 +1,9 @@
+# Compatibility shim
+let
+  lib = import ../../../lib;
+  inherit (lib) mapAttrs;
+  everything = import ./amazon-ec2-amis.nix;
+  doAllVersions = mapAttrs (versionName: doRegion);
+  doRegion = mapAttrs (regionName: systems: systems.x86_64-linux);
+in
+  doAllVersions everything
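+
+# Shape sketch (hypothetical AMI ID): the shim turns
+#   { "20.09".us-east-1.x86_64-linux = "ami-0123456789abcdef0"; }
+# into
+#   { "20.09".us-east-1 = "ami-0123456789abcdef0"; }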
diff --git a/nixos/modules/virtualisation/ec2-data.nix b/nixos/modules/virtualisation/ec2-data.nix
new file mode 100644
index 00000000000..1b764e7e4d8
--- /dev/null
+++ b/nixos/modules/virtualisation/ec2-data.nix
@@ -0,0 +1,91 @@
+# This module defines a systemd service that sets the SSH host key and
+# authorized client key and host name of virtual machines running on
+# Amazon EC2, Eucalyptus and OpenStack Compute (Nova).
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "ec2" "metadata" ] "")
+  ];
+
+  config = {
+
+    systemd.services.apply-ec2-data =
+      { description = "Apply EC2 Data";
+
+        wantedBy = [ "multi-user.target" "sshd.service" ];
+        before = [ "sshd.service" ];
+
+        path = [ pkgs.iproute2 ];
+
+        script =
+          ''
+            ${optionalString (config.networking.hostName == "") ''
+              echo "setting host name..."
+              if [ -s /etc/ec2-metadata/hostname ]; then
+                  ${pkgs.nettools}/bin/hostname $(cat /etc/ec2-metadata/hostname)
+              fi
+            ''}
+
+            if ! [ -e /root/.ssh/authorized_keys ]; then
+                echo "obtaining SSH key..."
+                mkdir -m 0700 -p /root/.ssh
+                if [ -s /etc/ec2-metadata/public-keys-0-openssh-key ]; then
+                    cat /etc/ec2-metadata/public-keys-0-openssh-key >> /root/.ssh/authorized_keys
+                    echo "new key added to authorized_keys"
+                    chmod 600 /root/.ssh/authorized_keys
+                fi
+            fi
+
+            # Extract the intended SSH host key for this machine from
+            # the supplied user data, if available.  Otherwise sshd will
+            # generate one normally.
+            userData=/etc/ec2-metadata/user-data
+
+            mkdir -m 0755 -p /etc/ssh
+
+            if [ -s "$userData" ]; then
+              key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+              fi
+
+              key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+              fi
+            fi
+          '';
+
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+    systemd.services.print-host-key =
+      { description = "Print SSH Host Key";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "sshd.service" ];
+        script =
+          ''
+            # Print the host public key on the console so that the user
+            # can obtain it securely by parsing the output of
+            # ec2-get-console-output.
+            echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
+            for i in /etc/ssh/ssh_host_*_key.pub; do
+                ${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
+            done
+            echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
+          '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+  };
+}
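+
+# Sketch of the user data this service parses (hypothetical key material;
+# newlines inside the private key are encoded as `|` characters):
+#
+#   SSH_HOST_ED25519_KEY_PUB:ssh-ed25519 AAAA... root@ec2
+#   SSH_HOST_ED25519_KEY:-----BEGIN OPENSSH PRIVATE KEY-----|...|-----END OPENSSH PRIVATE KEY-----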
diff --git a/nixos/modules/virtualisation/ec2-metadata-fetcher.nix b/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
new file mode 100644
index 00000000000..760f024f33f
--- /dev/null
+++ b/nixos/modules/virtualisation/ec2-metadata-fetcher.nix
@@ -0,0 +1,77 @@
+{ curl, targetRoot, wgetExtraOptions }:
+# Note: be very cautious about dependencies, each dependency grows
+# the closure of the initrd. Ideally we would not even require curl,
+# but there is no reasonable way to send an HTTP PUT request without
+# it. Note: do not be fooled: the wget referenced in this script
+# is busybox's wget, not the fully featured one with --method support.
+#
+# Make sure that every package you depend on here is already listed as
+# a channel blocker for both the full-sized and small channels.
+# Otherwise, we risk breaking user deploys in released channels.
+#
+# Also note: OpenStack's metadata service for its instances aims to be
+# compatible with the EC2 IMDS. Where possible, try to keep the set of
+# fetched metadata in sync with ./openstack-metadata-fetcher.nix .
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+  rm -f "$metaDir/*"
+
+  get_imds_token() {
+    # retry-delay of 1 selected to give the system a second to get going,
+    # but not add a lot to the bootup time
+    ${curl}/bin/curl \
+      -v \
+      --retry 3 \
+      --retry-delay 1 \
+      --fail \
+      -X PUT \
+      --connect-timeout 1 \
+      -H "X-aws-ec2-metadata-token-ttl-seconds: 600" \
+      http://169.254.169.254/latest/api/token
+  }
+
+  preflight_imds_token() {
+    # retry-delay of 1 selected to give the system a second to get going,
+    # but not add a lot to the bootup time
+    ${curl}/bin/curl \
+      -v \
+      --retry 3 \
+      --retry-delay 1 \
+      --fail \
+      --connect-timeout 1 \
+      -H "X-aws-ec2-metadata-token: $IMDS_TOKEN" \
+      http://169.254.169.254/1.0/meta-data/instance-id
+  }
+
+  try=1
+  while [ $try -le 3 ]; do
+    echo "(attempt $try/3) getting an EC2 instance metadata service v2 token..."
+    IMDS_TOKEN=$(get_imds_token) && break
+    try=$((try + 1))
+    sleep 1
+  done
+
+  if [ "x$IMDS_TOKEN" == "x" ]; then
+    echo "failed to fetch an IMDS2v token."
+  fi
+
+  try=1
+  while [ $try -le 10 ]; do
+    echo "(attempt $try/10) validating the EC2 instance metadata service v2 token..."
+    preflight_imds_token && break
+    try=$((try + 1))
+    sleep 1
+  done
+
+  echo "getting EC2 instance metadata..."
+
+  wget_imds() {
+    wget ${wgetExtraOptions} --header "X-aws-ec2-metadata-token: $IMDS_TOKEN" "$@";
+  }
+
+  wget_imds -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+  (umask 077 && wget_imds -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data)
+  wget_imds -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+  wget_imds -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
+''
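+
+# A minimal sketch of how this snippet builder might be instantiated from
+# an image module (argument values are illustrative):
+#
+#   metadataFetcher = import ./ec2-metadata-fetcher.nix {
+#     curl = pkgs.curl;
+#     targetRoot = "/";
+#     wgetExtraOptions = "-q";
+#   };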
diff --git a/nixos/modules/virtualisation/ecs-agent.nix b/nixos/modules/virtualisation/ecs-agent.nix
new file mode 100644
index 00000000000..aa38a02ea08
--- /dev/null
+++ b/nixos/modules/virtualisation/ecs-agent.nix
@@ -0,0 +1,45 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ecs-agent;
+in {
+  options.services.ecs-agent = {
+    enable = mkEnableOption "Amazon ECS agent";
+
+    package = mkOption {
+      type = types.path;
+      description = "The ECS agent package to use";
+      default = pkgs.ecs-agent;
+      defaultText = literalExpression "pkgs.ecs-agent";
+    };
+
+    extra-environment = mkOption {
+      type = types.attrsOf types.str;
+      description = "The environment the ECS agent should run with. See the ECS agent documentation for keys that work here.";
+      default = {};
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # This service doesn't run if docker isn't running. Unlike potentially
+    # remote services such as postgresql, docker has to run locally, so
+    # `docker.enable` is always set when the ECS agent is enabled.
+    virtualisation.docker.enable = true;
+
+    systemd.services.ecs-agent = {
+      inherit (cfg.package.meta) description;
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = cfg.extra-environment;
+
+      script = ''
+        if [ ! -z "$ECS_DATADIR" ]; then
+          mkdir -p "$ECS_DATADIR"
+        fi
+        ${cfg.package}/bin/agent
+      '';
+    };
+  };
+}
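+
+# A minimal usage sketch (cluster name and data dir are hypothetical;
+# ECS_CLUSTER and ECS_DATADIR are standard ECS agent environment variables):
+#
+#   services.ecs-agent = {
+#     enable = true;
+#     extra-environment = {
+#       ECS_CLUSTER = "my-cluster";
+#       ECS_DATADIR = "/var/lib/ecs/data";
+#     };
+#   };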
diff --git a/nixos/modules/virtualisation/gce-images.nix b/nixos/modules/virtualisation/gce-images.nix
new file mode 100644
index 00000000000..7b027619a44
--- /dev/null
+++ b/nixos/modules/virtualisation/gce-images.nix
@@ -0,0 +1,17 @@
+let self = {
+  "14.12" = "gs://nixos-cloud-images/nixos-14.12.471.1f09b77-x86_64-linux.raw.tar.gz";
+  "15.09" = "gs://nixos-cloud-images/nixos-15.09.425.7870f20-x86_64-linux.raw.tar.gz";
+  "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
+  "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
+  "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
+  "18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
+
+  # This format will be handled by the upcoming NixOps 2.0 release.
+  # The old images based on a GS object are deprecated.
+  "20.09" = {
+    project = "nixos-cloud";
+    name = "nixos-image-20-09-3531-3858fbc08e6-x86-64-linux";
+  };
+
+  latest = self."20.09";
+}; in self
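+
+# Consumption sketch: deployment tooling can select an image with, e.g.,
+#   (import ./gce-images.nix).latest
+# or pin a release:
+#   (import ./gce-images.nix)."18.09"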
diff --git a/nixos/modules/virtualisation/google-compute-config.nix b/nixos/modules/virtualisation/google-compute-config.nix
new file mode 100644
index 00000000000..44d2a589511
--- /dev/null
+++ b/nixos/modules/virtualisation/google-compute-config.nix
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+with lib;
+{
+  imports = [
+    ../profiles/headless.nix
+    ../profiles/qemu-guest.nix
+  ];
+
+
+  fileSystems."/" = {
+    fsType = "ext4";
+    device = "/dev/disk/by-label/nixos";
+    autoResize = true;
+  };
+
+  boot.growPartition = true;
+  boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "virtio_scsi" ];
+  boot.kernelModules = [ "virtio_pci" "virtio_net" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using SSH keys
+  # and disable password authentication in general
+  services.openssh.enable = true;
+  services.openssh.permitRootLogin = "prohibit-password";
+  services.openssh.passwordAuthentication = mkDefault false;
+
+  # enable OS Login. This also requires setting enable-oslogin=TRUE metadata on
+  # instance or project level
+  security.googleOsLogin.enable = true;
+
+  # Use GCE udev rules for dynamic disk volumes
+  services.udev.packages = [ pkgs.google-guest-configs ];
+  services.udev.path = [ pkgs.google-guest-configs ];
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  # Rely on GCP's firewall instead
+  networking.firewall.enable = mkDefault false;
+
+  # Configure default metadata hostnames
+  networking.extraHosts = ''
+    169.254.169.254 metadata.google.internal metadata
+  '';
+
+  networking.timeServers = [ "metadata.google.internal" ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  # GCE has a 1460-byte MTU
+  networking.interfaces.eth0.mtu = 1460;
+
+  systemd.packages = [ pkgs.google-guest-agent ];
+  systemd.services.google-guest-agent = {
+    wantedBy = [ "multi-user.target" ];
+    restartTriggers = [ config.environment.etc."default/instance_configs.cfg".source ];
+    path = lib.optional config.users.mutableUsers pkgs.shadow;
+  };
+  systemd.services.google-startup-scripts.wantedBy = [ "multi-user.target" ];
+  systemd.services.google-shutdown-scripts.wantedBy = [ "multi-user.target" ];
+
+  security.sudo.extraRules = mkIf config.users.mutableUsers [
+    { groups = [ "google-sudoers" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+  ];
+
+  users.groups.google-sudoers = mkIf config.users.mutableUsers { };
+
+  boot.extraModprobeConfig = lib.readFile "${pkgs.google-guest-configs}/etc/modprobe.d/gce-blacklist.conf";
+
+  environment.etc."sysctl.d/60-gce-network-security.conf".source = "${pkgs.google-guest-configs}/etc/sysctl.d/60-gce-network-security.conf";
+
+  environment.etc."default/instance_configs.cfg".text = ''
+    [Accounts]
+    useradd_cmd = useradd -m -s /run/current-system/sw/bin/bash -p * {user}
+
+    [Daemons]
+    accounts_daemon = ${boolToString config.users.mutableUsers}
+
+    [InstanceSetup]
+    # Make sure GCE image does not replace host key that NixOps sets.
+    set_host_keys = false
+
+    [MetadataScripts]
+    default_shell = ${pkgs.stdenv.shell}
+
+    [NetworkInterfaces]
+    dhclient_script = ${pkgs.google-guest-configs}/bin/google-dhclient-script
+    # We set up network interfaces declaratively.
+    setup = false
+  '';
+}
diff --git a/nixos/modules/virtualisation/google-compute-image.nix b/nixos/modules/virtualisation/google-compute-image.nix
new file mode 100644
index 00000000000..0c72696f802
--- /dev/null
+++ b/nixos/modules/virtualisation/google-compute-image.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.googleComputeImage;
+  defaultConfigFile = pkgs.writeText "configuration.nix" ''
+    { ... }:
+    {
+      imports = [
+        <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>
+      ];
+    }
+  '';
+in
+{
+
+  imports = [ ./google-compute-config.nix ];
+
+  options = {
+    virtualisation.googleComputeImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 1536;
+      description = ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.googleComputeImage.configFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at `/etc/nixos/configuration.nix`
+        and be used when switching to a new configuration.
+        If set to `null`, a default configuration is used, where the only import is
+        `<nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>`.
+      '';
+    };
+
+    virtualisation.googleComputeImage.compressionLevel = mkOption {
+      type = types.int;
+      default = 6;
+      description = ''
+        GZIP compression level of the resulting disk image (1-9).
+      '';
+    };
+  };
+
+  #### implementation
+  config = {
+
+    system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
+      name = "google-compute-image";
+      postVM = ''
+        PATH=$PATH:${with pkgs; lib.makeBinPath [ gnutar gzip ]}
+        pushd $out
+        mv $diskImage disk.raw
+        tar -Sc disk.raw | gzip -${toString cfg.compressionLevel} > \
+          nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw.tar.gz
+        rm $out/disk.raw
+        popd
+      '';
+      format = "raw";
+      configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+}
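+
+# A minimal sketch of a configuration customizing the image build
+# (values are illustrative):
+#
+#   virtualisation.googleComputeImage = {
+#     diskSize = 3072;        # MB; defaults to "auto"
+#     compressionLevel = 9;   # slowest, smallest gzip output
+#   };
+#
+# The image is then produced by building the
+# `config.system.build.googleComputeImage` attribute.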
diff --git a/nixos/modules/virtualisation/grow-partition.nix b/nixos/modules/virtualisation/grow-partition.nix
new file mode 100644
index 00000000000..444c0bc1630
--- /dev/null
+++ b/nixos/modules/virtualisation/grow-partition.nix
@@ -0,0 +1,3 @@
+# This profile is deprecated, use boot.growPartition directly.
+builtins.trace "the profile <nixos/modules/virtualisation/grow-partition.nix> is deprecated, use boot.growPartition instead"
+{ }
diff --git a/nixos/modules/virtualisation/hyperv-guest.nix b/nixos/modules/virtualisation/hyperv-guest.nix
new file mode 100644
index 00000000000..fb6502644b8
--- /dev/null
+++ b/nixos/modules/virtualisation/hyperv-guest.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.hypervGuest;
+
+in {
+  options = {
+    virtualisation.hypervGuest = {
+      enable = mkEnableOption "Hyper-V Guest Support";
+
+      videoMode = mkOption {
+        type = types.str;
+        default = "1152x864";
+        example = "1024x768";
+        description = ''
+          Resolution at which to initialize the video adapter.
+
+          Supports screen resolution up to Full HD 1920x1080 with 32 bit color
+          on Windows Server 2012, and 1600x1200 with 16 bit color on Windows
+          Server 2008 R2 or earlier.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot = {
+      initrd.kernelModules = [
+        "hv_balloon" "hv_netvsc" "hv_storvsc" "hv_utils" "hv_vmbus"
+      ];
+
+      initrd.availableKernelModules = [ "hyperv_keyboard" ];
+
+      kernelParams = [
+        "video=hyperv_fb:${cfg.videoMode}" "elevator=noop"
+      ];
+    };
+
+    environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
+
+    # enable hotadding cpu/memory
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "hyperv-cpu-and-memory-hotadd-udev-rules";
+      destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
+      text = ''
+        # Memory hotadd
+        SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
+
+        # CPU hotadd
+        SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
+      '';
+    });
+
+    systemd = {
+      packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];
+
+      services.hv-vss.unitConfig.ConditionPathExists = [ "/dev/vmbus/hv_vss" ];
+
+      targets.hyperv-daemons = {
+        wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+}
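+
+# A minimal usage sketch (resolution is illustrative):
+#
+#   virtualisation.hypervGuest = {
+#     enable = true;
+#     videoMode = "1920x1080";
+#   };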
diff --git a/nixos/modules/virtualisation/hyperv-image.nix b/nixos/modules/virtualisation/hyperv-image.nix
new file mode 100644
index 00000000000..6845d675009
--- /dev/null
+++ b/nixos/modules/virtualisation/hyperv-image.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hyperv;
+
+in {
+  options = {
+    hyperv = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 2048;
+        description = ''
+          The size of the Hyper-V base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the Hyper-V appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vhdx";
+        description = ''
+          The file name of the Hyper-V appliance.
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.hypervImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=dynamic -O vhdx $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.hypervGuest.enable = true;
+  };
+}
diff --git a/nixos/modules/virtualisation/kubevirt.nix b/nixos/modules/virtualisation/kubevirt.nix
new file mode 100644
index 00000000000..408822b6af0
--- /dev/null
+++ b/nixos/modules/virtualisation/kubevirt.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    services.qemuGuest.enable = true;
+    services.openssh.enable = true;
+    services.cloud-init.enable = true;
+    systemd.services."serial-getty@ttyS0".enable = true;
+
+    system.build.kubevirtImage = import ../../lib/make-disk-image.nix {
+      inherit lib config pkgs;
+      format = "qcow2";
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/kvmgt.nix b/nixos/modules/virtualisation/kvmgt.nix
new file mode 100644
index 00000000000..5e7a73bec90
--- /dev/null
+++ b/nixos/modules/virtualisation/kvmgt.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.kvmgt;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  vgpuOptions = {
+    uuid = mkOption {
+      type = with types; listOf str;
+      description = "UUID(s) of VGPU device. You can generate one with <package>libossp_uuid</package>.";
+    };
+  };
+
+in {
+  options = {
+    virtualisation.kvmgt = {
+      enable = mkEnableOption ''
+        KVMGT (iGVT-g) VGPU support. Allows QEMU/KVM guests to share the host's Intel integrated graphics card.
+        Currently only one graphical device can be shared. To allow users to access the device without root,
+        add them to the kvm group: <literal>users.extraUsers.&lt;yourusername&gt;.extraGroups = [ "kvm" ];</literal>
+      '';
+      # multi-GPU support is an open question
+      device = mkOption {
+        type = types.str;
+        default = "0000:00:02.0";
+        description = "PCI ID of graphics card. You can figure it with <command>ls /sys/class/mdev_bus</command>.";
+      };
+      vgpus = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule [ { options = vgpuOptions; } ]);
+        description = ''
+          Virtual GPUs to be used in QEMU. You can list available device types via <command>ls /sys/bus/pci/devices/*/mdev_supported_types</command>
+          and inspect a type via <command>cat /sys/bus/pci/devices/*/mdev_supported_types/i915-GVTg_V5_4/description</command>.
+        '';
+        example = {
+          i915-GVTg_V5_8.uuid = [ "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" ];
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "4.16";
+      message = "KVMGT is not properly supported for kernels older than 4.16";
+    };
+
+    boot.kernelModules = [ "kvmgt" ];
+    boot.kernelParams = [ "i915.enable_gvt=1" ];
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="vfio", OWNER="root", GROUP="kvm"
+    '';
+
+    systemd = let
+      vgpus = listToAttrs (flatten (mapAttrsToList
+        (mdev: opt: map (id: nameValuePair "kvmgt-${id}" { inherit mdev; uuid = id; }) opt.uuid)
+        cfg.vgpus));
+    in {
+      paths = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid} path";
+          wantedBy = [ "multi-user.target" ];
+          pathConfig = {
+            PathExists = "/sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create";
+          };
+        }) vgpus;
+
+      services = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid}";
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.runtimeShell} -c 'echo ${opt.uuid} > /sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create'";
+            ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/bus/pci/devices/${cfg.device}/${opt.uuid}/remove'";
+          };
+        }) vgpus;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ patryk27 ];
+}
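+
+# A minimal usage sketch combining the options above (the PCI ID and UUID
+# are placeholders; check /sys/class/mdev_bus for real values):
+#
+#   virtualisation.kvmgt = {
+#     enable = true;
+#     device = "0000:00:02.0";
+#     vgpus."i915-GVTg_V5_8".uuid = [ "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" ];
+#   };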
diff --git a/nixos/modules/virtualisation/libvirtd.nix b/nixos/modules/virtualisation/libvirtd.nix
new file mode 100644
index 00000000000..ab87394a30e
--- /dev/null
+++ b/nixos/modules/virtualisation/libvirtd.nix
@@ -0,0 +1,392 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.libvirtd;
+  vswitch = config.virtualisation.vswitch;
+  configFile = pkgs.writeText "libvirtd.conf" ''
+    auth_unix_ro = "polkit"
+    auth_unix_rw = "polkit"
+    ${cfg.extraConfig}
+  '';
+  ovmfFilePrefix = if pkgs.stdenv.isAarch64 then "AAVMF" else "OVMF";
+  qemuConfigFile = pkgs.writeText "qemu.conf" ''
+    ${optionalString cfg.qemu.ovmf.enable ''
+      nvram = [ "/run/libvirt/nix-ovmf/${ovmfFilePrefix}_CODE.fd:/run/libvirt/nix-ovmf/${ovmfFilePrefix}_VARS.fd" ]
+    ''}
+    ${optionalString (!cfg.qemu.runAsRoot) ''
+      user = "qemu-libvirtd"
+      group = "qemu-libvirtd"
+    ''}
+    ${cfg.qemu.verbatimConfig}
+  '';
+  dirName = "libvirt";
+  subDirs = list: [ dirName ] ++ map (e: "${dirName}/${e}") list;
+
+  ovmfModule = types.submodule {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Allows libvirtd to take advantage of OVMF when creating new
+          QEMU VMs with UEFI boot.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.OVMF;
+        defaultText = literalExpression "pkgs.OVMF";
+        example = literalExpression "pkgs.OVMFFull";
+        description = ''
+          OVMF package to use.
+        '';
+      };
+    };
+  };
+
+  swtpmModule = types.submodule {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Allows libvirtd to use swtpm to create an emulated TPM.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.swtpm;
+        defaultText = literalExpression "pkgs.swtpm";
+        description = ''
+          swtpm package to use.
+        '';
+      };
+    };
+  };
+
+  qemuModule = types.submodule {
+    options = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.qemu;
+        defaultText = literalExpression "pkgs.qemu";
+        description = ''
+          QEMU package to use with libvirt.
+          `pkgs.qemu` can emulate foreign architectures (e.g. aarch64 on x86),
+          while `pkgs.qemu_kvm` saves disk space by supporting only the host architecture.
+        '';
+      };
+
+      runAsRoot = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          If true, libvirtd runs qemu as root.
+          If false, libvirtd runs qemu as unprivileged user qemu-libvirtd.
+          Changing this option to false may cause file permission issues
+          for existing guests. To fix these, manually change ownership
+          of affected files in /var/lib/libvirt/qemu to qemu-libvirtd.
+        '';
+      };
+
+      verbatimConfig = mkOption {
+        type = types.lines;
+        default = ''
+          namespaces = []
+        '';
+        description = ''
+          Contents written to the qemu configuration file, qemu.conf.
+          Make sure to include a proper namespace configuration when
+          supplying custom configuration.
+        '';
+      };
+
+      ovmf = mkOption {
+        type = ovmfModule;
+        default = { };
+        description = ''
+          QEMU's OVMF options.
+        '';
+      };
+
+      swtpm = mkOption {
+        type = swtpmModule;
+        default = { };
+        description = ''
+          QEMU's swtpm options.
+        '';
+      };
+    };
+  };
+in
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ]
+      "Set the option `virtualisation.libvirtd.qemu.package' instead.")
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuPackage" ]
+      [ "virtualisation" "libvirtd" "qemu" "package" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuRunAsRoot" ]
+      [ "virtualisation" "libvirtd" "qemu" "runAsRoot" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuVerbatimConfig" ]
+      [ "virtualisation" "libvirtd" "qemu" "verbatimConfig" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuOvmf" ]
+      [ "virtualisation" "libvirtd" "qemu" "ovmf" "enable" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuOvmfPackage" ]
+      [ "virtualisation" "libvirtd" "qemu" "ovmf" "package" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuSwtpm" ]
+      [ "virtualisation" "libvirtd" "qemu" "swtpm" "enable" ])
+  ];
+
+  ###### interface
+
+  options.virtualisation.libvirtd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        This option enables libvirtd, a daemon that manages
+        virtual machines. Users in the "libvirtd" group can interact with
+        the daemon (e.g. to start or stop VMs) using the
+        <command>virsh</command> command line tool, among others.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.libvirt;
+      defaultText = literalExpression "pkgs.libvirt";
+      description = ''
+        libvirt package to use.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = ''
+        Extra contents appended to the libvirtd configuration file,
+        libvirtd.conf.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" ];
+      description = ''
+        Extra command line arguments passed to libvirtd on startup.
+      '';
+    };
+
+    onBoot = mkOption {
+      type = types.enum [ "start" "ignore" ];
+      default = "start";
+      description = ''
+        Specifies what to do with the guests when the host boots.
+        The "start" option starts all guests that were running prior to shutdown
+        regardless of their autostart settings. The "ignore" option will not
+        start the formerly running guests on boot. However, any guest marked as
+        autostart will still be automatically started by libvirtd.
+      '';
+    };
+
+    onShutdown = mkOption {
+      type = types.enum [ "shutdown" "suspend" ];
+      default = "suspend";
+      description = ''
+        When shutting down or restarting the host, what method should
+        be used to gracefully halt the guests. Setting this to "shutdown"
+        will cause an ACPI shutdown of each guest. "suspend" will
+        attempt to save the state of the guests, ready to be restored on boot.
+      '';
+    };
+
+    allowedBridges = mkOption {
+      type = types.listOf types.str;
+      default = [ "virbr0" ];
+      description = ''
+        List of bridge devices that can be used by qemu:///session
+      '';
+    };
+
+    qemu = mkOption {
+      type = qemuModule;
+      default = { };
+      description = ''
+        QEMU related options.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = config.security.polkit.enable;
+        message = "The libvirtd module currently requires Polkit to be enabled ('security.polkit.enable = true').";
+      }
+      {
+        assertion = builtins.elem "fd" cfg.qemu.ovmf.package.outputs;
+        message = "The option 'virtualisation.libvirtd.qemuOvmfPackage' needs a package that has an 'fd' output.";
+      }
+    ];
+
+    environment = {
+      # this file is expected in /etc/qemu and not sysconfdir (/var/lib)
+      etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n"
+        (e:
+          "allow ${e}")
+        cfg.allowedBridges;
+      systemPackages = with pkgs; [ libressl.nc iptables cfg.package cfg.qemu.package ];
+      etc.ethertypes.source = "${pkgs.iptables}/etc/ethertypes";
+    };
+
+    boot.kernelModules = [ "tun" ];
+
+    users.groups.libvirtd.gid = config.ids.gids.libvirtd;
+
+    # libvirtd runs qemu as this user and group by default
+    users.extraGroups.qemu-libvirtd.gid = config.ids.gids.qemu-libvirtd;
+    users.extraUsers.qemu-libvirtd = {
+      uid = config.ids.uids.qemu-libvirtd;
+      isNormalUser = false;
+      group = "qemu-libvirtd";
+    };
+
+    security.wrappers.qemu-bridge-helper = {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "/run/${dirName}/nix-helpers/qemu-bridge-helper";
+    };
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.libvirtd-config = {
+      description = "Libvirt Virtual Machine Management Daemon - configuration";
+      script = ''
+        # Copy default libvirt network config .xml files to /var/lib
+        # Files modified by the user will not be overwritten
+        for i in $(cd ${cfg.package}/var/lib && echo \
+            libvirt/qemu/networks/*.xml libvirt/qemu/networks/autostart/*.xml \
+            libvirt/nwfilter/*.xml );
+        do
+            mkdir -p /var/lib/$(dirname $i) -m 755
+            cp -npd ${cfg.package}/var/lib/$i /var/lib/$i
+        done
+
+        # Copy generated qemu config to libvirt directory
+        cp -f ${qemuConfigFile} /var/lib/${dirName}/qemu.conf
+
+        # stable paths (not GC'able, unlike /nix/store) for use in the <emulator> section of XML configs
+        for emulator in ${cfg.package}/libexec/libvirt_lxc ${cfg.qemu.package}/bin/qemu-kvm ${cfg.qemu.package}/bin/qemu-system-*; do
+          ln -s --force "$emulator" /run/${dirName}/nix-emulators/
+        done
+
+        for helper in libexec/qemu-bridge-helper bin/qemu-pr-helper; do
+          ln -s --force ${cfg.qemu.package}/$helper /run/${dirName}/nix-helpers/
+        done
+
+        ${optionalString cfg.qemu.ovmf.enable ''
+          ln -s --force ${cfg.qemu.ovmf.package.fd}/FV/${ovmfFilePrefix}_CODE.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${cfg.qemu.ovmf.package.fd}/FV/${ovmfFilePrefix}_VARS.fd /run/${dirName}/nix-ovmf/
+        ''}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RuntimeDirectoryPreserve = "yes";
+        LogsDirectory = subDirs [ "qemu" ];
+        RuntimeDirectory = subDirs [ "nix-emulators" "nix-helpers" "nix-ovmf" ];
+        StateDirectory = subDirs [ "dnsmasq" ];
+      };
+    };
+
+    systemd.services.libvirtd = {
+      requires = [ "libvirtd-config.service" ];
+      after = [ "libvirtd-config.service" ]
+        ++ optional vswitch.enable "ovs-vswitchd.service";
+
+      environment.LIBVIRTD_ARGS = escapeShellArgs (
+        [
+          "--config"
+          configFile
+          "--timeout"
+          "120" # from ${libvirt}/var/lib/sysconfig/libvirtd
+        ] ++ cfg.extraOptions
+      );
+
+      path = [ cfg.qemu.package ] # libvirtd requires qemu-img to manage disk images
+        ++ optional vswitch.enable vswitch.package
+        ++ optional cfg.qemu.swtpm.enable cfg.qemu.swtpm.package;
+
+      serviceConfig = {
+        Type = "notify";
+        KillMode = "process"; # when stopping, leave the VMs alone
+        Restart = "no";
+      };
+      restartIfChanged = false;
+    };
+
+    systemd.services.libvirt-guests = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ coreutils gawk cfg.package ];
+      restartIfChanged = false;
+
+      environment.ON_BOOT = "${cfg.onBoot}";
+      environment.ON_SHUTDOWN = "${cfg.onShutdown}";
+    };
+
+    systemd.sockets.virtlogd = {
+      description = "Virtual machine log manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlogd-sock" ];
+    };
+
+    systemd.services.virtlogd = {
+      description = "Virtual machine log manager";
+      serviceConfig.ExecStart = "@${cfg.package}/sbin/virtlogd virtlogd";
+      restartIfChanged = false;
+    };
+
+    systemd.sockets.virtlockd = {
+      description = "Virtual machine lock manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlockd-sock" ];
+    };
+
+    systemd.services.virtlockd = {
+      description = "Virtual machine lock manager";
+      serviceConfig.ExecStart = "@${cfg.package}/sbin/virtlockd virtlockd";
+      restartIfChanged = false;
+    };
+
+    # https://libvirt.org/daemons.html#monolithic-systemd-integration
+    systemd.sockets.libvirtd.wantedBy = [ "sockets.target" ];
+
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+          subject.isInGroup("libvirtd")) {
+          return polkit.Result.YES;
+        }
+      });
+    '';
+  };
+}
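+
+# A minimal usage sketch (illustrative; OVMF is enabled by default,
+# swtpm is not):
+#
+#   virtualisation.libvirtd = {
+#     enable = true;
+#     qemu.package = pkgs.qemu_kvm;  # host architecture only, smaller closure
+#     qemu.swtpm.enable = true;      # emulated TPM for guests that need one
+#     onShutdown = "shutdown";       # ACPI-shutdown guests instead of suspending
+#   };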
diff --git a/nixos/modules/virtualisation/lxc-container.nix b/nixos/modules/virtualisation/lxc-container.nix
new file mode 100644
index 00000000000..9816cc2332f
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc-container.nix
@@ -0,0 +1,174 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+  templateSubmodule = { ... }: {
+    options = {
+      enable = mkEnableOption "this template";
+
+      target = mkOption {
+        description = "Path in the container";
+        type = types.path;
+      };
+      template = mkOption {
+        description = ".tpl file for rendering the target";
+        type = types.path;
+      };
+      when = mkOption {
+        description = "Events which trigger a rewrite (create, copy)";
+        type = types.listOf (types.str);
+      };
+      properties = mkOption {
+        description = "Additional properties";
+        type = types.attrs;
+        default = {};
+      };
+    };
+  };
+
+  toYAML = name: data: pkgs.writeText name (generators.toYAML {} data);
+
+  cfg = config.virtualisation.lxc;
+  templates = if cfg.templates != {} then let
+    list = mapAttrsToList (name: value: { inherit name; } // value)
+      (filterAttrs (name: value: value.enable) cfg.templates);
+  in
+    {
+      files = map (tpl: {
+        source = tpl.template;
+        target = "/templates/${tpl.name}.tpl";
+      }) list;
+      properties = listToAttrs (map (tpl: nameValuePair tpl.target {
+        when = tpl.when;
+        template = "${tpl.name}.tpl";
+        properties = tpl.properties;
+      }) list);
+    }
+  else { files = []; properties = {}; };
+
+in
+{
+  imports = [
+    ../installer/cd-dvd/channel.nix
+    ../profiles/minimal.nix
+    ../profiles/clone-config.nix
+  ];
+
+  options = {
+    virtualisation.lxc = {
+      templates = mkOption {
+        description = "Templates for LXD";
+        type = types.attrsOf (types.submodule (templateSubmodule));
+        default = {};
+        example = literalExpression ''
+          {
+            # create /etc/hostname on container creation
+            "hostname" = {
+              enable = true;
+              target = "/etc/hostname";
+              template = builtins.writeFile "hostname.tpl" "{{ container.name }}";
+              when = [ "create" ];
+            };
+            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
+            "hostname-nix" = {
+              enable = true;
+              target = "/etc/nixos/hostname.nix";
+              template = builtins.writeFile "hostname-nix.tpl" "{ ... }: { networking.hostName = "{{ container.name }}"; }";
+              # copy keeps the file updated when the container is changed
+              when = [ "create" "copy" ];
+            };
+            # copy allows the user to specify a custom configuration.nix
+            "configuration-nix" = {
+              enable = true;
+              target = "/etc/nixos/configuration.nix";
+              template = builtins.writeFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
+              when = [ "create" ];
+            };
+          };
+        '';
+      };
+    };
+  };
+
+  config = {
+    boot.isContainer = true;
+    boot.postBootCommands =
+      ''
+        # After booting, register the contents of the Nix store in the Nix
+        # database.
+        if [ -f /nix-path-registration ]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration &&
+          rm /nix-path-registration
+        fi
+
+        # nixos-rebuild also requires a "system" profile
+        ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+      '';
+
+    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      contents = [
+        {
+          source = toYAML "metadata.yaml" {
+            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
+            creation_date = 1;
+            properties = {
+              description = "NixOS ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
+              os = "nixos";
+              release = "${config.system.nixos.codeName}";
+            };
+            templates = templates.properties;
+          };
+          target = "/metadata.yaml";
+        }
+      ] ++ templates.files;
+    };
+
+    # TODO: build rootfs as squashfs for faster unpack
+    system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      extraArgs = "--owner=0";
+
+      storeContents = [
+        {
+          object = config.system.build.toplevel;
+          symlink = "none";
+        }
+      ];
+
+      contents = [
+        {
+          source = config.system.build.toplevel + "/init";
+          target = "/sbin/init";
+        }
+      ];
+
+      extraCommands = "mkdir -p proc sys dev";
+    };
+
+    # Add the overrides from lxd distrobuilder
+    systemd.extraConfig = ''
+      [Service]
+      ProtectProc=default
+      ProtectControlGroups=no
+      ProtectKernelTunables=no
+    '';
+
+    # Allow the user to login as root without password.
+    users.users.root.initialHashedPassword = mkOverride 150 "";
+
+    system.activationScripts.installInitScript = mkForce ''
+      ln -fs $systemConfig/init /sbin/init
+    '';
+
+    # Some more help text.
+    services.getty.helpLine =
+      ''
+
+        Log in as "root" with an empty password.
+      '';
+
+    # Containers should be light-weight, so start sshd on demand.
+    services.openssh.enable = mkDefault true;
+    services.openssh.startWhenNeeded = mkDefault true;
+  };
+}
diff --git a/nixos/modules/virtualisation/lxc.nix b/nixos/modules/virtualisation/lxc.nix
new file mode 100644
index 00000000000..0f8b22a45df
--- /dev/null
+++ b/nixos/modules/virtualisation/lxc.nix
@@ -0,0 +1,86 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxc;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.lxc = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            This enables Linux Containers (LXC), which provides tools
+            for creating and managing system or application containers
+            on Linux.
+          '';
+      };
+
+    systemConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the system-wide LXC config. See
+            <citerefentry><refentrytitle>lxc.system.conf</refentrytitle>
+            <manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    defaultConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            Default config (default.conf) for new containers, i.e. for
+            network config. See <citerefentry><refentrytitle>lxc.container.conf
+            </refentrytitle><manvolnum>5</manvolnum></citerefentry>.
+          '';
+      };
+
+    usernetConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          ''
+            This is the config file for managing unprivileged user network
+            administration access in LXC. See <citerefentry>
+            <refentrytitle>lxc-usernet</refentrytitle><manvolnum>5</manvolnum>
+            </citerefentry>.
+          '';
+      };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.lxc ];
+    environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
+    environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
+    environment.etc."lxc/default.conf".text = cfg.defaultConfig;
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor.packages = [ pkgs.lxc ];
+    security.apparmor.policies = {
+      "bin.lxc-start".profile = ''
+        include ${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start
+      '';
+      "lxc-containers".profile = ''
+        include ${pkgs.lxc}/etc/apparmor.d/lxc-containers
+      '';
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/lxcfs.nix b/nixos/modules/virtualisation/lxcfs.nix
new file mode 100644
index 00000000000..b2457403463
--- /dev/null
+++ b/nixos/modules/virtualisation/lxcfs.nix
@@ -0,0 +1,45 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.lxc.lxcfs;
+in {
+  meta.maintainers = [ maintainers.mic92 ];
+
+  ###### interface
+  options.virtualisation.lxc.lxcfs = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables LXCFS, a FUSE filesystem for LXC.
+          To use lxcfs, include the following configuration in your
+          container configuration:
+          <code>
+            virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
+          </code>
+        '';
+      };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.lxcfs = {
+      description = "FUSE filesystem for LXC";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "lxc.service" ];
+      restartIfChanged = false;
+      serviceConfig = {
+        ExecStartPre="${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
+        ExecStart="${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
+        ExecStopPost="-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
+        KillMode="process";
+        Restart="on-failure";
+      };
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/lxd.nix b/nixos/modules/virtualisation/lxd.nix
new file mode 100644
index 00000000000..18451b147ff
--- /dev/null
+++ b/nixos/modules/virtualisation/lxd.nix
@@ -0,0 +1,182 @@
+# Systemd services for lxd.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.lxd;
+in {
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
+  ];
+
+  ###### interface
+
+  options = {
+    virtualisation.lxd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables lxd, a daemon that manages
+          containers. Users in the "lxd" group can interact with
+          the daemon (e.g. to start or stop containers) using the
+          <command>lxc</command> command line tool, among others.
+
+          Most of the time, you'll also want to start lxcfs, so
+          that containers can "see" the limits:
+          <code>
+            virtualisation.lxc.lxcfs.enable = true;
+          </code>
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.lxd;
+        defaultText = literalExpression "pkgs.lxd";
+        description = ''
+          The LXD package to use.
+        '';
+      };
+
+      lxcPackage = mkOption {
+        type = types.package;
+        default = pkgs.lxc;
+        defaultText = literalExpression "pkgs.lxc";
+        description = ''
+          The LXC package to use with LXD (required for AppArmor profiles).
+        '';
+      };
+
+      zfsSupport = mkOption {
+        type = types.bool;
+        default = config.boot.zfs.enabled;
+        defaultText = literalExpression "config.boot.zfs.enabled";
+        description = ''
+          Enables LXD to use ZFS as a storage backend for containers.
+
+          This option is enabled by default if a ZFS pool is configured
+          with NixOS.
+        '';
+      };
+
+      recommendedSysctlSettings = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enables various settings to avoid common pitfalls when
+          running containers requiring many file operations.
+          Fixes errors like "Too many open files" or
+          "neighbour: ndisc_cache: neighbor table overflow!".
+          See https://lxd.readthedocs.io/en/latest/production-setup/
+          for details.
+        '';
+      };
+
+      startTimeout = mkOption {
+        type = types.int;
+        default = 600;
+        apply = toString;
+        description = ''
+          Time to wait (in seconds) for LXD to become ready to process requests.
+          If LXD does not reply within the configured time, lxd.service will be
+          considered failed and systemd will attempt to restart it.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    # Note: the following options are also declared in virtualisation.lxc, but
+    # the latter can't simply be enabled to reuse them, because it
+    # does a bunch of unrelated things.
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor = {
+      packages = [ cfg.lxcPackage ];
+      policies = {
+        "bin.lxc-start".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
+        '';
+        "lxc-containers".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
+        '';
+      };
+    };
+
+    # TODO: remove once LXD gets proper support for cgroupsv2
+    # (currently most of the e.g. CPU accounting stuff doesn't work)
+    systemd.enableUnifiedCgroupHierarchy = false;
+
+    systemd.sockets.lxd = {
+      description = "LXD UNIX socket";
+      wantedBy = [ "sockets.target" ];
+
+      socketConfig = {
+        ListenStream = "/var/lib/lxd/unix.socket";
+        SocketMode = "0660";
+        SocketGroup = "lxd";
+        Service = "lxd.service";
+      };
+    };
+
+    systemd.services.lxd = {
+      description = "LXD Container Management Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "lxcfs.service" ];
+      requires = [ "network-online.target" "lxd.socket"  "lxcfs.service" ];
+      documentation = [ "man:lxd(1)" ];
+
+      path = optional cfg.zfsSupport config.boot.zfs.package;
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
+        ExecStartPost = "${cfg.package}/bin/lxd waitready --timeout=${cfg.startTimeout}";
+        ExecStop = "${cfg.package}/bin/lxd shutdown";
+
+        KillMode = "process"; # when stopping, leave the containers alone
+        LimitMEMLOCK = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "infinity";
+        TasksMax = "infinity";
+
+        Restart = "on-failure";
+        TimeoutStartSec = "${cfg.startTimeout}s";
+        TimeoutStopSec = "30s";
+
+        # By default, `lxd` loads configuration files from the hard-coded
+        # `/usr/share/lxc/config`; since this is a no-go for us, we have to
+        # explicitly tell it where the actual configuration files are.
+        Environment = mkIf (config.virtualisation.lxc.lxcfs.enable)
+          "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
+      };
+    };
+
+    users.groups.lxd = {};
+
+    users.users.root = {
+      subUidRanges = [ { startUid = 1000000; count = 65536; } ];
+      subGidRanges = [ { startGid = 1000000; count = 65536; } ];
+    };
+
+    boot.kernel.sysctl = mkIf cfg.recommendedSysctlSettings {
+      "fs.inotify.max_queued_events" = 1048576;
+      "fs.inotify.max_user_instances" = 1048576;
+      "fs.inotify.max_user_watches" = 1048576;
+      "vm.max_map_count" = 262144;
+      "kernel.dmesg_restrict" = 1;
+      "net.ipv4.neigh.default.gc_thresh3" = 8192;
+      "net.ipv6.neigh.default.gc_thresh3" = 8192;
+      "kernel.keys.maxkeys" = 2000;
+    };
+
+    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" ]
+      ++ optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+  };
+}
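+
+# A minimal usage sketch (illustrative):
+#
+#   virtualisation.lxd = {
+#     enable = true;
+#     recommendedSysctlSettings = true;
+#   };
+#   # so containers can "see" resource limits:
+#   virtualisation.lxc.lxcfs.enable = true;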
diff --git a/nixos/modules/virtualisation/nixos-containers.nix b/nixos/modules/virtualisation/nixos-containers.nix
new file mode 100644
index 00000000000..0838a57f0f3
--- /dev/null
+++ b/nixos/modules/virtualisation/nixos-containers.nix
@@ -0,0 +1,866 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # The container's init script, a small wrapper around the regular
+  # NixOS stage-2 init script.
+  containerInit = (cfg:
+    let
+      renderExtraVeth = (name: cfg:
+        ''
+        echo "Bringing ${name} up"
+        ip link set dev ${name} up
+        ${optionalString (cfg.localAddress != null) ''
+          echo "Setting ip for ${name}"
+          ip addr add ${cfg.localAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.localAddress6 != null) ''
+          echo "Setting ip6 for ${name}"
+          ip -6 addr add ${cfg.localAddress6} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress != null) ''
+          echo "Setting route to host for ${name}"
+          ip route add ${cfg.hostAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress6 != null) ''
+          echo "Setting route6 to host for ${name}"
+          ip -6 route add ${cfg.hostAddress6} dev ${name}
+        ''}
+        ''
+        );
+    in
+      pkgs.writeScript "container-init"
+      ''
+        #! ${pkgs.runtimeShell} -e
+
+        # Exit early if we're asked to shut down.
+        trap "exit 0" SIGRTMIN+3
+
+        # Initialise the container side of the veth pair.
+        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
+           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
+           [ -n "$HOST_BRIDGE" ]; then
+          ip link set host0 name eth0
+          ip link set dev eth0 up
+
+          if [ -n "$LOCAL_ADDRESS" ]; then
+            ip addr add $LOCAL_ADDRESS dev eth0
+          fi
+          if [ -n "$LOCAL_ADDRESS6" ]; then
+            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
+          fi
+          if [ -n "$HOST_ADDRESS" ]; then
+            ip route add $HOST_ADDRESS dev eth0
+            ip route add default via $HOST_ADDRESS
+          fi
+          if [ -n "$HOST_ADDRESS6" ]; then
+            ip -6 route add $HOST_ADDRESS6 dev eth0
+            ip -6 route add default via $HOST_ADDRESS6
+          fi
+        fi
+
+        ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+
+        # Start the regular stage 2 script.
+        # We source instead of exec to not lose an early stop signal, which is
+        # also the only _reliable_ shutdown signal we have since early stop
+        # does not execute ExecStop* commands.
+        set +e
+        . "$1"
+      ''
+    );
+
+  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
+
+  startScript = cfg:
+    ''
+      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
+      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers
+      if ! [ -e "$root/etc/os-release" ]; then
+        touch "$root/etc/os-release"
+      fi
+
+      if ! [ -e "$root/etc/machine-id" ]; then
+        touch "$root/etc/machine-id"
+      fi
+
+      mkdir -p -m 0755 \
+        "/nix/var/nix/profiles/per-container/$INSTANCE" \
+        "/nix/var/nix/gcroots/per-container/$INSTANCE"
+
+      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
+
+      if [ "$PRIVATE_NETWORK" = 1 ]; then
+        extraFlags+=" --private-network"
+      fi
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        extraFlags+=" --network-veth"
+      fi
+
+      if [ -n "$HOST_PORT" ]; then
+        OIFS=$IFS
+        IFS=","
+        for i in $HOST_PORT
+        do
+            extraFlags+=" --port=$i"
+        done
+        IFS=$OIFS
+      fi
+
+      if [ -n "$HOST_BRIDGE" ]; then
+        extraFlags+=" --network-bridge=$HOST_BRIDGE"
+      fi
+
+      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
+
+      for iface in $INTERFACES; do
+        extraFlags+=" --network-interface=$iface"
+      done
+
+      for iface in $MACVLANS; do
+        extraFlags+=" --network-macvlan=$iface"
+      done
+
+      # If the host is 64-bit and the container is 32-bit, add a
+      # --personality flag.
+      ${optionalString (config.nixpkgs.localSystem.system == "x86_64-linux") ''
+        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
+          extraFlags+=" --personality=x86"
+        fi
+      ''}
+
+      # Run systemd-nspawn without startup notification (we'll
+      # wait for the container systemd to signal readiness)
+      # Kill signal handling means systemd-nspawn will pass a system-halt signal
+      # to the container systemd when it receives SIGTERM for container shutdown;
+      # containerInit and stage2 have to handle this as well.
+      exec ${config.systemd.package}/bin/systemd-nspawn \
+        --keep-unit \
+        -M "$INSTANCE" -D "$root" $extraFlags \
+        $EXTRA_NSPAWN_FLAGS \
+        --notify-ready=yes \
+        --kill-signal=SIGRTMIN+3 \
+        --bind-ro=/nix/store \
+        --bind-ro=/nix/var/nix/db \
+        --bind-ro=/nix/var/nix/daemon-socket \
+        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
+        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
+        ${optionalString (!cfg.ephemeral) "--link-journal=try-guest"} \
+        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
+        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
+        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
+        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
+        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
+        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
+        --setenv HOST_PORT="$HOST_PORT" \
+        --setenv PATH="$PATH" \
+        ${optionalString cfg.ephemeral "--ephemeral"} \
+        ${if cfg.additionalCapabilities != null && cfg.additionalCapabilities != [] then
+          ''--capability="${concatStringsSep "," cfg.additionalCapabilities}"'' else ""
+        } \
+        ${if cfg.tmpfs != null && cfg.tmpfs != [] then
+          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}'' else ""
+        } \
+        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
+    '';
+
+  preStartScript = cfg:
+    ''
+      # Clean up existing machined registration and interfaces.
+      machinectl terminate "$INSTANCE" 2> /dev/null || true
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
+        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
+      fi
+
+      ${concatStringsSep "\n" (
+        mapAttrsToList (name: cfg:
+          "ip link del dev ${name} 2> /dev/null || true "
+        ) cfg.extraVeths
+      )}
+    '';
+
+  postStartScript = (cfg:
+    let
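+      # `ipcall` renders the host-side address/route setup: when the
+      # attribute is set (declarative containers) it emits a static `ip`
+      # command; when it is null it falls back to checking the corresponding
+      # environment variable at runtime (the generic container@ template).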
+      ipcall = cfg: ipcmd: variable: attribute:
+        if cfg.${attribute} == null then
+          ''
+            if [ -n "${variable}" ]; then
+              ${ipcmd} add ${variable} dev $ifaceHost
+            fi
+          ''
+        else
+          "${ipcmd} add ${cfg.${attribute}} dev $ifaceHost";
+      renderExtraVeth = name: cfg:
+        if cfg.hostBridge != null then
+          ''
+            # Add ${name} to bridge ${cfg.hostBridge}
+            ip link set dev ${name} master ${cfg.hostBridge} up
+          ''
+        else
+          ''
+            echo "Bring ${name} up"
+            ip link set dev ${name} up
+            # Set IPs and routes for ${name}
+            ${optionalString (cfg.hostAddress != null) ''
+              ip addr add ${cfg.hostAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.hostAddress6 != null) ''
+              ip -6 addr add ${cfg.hostAddress6} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress != null) ''
+              ip route add ${cfg.localAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress6 != null) ''
+              ip -6 route add ${cfg.localAddress6} dev ${name}
+            ''}
+          '';
+    in
+      ''
+        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+          if [ -z "$HOST_BRIDGE" ]; then
+            ifaceHost=ve-$INSTANCE
+            ip link set dev $ifaceHost up
+
+            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
+            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
+            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
+            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
+          fi
+        fi
+        ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+      ''
+  );
+
+  serviceDirectives = cfg: {
+    ExecReload = pkgs.writeScript "reload-container"
+      ''
+        #! ${pkgs.runtimeShell} -e
+        ${pkgs.nixos-container}/bin/nixos-container run "$INSTANCE" -- \
+          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
+      '';
+
+    SyslogIdentifier = "container %i";
+
+    EnvironmentFile = "-/etc/containers/%i.conf";
+
+    Type = "notify";
+
+    RuntimeDirectory = lib.optional cfg.ephemeral "containers/%i";
+
+    # Note that on reboot, systemd-nspawn returns 133, so this
+    # unit will be restarted. On poweroff, it returns 0, so the
+    # unit won't be restarted.
+    RestartForceExitStatus = "133";
+    SuccessExitStatus = "133";
+
+    # Some containers take long to start
+    # especially when you automatically start many at once
+    TimeoutStartSec = cfg.timeoutStartSec;
+
+    Restart = "on-failure";
+
+    Slice = "machine.slice";
+    Delegate = true;
+
+    # We rely on systemd-nspawn turning a SIGTERM to itself into a shutdown
+    # signal (SIGRTMIN+3) for the inner container.
+    KillMode = "mixed";
+    KillSignal = "TERM";
+
+    DevicePolicy = "closed";
+    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  };
+
+  system = config.nixpkgs.localSystem.system;
+  kernelVersion = config.boot.kernelPackages.kernel.version;
+
+  bindMountOpts = { name, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = types.str;
+        description = "Mount point on the container file system.";
+      };
+      hostPath = mkOption {
+        default = null;
+        example = "/home/alice";
+        type = types.nullOr types.str;
+        description = "Location of the host path to be mounted.";
+      };
+      isReadOnly = mkOption {
+        default = true;
+        type = types.bool;
+        description = "Determine whether the mounted path will be accessed in read-only mode.";
+      };
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+    };
+
+  };
+
+  allowedDeviceOpts = { ... }: {
+    options = {
+      node = mkOption {
+        example = "/dev/net/tun";
+        type = types.str;
+        description = "Path to device node";
+      };
+      modifier = mkOption {
+        example = "rw";
+        type = types.str;
+        description = ''
+          Device node access modifier. Takes a combination of
+          <literal>r</literal> (read), <literal>w</literal> (write), and
+          <literal>m</literal> (mknod). See the
+          <literal>systemd.resource-control(5)</literal> man page for more
+          information.'';
+      };
+    };
+  };
+
+  mkBindFlag = d:
+    let
+      flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
+      mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else d.mountPoint;
+    in flagPrefix + mountstr;
+
+  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
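+  # For example, a (hypothetical) bind mount declared as
+  #   { mountPoint = "/home"; hostPath = "/home/alice"; isReadOnly = false; }
+  # renders to the nspawn flag " --bind=/home/alice:/home".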
+
+  networkOptions = {
+    hostBridge = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "br0";
+      description = ''
+        Put the host-side of the veth-pair into the named bridge.
+        Only one of hostAddress* or hostBridge can be given.
+      '';
+    };
+
+    forwardPorts = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description = "The protocol specifier for port forwarding between host and container";
+          };
+          hostPort = mkOption {
+            type = types.int;
+            description = "Source port of the external interface on host";
+          };
+          containerPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = "Target port of container";
+          };
+        };
+      });
+      default = [];
+      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+      description = ''
+        List of forwarded ports from host to container. Each forwarded port
+        is specified by protocol, hostPort and containerPort. By default,
+        protocol is tcp and hostPort and containerPort are assumed to be
+        the same if containerPort is not explicitly given.
+      '';
+    };
+
+
+    hostAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.1";
+      description = ''
+        The IPv4 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    hostAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::1";
+      description = ''
+        The IPv6 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    localAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.2";
+      description = ''
+        The IPv4 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with a netmask to make
+        the whole network reachable. Otherwise the default netmask is /32 and
+        routing is set up from localAddress to hostAddress and back.
+      '';
+    };
+
+    localAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::2";
+      description = ''
+        The IPv6 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with a netmask to make
+        the whole network reachable. Otherwise the default netmask is /128 and
+        routing is set up from localAddress6 to hostAddress6 and back.
+      '';
+    };
+
+  };
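+
+  # A minimal sketch (hypothetical addresses) of how these options pair up
+  # for a declarative container:
+  #
+  #   containers.webserver = {
+  #     privateNetwork = true;
+  #     hostAddress  = "10.231.136.1";  # host side of the veth pair
+  #     localAddress = "10.231.136.2";  # container side; routed via hostAddress
+  #   };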
+
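+  # Placeholder values for the generic `container@` template below, which is
+  # used by imperative containers; their real settings come at runtime from
+  # /etc/containers/<name>.conf via EnvironmentFile.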
+  dummyConfig =
+    {
+      extraVeths = {};
+      additionalCapabilities = [];
+      ephemeral = false;
+      timeoutStartSec = "1min";
+      allowedDevices = [];
+      hostAddress = null;
+      hostAddress6 = null;
+      localAddress = null;
+      localAddress6 = null;
+      tmpfs = null;
+    };
+
+in
+
+{
+  options = {
+
+    boot.isContainer = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether this NixOS machine is a lightweight container running
+        in another NixOS system.
+      '';
+    };
+
+    boot.enableContainers = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether to enable support for NixOS containers. Defaults to true
+        (at no cost if containers are not actually used).
+      '';
+    };
+
+    containers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { config, options, name, ... }:
+        {
+          options = {
+            config = mkOption {
+              description = ''
+                A specification of the desired configuration of this
+                container, as a NixOS module.
+              '';
+              type = lib.mkOptionType {
+                name = "Toplevel NixOS config";
+                merge = loc: defs: (import "${toString config.nixpkgs}/nixos/lib/eval-config.nix" {
+                  inherit system;
+                  modules =
+                    let
+                      extraConfig = {
+                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
+                        config = {
+                          boot.isContainer = true;
+                          networking.hostName = mkDefault name;
+                          networking.useDHCP = false;
+                          assertions = [
+                            {
+                              assertion =
+                                (builtins.compareVersions kernelVersion "5.8" <= 0)
+                                -> config.privateNetwork
+                                -> stringLength name <= 11;
+                              message = ''
+                                Container name `${name}` is too long: when `privateNetwork` is enabled, container names
+                                cannot be longer than 11 characters, because the container's interface name is derived from it.
+                                You should either make the container name shorter or upgrade to a more recent kernel that
+                                supports interface altnames (i.e. at least Linux 5.8 - please see https://github.com/NixOS/nixpkgs/issues/38509
+                                for details).
+                              '';
+                            }
+                          ];
+                        };
+                      };
+                    in [ extraConfig ] ++ (map (x: x.value) defs);
+                  prefix = [ "containers" name ];
+                }).config;
+              };
+            };
+
+            path = mkOption {
+              type = types.path;
+              example = "/nix/var/nix/profiles/per-container/webserver";
+              description = ''
+                As an alternative to specifying
+                <option>config</option>, you can specify the path to
+                the evaluated NixOS system configuration, typically a
+                symlink to a system profile.
+              '';
+            };
+
+            additionalCapabilities = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
+              description = ''
+                Grant additional capabilities to the container.  See the
+                capabilities(7) and systemd-nspawn(1) man pages for more
+                information.
+              '';
+            };
+
+            nixpkgs = mkOption {
+              type = types.path;
+              default = pkgs.path;
+              defaultText = literalExpression "pkgs.path";
+              description = ''
+                A path to the nixpkgs that provides the modules, pkgs and lib for evaluating the container.
+
+                To only change the <literal>pkgs</literal> argument used inside the container modules,
+                set the <literal>nixpkgs.*</literal> options in the container <option>config</option>.
+                Setting <literal>config.nixpkgs.pkgs = pkgs</literal> speeds up the container evaluation
+                by reusing the system pkgs, but the <literal>nixpkgs.config</literal> option in the
+                container config is ignored in this case.
+              '';
+            };
+
+            ephemeral = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Runs the container in ephemeral mode with an empty root filesystem at boot.
+                This way the container is bootstrapped from scratch on each boot
+                and cleaned up on shutdown, leaving no traces behind.
+                Useful for completely stateless, reproducible containers.
+
+                Note that this option might require some adjustments to the container configuration,
+                e.g. you might want to set
+                <varname>systemd.network.networks.$interface.dhcpV4Config.ClientIdentifier</varname> to "mac"
+                if you use the <varname>macvlans</varname> option, so that the
+                DHCP client identifier stays stable across container restarts.
+
+                Note that the container journal will not be linked to the host if this option is enabled.
+              '';
+            };
+
+            enableTun = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Allows the container to create and setup tunnel interfaces
+                by granting the <literal>NET_ADMIN</literal> capability and
+                enabling access to <literal>/dev/net/tun</literal>.
+              '';
+            };
+
+            privateNetwork = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether to give the container its own private virtual
+                Ethernet interface.  The interface is called
+                <literal>eth0</literal>, and is hooked up to the interface
+                <literal>ve-<replaceable>container-name</replaceable></literal>
+                on the host.  If this option is not set, then the
+                container shares the network interfaces of the host,
+                and can bind to any port on any interface.
+              '';
+            };
+
+            interfaces = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of interfaces to be moved into the container.
+              '';
+            };
+
+            macvlans = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = ''
+                The list of host interfaces from which macvlans will be
+                created. For each interface specified, a macvlan interface
+                will be created and moved to the container.
+              '';
+            };
+
+            extraVeths = mkOption {
+              type = with types; attrsOf (submodule { options = networkOptions; });
+              default = {};
+              description = ''
+                Extra veth-pairs to be created for the container.
+              '';
+            };
+
+            autoStart = mkOption {
+              type = types.bool;
+              default = false;
+              description = ''
+                Whether the container is automatically started at boot-time.
+              '';
+            };
+
+            timeoutStartSec = mkOption {
+              type = types.str;
+              default = "1min";
+              description = ''
+                Time for the container to start. In case of a timeout,
+                the container processes get killed.
+                See <citerefentry><refentrytitle>systemd.time</refentrytitle>
+                <manvolnum>7</manvolnum></citerefentry>
+                for more information about the format.
+              '';
+            };
+
+            bindMounts = mkOption {
+              type = with types; attrsOf (submodule bindMountOpts);
+              default = {};
+              example = literalExpression ''
+                { "/home" = { hostPath = "/home/alice";
+                              isReadOnly = false; };
+                }
+              '';
+
+              description =
+                ''
+                  An extra set of directories that are bound into the container.
+                '';
+            };
+
+            allowedDevices = mkOption {
+              type = with types; listOf (submodule allowedDeviceOpts);
+              default = [];
+              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              description = ''
+                A list of device nodes to which the container has access.
+              '';
+            };
+
+            tmpfs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "/var" ];
+              description = ''
+                Mounts a set of tmpfs file systems into the container.
+                Multiple paths can be specified.
+                Valid items must conform to the --tmpfs argument
+                of systemd-nspawn. See systemd-nspawn(1) for details.
+              '';
+            };
+
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
+            # Removed option. See `checkAssertion` below for the accompanying error message.
+            pkgs = mkOption { visible = false; };
+          } // networkOptions;
+
+          config = let
+            # Throw an error when removed option `pkgs` is used.
+            # Because this is a submodule we cannot use `mkRemovedOptionModule` or option `assertions`.
+            optionPath = "containers.${name}.pkgs";
+            files = showFiles options.pkgs.files;
+            checkAssertion = if options.pkgs.isDefined then throw ''
+              The option definition `${optionPath}' in ${files} no longer has any effect; please remove it.
+
+              Alternatively, you can use the following options:
+              - containers.${name}.nixpkgs
+                This sets the nixpkgs (and thereby the modules, pkgs and lib) that
+                are used for evaluating the container.
+
+              - containers.${name}.config.nixpkgs.pkgs
+                This only sets the `pkgs` argument used inside the container modules.
+            ''
+            else null;
+          in {
+            path = builtins.seq checkAssertion
+              mkIf options.config.isDefined config.config.system.build.toplevel;
+          };
+        }));
+
+      default = {};
+      example = literalExpression
+        ''
+          { webserver =
+              { path = "/nix/var/nix/profiles/webserver";
+              };
+            database =
+              { config =
+                  { config, pkgs, ... }:
+                  { services.postgresql.enable = true;
+                    services.postgresql.package = pkgs.postgresql_10;
+
+                    system.stateVersion = "21.05";
+                  };
+              };
+          }
+        '';
+      description = ''
+        A set of NixOS system configurations to be run as lightweight
+        containers.  Each container appears as a service
+        <literal>container-<replaceable>name</replaceable></literal>
+        on the host system, allowing it to be started and stopped via
+        <command>systemctl</command>.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (config.boot.enableContainers) (let
+
+    unit = {
+      description = "Container '%i'";
+
+      unitConfig.RequiresMountsFor = "/var/lib/containers/%i";
+
+      path = [ pkgs.iproute2 ];
+
+      environment = {
+        root = "/var/lib/containers/%i";
+        INSTANCE = "%i";
+      };
+
+      preStart = preStartScript dummyConfig;
+
+      script = startScript dummyConfig;
+
+      postStart = postStartScript dummyConfig;
+
+      restartIfChanged = false;
+
+      serviceConfig = serviceDirectives dummyConfig;
+    };
+  in {
+    systemd.targets.multi-user.wants = [ "machines.target" ];
+
+    systemd.services = listToAttrs (filter (x: x.value != null) (
+      # The generic container template used by imperative containers
+      [{ name = "container@"; value = unit; }]
+      # declarative containers
+      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
+          containerConfig = cfg // (
+          if cfg.enableTun then
+            {
+              allowedDevices = cfg.allowedDevices
+                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              additionalCapabilities = cfg.additionalCapabilities
+                ++ [ "CAP_NET_ADMIN" ];
+            }
+          else {});
+        in
+          recursiveUpdate unit {
+            preStart = preStartScript containerConfig;
+            script = startScript containerConfig;
+            postStart = postStartScript containerConfig;
+            serviceConfig = serviceDirectives containerConfig;
+            unitConfig.RequiresMountsFor = lib.optional (!containerConfig.ephemeral) "/var/lib/containers/%i";
+            environment.root = if containerConfig.ephemeral then "/run/containers/%i" else "/var/lib/containers/%i";
+          } // (
+          if containerConfig.autoStart then
+            {
+              wantedBy = [ "machines.target" ];
+              wants = [ "network.target" ];
+              after = [ "network.target" ];
+              restartTriggers = [
+                containerConfig.path
+                config.environment.etc."containers/${name}.conf".source
+              ];
+              restartIfChanged = true;
+            }
+          else {})
+      )) config.containers)
+    ));
+
+    # Generate a configuration file in /etc/containers for each
+    # container so that container@.target can get the container
+    # configuration.
+    environment.etc =
+      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
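+      # e.g. { protocol = "tcp"; hostPort = 8080; containerPort = 80; }
+      # renders to "tcp:8080:80"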
+      in mapAttrs' (name: cfg: nameValuePair "containers/${name}.conf"
+      { text =
+          ''
+            SYSTEM_PATH=${cfg.path}
+            ${optionalString cfg.privateNetwork ''
+              PRIVATE_NETWORK=1
+              ${optionalString (cfg.hostBridge != null) ''
+                HOST_BRIDGE=${cfg.hostBridge}
+              ''}
+              ${optionalString (length cfg.forwardPorts > 0) ''
+                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
+              ''}
+              ${optionalString (cfg.hostAddress != null) ''
+                HOST_ADDRESS=${cfg.hostAddress}
+              ''}
+              ${optionalString (cfg.hostAddress6 != null) ''
+                HOST_ADDRESS6=${cfg.hostAddress6}
+              ''}
+              ${optionalString (cfg.localAddress != null) ''
+                LOCAL_ADDRESS=${cfg.localAddress}
+              ''}
+              ${optionalString (cfg.localAddress6 != null) ''
+                LOCAL_ADDRESS6=${cfg.localAddress6}
+              ''}
+            ''}
+            INTERFACES="${toString cfg.interfaces}"
+            MACVLANS="${toString cfg.macvlans}"
+            ${optionalString cfg.autoStart ''
+              AUTO_START=1
+            ''}
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
+          '';
+      }) config.containers;
+
+    # Generate /etc/hosts entries for the containers.
+    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
+      ''
+        ${head (splitString "/" cfg.localAddress)} ${name}.containers
+      '') config.containers);
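+    # e.g. a (hypothetical) container "webserver" with localAddress =
+    # "10.231.136.2/24" yields the hosts entry "10.231.136.2 webserver.containers";
+    # the netmask is stripped by the splitString above.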
+
+    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
+
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
+    environment.systemPackages = [ pkgs.nixos-container ];
+
+    boot.kernelModules = [
+      "bridge"
+      "macvlan"
+      "tap"
+      "tun"
+    ];
+  });
+}
diff --git a/nixos/modules/virtualisation/oci-containers.nix b/nixos/modules/virtualisation/oci-containers.nix
new file mode 100644
index 00000000000..f4048172783
--- /dev/null
+++ b/nixos/modules/virtualisation/oci-containers.nix
@@ -0,0 +1,369 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.oci-containers;
+  proxy_env = config.networking.proxy.envVars;
+
+  defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+  containerOptions =
+    { ... }: {
+
+      options = {
+
+        image = mkOption {
+          type = with types; str;
+          description = "OCI image to run.";
+          example = "library/hello-world";
+        };
+
+        imageFile = mkOption {
+          type = with types; nullOr package;
+          default = null;
+          description = ''
+            Path to an image file to load before running the image. This can
+            be used to bypass pulling the image from the registry.
+
+            The <literal>image</literal> attribute must match the name and
+            tag of the image contained in this file, as they will be used to
+            run the container with that image. If they do not match, the
+            image will be pulled from the registry as usual.
+          '';
+          example = literalExpression "pkgs.dockerTools.buildImage {...};";
+        };
+
+        login = {
+
+          username = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = "Username for login.";
+          };
+
+          passwordFile = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = "Path to file containing password.";
+            example = "/etc/nixos/dockerhub-password.txt";
+          };
+
+          registry = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = "Registry where to login to.";
+            example = "https://docker.pkg.github.com";
+          };
+
+        };
+
+        cmd = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = "Commandline arguments to pass to the image's entrypoint.";
+          example = literalExpression ''
+            ["--port=9000"]
+          '';
+        };
+
+        entrypoint = mkOption {
+          type = with types; nullOr str;
+          description = "Override the default entrypoint of the image.";
+          default = null;
+          example = "/bin/my-app";
+        };
+
+        environment = mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = "Environment variables to set for this container.";
+          example = literalExpression ''
+            {
+              DATABASE_HOST = "db.example.com";
+              DATABASE_PORT = "3306";
+            }
+          '';
+        };
+
+        environmentFiles = mkOption {
+          type = with types; listOf path;
+          default = [];
+          description = "Environment files for this container.";
+          example = literalExpression ''
+            [
+              /path/to/.env
+              /path/to/.env.secret
+            ]
+          '';
+        };
+
+        log-driver = mkOption {
+          type = types.str;
+          default = "journald";
+          description = ''
+            Logging driver for the container.  The default of
+            <literal>"journald"</literal> means that the container's logs will be
+            handled as part of the systemd unit.
+
+            For more details and a full list of logging drivers, refer to the respective backend's documentation.
+
+            For Docker:
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">Docker engine documentation</link>
+
+            For Podman:
+            Refer to the docker-run(1) man page.
+          '';
+        };
+
+        ports = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            Network ports to publish from the container to the outer host.
+
+            Valid formats:
+
+            <itemizedlist>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;:&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;ip&gt;::&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+              <listitem>
+                <para>
+                  <literal>&lt;containerPort&gt;</literal>
+                </para>
+              </listitem>
+            </itemizedlist>
+
+            Both <literal>hostPort</literal> and
+            <literal>containerPort</literal> can be specified as a range of
+            ports.  When specifying ranges for both, the number of container
+            ports in the range must match the number of host ports in the
+            range.  Example: <literal>1234-1236:1234-1236/tcp</literal>
+
+            When specifying a range for <literal>hostPort</literal> only, the
+            <literal>containerPort</literal> must <emphasis>not</emphasis> be a
+            range.  In this case, the container port is published somewhere
+            within the specified <literal>hostPort</literal> range.  Example:
+            <literal>1234-1236:1234/tcp</literal>
+
+            Refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#expose-incoming-ports">
+            Docker engine documentation</link> for full details.
+          '';
+          example = literalExpression ''
+            [
+              "8080:9000"
+            ]
+          '';
+        };
+
+        user = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = ''
+            Override the username or UID (and optionally groupname or GID) used
+            in the container.
+          '';
+          example = "nobody:nogroup";
+        };
+
+        volumes = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            List of volumes to attach to this container.
+
+            Note that this is a list of <literal>"src:dst"</literal> strings to
+            allow for <literal>src</literal> to refer to
+            <literal>/nix/store</literal> paths, which would be difficult with an
+            attribute set.  There are also a variety of mount options available
+            as a third field; please refer to the
+            <link xlink:href="https://docs.docker.com/engine/reference/run/#volume-shared-filesystems">
+            docker engine documentation</link> for details.
+          '';
+          example = literalExpression ''
+            [
+              "volume_name:/path/inside/container"
+              "/path/on/host:/path/inside/container"
+            ]
+          '';
+        };
+
+        workdir = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = "Override the default working directory for the container.";
+          example = "/var/lib/hello_world";
+        };
+
+        dependsOn = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = ''
+            Defines which other containers this one depends on: they will be added to both <literal>After</literal> and <literal>Requires</literal> of the unit.
+
+            Use the same name as the attribute under <literal>virtualisation.oci-containers.containers</literal>.
+          '';
+          example = literalExpression ''
+            virtualisation.oci-containers.containers = {
+              node1 = {};
+              node2 = {
+                dependsOn = [ "node1" ];
+              }
+            }
+          '';
+        };
+
+        extraOptions = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = "Extra options for <command>${defaultBackend} run</command>.";
+          example = literalExpression ''
+            ["--network=host"]
+          '';
+        };
+
+        autoStart = mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            When enabled, the container is automatically started on boot.
+            If this option is set to false, the container has to be started on-demand via its service.
+          '';
+        };
+      };
+    };
+
+  isValidLogin = login: login.username != null && login.passwordFile != null && login.registry != null;
+
+  mkService = name: container: let
+    dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+  in {
+    wantedBy = optional container.autoStart "multi-user.target";
+    after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ] ++ dependsOn;
+    requires = dependsOn;
+    environment = proxy_env;
+
+    path =
+      if cfg.backend == "docker" then [ config.virtualisation.docker.package ]
+      else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+      else throw "Unhandled backend: ${cfg.backend}";
+
+    preStart = ''
+      ${cfg.backend} rm -f ${name} || true
+      ${optionalString (isValidLogin container.login) ''
+        cat ${container.login.passwordFile} | \
+          ${cfg.backend} login \
+            ${container.login.registry} \
+            --username ${container.login.username} \
+            --password-stdin
+        ''}
+      ${optionalString (container.imageFile != null) ''
+        ${cfg.backend} load -i ${container.imageFile}
+        ''}
+      '';
+
+    script = concatStringsSep " \\\n  " ([
+      "exec ${cfg.backend} run"
+      "--rm"
+      "--name=${escapeShellArg name}"
+      "--log-driver=${container.log-driver}"
+    ] ++ optional (container.entrypoint != null)
+      "--entrypoint=${escapeShellArg container.entrypoint}"
+      ++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
+      ++ map (f: "--env-file ${escapeShellArg f}") container.environmentFiles
+      ++ map (p: "-p ${escapeShellArg p}") container.ports
+      ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
+      ++ map (v: "-v ${escapeShellArg v}") container.volumes
+      ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
+      ++ map escapeShellArg container.extraOptions
+      ++ [container.image]
+      ++ map escapeShellArg container.cmd
+    );
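+    # As a rough sketch, a (hypothetical) container attribute `hello` with
+    # image = "library/hello-world" and ports = [ "8080:9000" ] renders to
+    # approximately:
+    #   exec docker run \
+    #     --rm \
+    #     --name='hello' \
+    #     --log-driver=journald \
+    #     -p '8080:9000' \
+    #     library/hello-world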
+
+    preStop = "[ \"$SERVICE_RESULT\" = success ] || ${cfg.backend} stop ${name}";
+    postStop = "${cfg.backend} rm -f ${name} || true";
+
+    serviceConfig = {
+      ### There is no generalized way of supporting `reload` for docker
+      ### containers. Some containers may respond well to SIGHUP sent to their
+      ### init process, but it is not guaranteed; some apps have other reload
+      ### mechanisms, some don't have a reload signal at all, and some docker
+      ### images just have broken signal handling.  The best compromise in this
+      ### case is probably to leave ExecReload undefined, so `systemctl reload`
+      ### will at least result in an error instead of potentially undefined
+      ### behaviour.
+      ###
+      ### Advanced users can still override this part of the unit to implement
+      ### a custom reload handler, since the result of all this is a normal
+      ### systemd service from the perspective of the NixOS module system.
+      ###
+      # ExecReload = ...;
+      ###
+
+      TimeoutStartSec = 0;
+      TimeoutStopSec = 120;
+      Restart = "always";
+    };
+  };
+
+in {
+  imports = [
+    (
+      lib.mkChangedOptionModule
+      [ "docker-containers"  ]
+      [ "virtualisation" "oci-containers" ]
+      (oldcfg: {
+        backend = "docker";
+        containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+          extraOptions = v.extraDockerOptions or [];
+        }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+      })
+    )
+  ];
+
+  options.virtualisation.oci-containers = {
+
+    backend = mkOption {
+      type = types.enum [ "podman" "docker" ];
+      default =
+        # TODO: Once https://github.com/NixOS/nixpkgs/issues/77925 is resolved default to podman
+        # if versionAtLeast config.system.stateVersion "20.09" then "podman"
+        # else "docker";
+        "docker";
+      description = "The underlying Docker implementation to use.";
+    };
+
+    containers = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule containerOptions);
+      description = "OCI (Docker) containers to run as systemd services.";
+    };
+
+  };
+
+  config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+    {
+      systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+    }
+    (lib.mkIf (cfg.backend == "podman") {
+      virtualisation.podman.enable = true;
+    })
+    (lib.mkIf (cfg.backend == "docker") {
+      virtualisation.docker.enable = true;
+    })
+  ]);
+
+}
diff --git a/nixos/modules/virtualisation/openstack-config.nix b/nixos/modules/virtualisation/openstack-config.nix
new file mode 100644
index 00000000000..d01e0f23aba
--- /dev/null
+++ b/nixos/modules/virtualisation/openstack-config.nix
@@ -0,0 +1,58 @@
+{ pkgs, lib, ... }:
+
+with lib;
+
+let
+  metadataFetcher = import ./openstack-metadata-fetcher.nix {
+    targetRoot = "/";
+    wgetExtraOptions = "--retry-connrefused";
+  };
+in
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    ../profiles/headless.nix
+    # The OpenStack metadata service also exposes an EC2-compatible API.
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins (with SSH keys only; password login for root is prohibited)
+    services.openssh = {
+      enable = true;
+      permitRootLogin = "prohibit-password";
+      passwordAuthentication = mkDefault false;
+    };
+
+    # Force getting the hostname from OpenStack metadata.
+    networking.hostName = mkDefault "";
+
+    systemd.services.openstack-init = {
+      path = [ pkgs.wget ];
+      description = "Fetch Metadata on startup";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "apply-ec2-data.service" "amazon-init.service"];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      script = metadataFetcher;
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixos/modules/virtualisation/openstack-metadata-fetcher.nix b/nixos/modules/virtualisation/openstack-metadata-fetcher.nix
new file mode 100644
index 00000000000..25104bb4766
--- /dev/null
+++ b/nixos/modules/virtualisation/openstack-metadata-fetcher.nix
@@ -0,0 +1,22 @@
+{ targetRoot, wgetExtraOptions }:
+
+# OpenStack's metadata service aims to be EC2-compatible. Where
+# possible, try to keep the set of fetched metadata in sync with
+# ./ec2-metadata-fetcher.nix .
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+  rm -f "$metaDir/*"
+
+  echo "getting instance metadata..."
+
+  wget_imds() {
+    wget ${wgetExtraOptions} "$@"
+  }
+
+  wget_imds -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+  # When no user-data is provided, the OpenStack metadata server doesn't expose the user-data route.
+  (umask 077 && wget_imds -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data || rm -f "$metaDir/user-data")
+  wget_imds -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+  wget_imds -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
+''
diff --git a/nixos/modules/virtualisation/openvswitch.nix b/nixos/modules/virtualisation/openvswitch.nix
new file mode 100644
index 00000000000..436a375fb5e
--- /dev/null
+++ b/nixos/modules/virtualisation/openvswitch.nix
@@ -0,0 +1,145 @@
+# Systemd services for openvswitch
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vswitch;
+
+in {
+
+  options.virtualisation.vswitch = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable Open vSwitch. The configuration database daemon
+        (ovsdb-server) will be started.
+        '';
+    };
+
+    resetOnStart = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to reset the Open vSwitch configuration database to a default
+        configuration on every start of the systemd <literal>ovsdb.service</literal>.
+        '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openvswitch;
+      defaultText = literalExpression "pkgs.openvswitch";
+      description = ''
+        Open vSwitch package to use.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable (let
+
+    # Where the communication sockets live
+    runDir = "/run/openvswitch";
+
+    # The path to an initialized version of the database
+    db = pkgs.stdenv.mkDerivation {
+      name = "vswitch.db";
+      dontUnpack = true;
+      buildPhase = "true";
+      buildInputs = with pkgs; [
+        cfg.package
+      ];
+      installPhase = "mkdir -p $out";
+    };
+
+  in {
+    environment.systemPackages = [ cfg.package ];
+    boot.kernelModules = [ "tun" "openvswitch" ];
+
+    boot.extraModulePackages = [ cfg.package ];
+
+    systemd.services.ovsdb = {
+      description = "Open_vSwitch Database Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      path = [ cfg.package ];
+      restartTriggers = [ db cfg.package ];
+      # Create the config database
+      preStart =
+        ''
+        mkdir -p ${runDir}
+        mkdir -p /var/db/openvswitch
+        chmod +w /var/db/openvswitch
+        ${optionalString cfg.resetOnStart "rm -f /var/db/openvswitch/conf.db"}
+        if [[ ! -e /var/db/openvswitch/conf.db ]]; then
+          ${cfg.package}/bin/ovsdb-tool create \
+            "/var/db/openvswitch/conf.db" \
+            "${cfg.package}/share/openvswitch/vswitch.ovsschema"
+        fi
+        chmod -R +w /var/db/openvswitch
+        if ${cfg.package}/bin/ovsdb-tool needs-conversion /var/db/openvswitch/conf.db | grep -q "yes"
+        then
+          echo "Performing database upgrade"
+          ${cfg.package}/bin/ovsdb-tool convert /var/db/openvswitch/conf.db
+        else
+          echo "Database already up to date"
+        fi
+        '';
+      serviceConfig = {
+        ExecStart =
+          ''
+          ${cfg.package}/bin/ovsdb-server \
+            --remote=punix:${runDir}/db.sock \
+            --private-key=db:Open_vSwitch,SSL,private_key \
+            --certificate=db:Open_vSwitch,SSL,certificate \
+            --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert \
+            --unixctl=ovsdb.ctl.sock \
+            --pidfile=/run/openvswitch/ovsdb.pid \
+            --detach \
+            /var/db/openvswitch/conf.db
+          '';
+        Restart = "always";
+        RestartSec = 3;
+        PIDFile = "/run/openvswitch/ovsdb.pid";
+        # Use service type 'forking' to correctly determine when ovsdb-server is ready.
+        Type = "forking";
+      };
+      postStart = ''
+        ${cfg.package}/bin/ovs-vsctl --timeout 3 --retry --no-wait init
+      '';
+    };
+
+    systemd.services.ovs-vswitchd = {
+      description = "Open_vSwitch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "ovsdb.service" ];
+      after = [ "ovsdb.service" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-vswitchd \
+          --pidfile=/run/openvswitch/ovs-vswitchd.pid \
+          --detach
+        '';
+        PIDFile = "/run/openvswitch/ovs-vswitchd.pid";
+        # Use service type 'forking' to correctly determine when vswitchd is ready.
+        Type = "forking";
+        Restart = "always";
+        RestartSec = 3;
+      };
+    };
+
+  });
+
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "vswitch" "ipsec" ] ''
+      OpenVSwitch IPSec functionality has been removed, because it depended on racoon,
+      which was removed from nixpkgs, because it was abandoned upstream.
+    '')
+  ];
+
+  meta.maintainers = with maintainers; [ netixx ];
+
+}
diff --git a/nixos/modules/virtualisation/parallels-guest.nix b/nixos/modules/virtualisation/parallels-guest.nix
new file mode 100644
index 00000000000..d950cecff6f
--- /dev/null
+++ b/nixos/modules/virtualisation/parallels-guest.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  prl-tools = config.hardware.parallels.package;
+in
+
+{
+
+  options = {
+    hardware.parallels = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This enables Parallels Tools for Linux guests, along with the
+          provided video, mouse and other hardware drivers.
+        '';
+      };
+
+      autoMountShares = mkOption {
+        type = types.bool;
+        default = true;
+        description = ''
+          Controls the prlfsmountd service. While this service is running, shares cannot be
+          mounted manually through `mount -t prl_fs ...`, as the service will remount them and
+          trample any options that were set. Recommended for simple file sharing, but for extended
+          share use, such as for code, disable this service and mount shares manually.
+        '';
+      };
+
+      package = mkOption {
+        type = types.nullOr types.package;
+        default = config.boot.kernelPackages.prl-tools;
+        defaultText = literalExpression "config.boot.kernelPackages.prl-tools";
+        description = ''
+          Defines which package to use for prl-tools. Override to change the version.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf config.hardware.parallels.enable {
+    services.xserver = {
+      drivers = singleton
+        { name = "prlvideo"; modules = [ prl-tools ]; };
+
+      screenSection = ''
+        Option "NoMTRR"
+      '';
+
+      config = ''
+        Section "InputClass"
+          Identifier "prlmouse"
+          MatchIsPointer "on"
+          MatchTag "prlmouse"
+          Driver "prlmouse"
+        EndSection
+      '';
+    };
+
+    hardware.opengl.package = prl-tools;
+    hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.prl-tools.override { libsOnly = true; kernel = null; };
+    hardware.opengl.setLdLibraryPath = true;
+
+    services.udev.packages = [ prl-tools ];
+
+    environment.systemPackages = [ prl-tools ];
+
+    boot.extraModulePackages = [ prl-tools ];
+
+    boot.kernelModules = [ "prl_tg" "prl_eth" "prl_fs" "prl_fs_freeze" ];
+
+    services.timesyncd.enable = false;
+
+    systemd.services.prltoolsd = {
+      description = "Parallels Tools' service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${prl-tools}/bin/prltoolsd -f";
+        PIDFile = "/var/run/prltoolsd.pid";
+      };
+    };
+
+    systemd.services.prlfsmountd = mkIf config.hardware.parallels.autoMountShares {
+      description = "Parallels Shared Folders Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        ExecStart = "${prl-tools}/sbin/prlfsmountd ${PIDFile}";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /media";
+        ExecStopPost = "${prl-tools}/sbin/prlfsmountd -u";
+        PIDFile = "/run/prlfsmountd.pid";
+      };
+    };
+
+    systemd.services.prlshprint = {
+      description = "Parallels Shared Printer Tool";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "cups.service" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${prl-tools}/bin/prlshprint";
+      };
+    };
+
+    systemd.user.services = {
+      prlcc = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcc";
+        };
+      };
+      prldnd = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prldnd";
+        };
+      };
+      prl_wmouse_d  = {
+        description = "Parallels Walking Mouse Daemon";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prl_wmouse_d";
+        };
+      };
+      prlcp = {
+        description = "Parallels CopyPaste Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcp";
+        };
+      };
+      prlsga = {
+        description = "Parallels Shared Guest Applications Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlsga";
+        };
+      };
+      prlshprof = {
+        description = "Parallels Shared Profile Tool";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlshprof";
+        };
+      };
+    };
+
+  };
+}
diff --git a/nixos/modules/virtualisation/podman/default.nix b/nixos/modules/virtualisation/podman/default.nix
new file mode 100644
index 00000000000..94fd727a4b5
--- /dev/null
+++ b/nixos/modules/virtualisation/podman/default.nix
@@ -0,0 +1,184 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.podman;
+  toml = pkgs.formats.toml { };
+  json = pkgs.formats.json { };
+
+  inherit (lib) mkOption types;
+
+  podmanPackage = (pkgs.podman.override { inherit (cfg) extraPackages; });
+
+  # Provides a fake "docker" binary mapping to podman
+  dockerCompat = pkgs.runCommand "${podmanPackage.pname}-docker-compat-${podmanPackage.version}" {
+    outputs = [ "out" "man" ];
+    inherit (podmanPackage) meta;
+  } ''
+    mkdir -p $out/bin
+    ln -s ${podmanPackage}/bin/podman $out/bin/docker
+
+    mkdir -p $man/share/man/man1
+    for f in ${podmanPackage.man}/share/man/man1/*; do
+      basename=$(basename $f | sed s/podman/docker/g)
+      ln -s $f $man/share/man/man1/$basename
+    done
+  '';
+
+  net-conflist = pkgs.runCommand "87-podman-bridge.conflist" {
+    nativeBuildInputs = [ pkgs.jq ];
+    extraPlugins = builtins.toJSON cfg.defaultNetwork.extraPlugins;
+    jqScript = ''
+      . + { "plugins": (.plugins + $extraPlugins) }
+    '';
+  } ''
+    jq <${cfg.package}/etc/cni/net.d/87-podman-bridge.conflist \
+      --argjson extraPlugins "$extraPlugins" \
+      "$jqScript" \
+      >$out
+  '';
+
+in
+{
+  imports = [
+    ./dnsname.nix
+    ./network-socket.nix
+    (lib.mkRenamedOptionModule [ "virtualisation" "podman" "libpod" ] [ "virtualisation" "containers" "containersConf" ])
+  ];
+
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
+  options.virtualisation.podman = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          This option enables Podman, a daemonless container engine for
+          developing, managing, and running OCI containers on your Linux system.
+
+          It is a drop-in replacement for the <command>docker</command> command.
+        '';
+      };
+
+    dockerSocket.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Make the Podman socket available in place of the Docker socket, so
+        Docker tools can find the Podman socket.
+
+        Podman implements the Docker API.
+
+        Users must be in the <code>podman</code> group in order to connect. As
+        with Docker, members of this group can gain root access.
+      '';
+    };
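+    # A hedged usage sketch: with dockerSocket.enable = true, a user in the
+    # "podman" group can point stock Docker clients at the shared socket,
+    # e.g. `DOCKER_HOST=unix:///run/docker.sock docker info` (the symlink is
+    # created by the tmpfiles rule in the config section below).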
+
+    dockerCompat = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Create an alias mapping <command>docker</command> to <command>podman</command>.
+      '';
+    };
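+    # For example, a minimal sketch of a host configuration using the alias:
+    #
+    #   virtualisation.podman = {
+    #     enable = true;
+    #     dockerCompat = true; # `docker run ...` now invokes podman
+    #   };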
+
+    enableNvidia = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enable use of NVIDIA GPUs from within podman containers.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = lib.literalExpression ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = ''
+        Extra packages to be installed in the Podman wrapper.
+      '';
+    };
+
+    package = lib.mkOption {
+      type = types.package;
+      default = podmanPackage;
+      internal = true;
+      description = ''
+        The final Podman package (including extra packages).
+      '';
+    };
+
+    defaultNetwork.extraPlugins = lib.mkOption {
+      type = types.listOf json.type;
+      default = [];
+      description = ''
+        Extra CNI plugin configurations to add to podman's default network.
+      '';
+    };
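+    # For illustration: the dnsname module (dnsname.nix) appends an entry of
+    # the form { type = "dnsname"; domainName = "dns.podman"; ... } to this
+    # list when enabled.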
+
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ cfg.package ]
+        ++ lib.optional cfg.dockerCompat dockerCompat;
+
+      environment.etc."cni/net.d/87-podman-bridge.conflist".source = net-conflist;
+
+      virtualisation.containers = {
+        enable = true; # Enable common /etc/containers configuration
+        containersConf.settings = lib.optionalAttrs cfg.enableNvidia {
+          engine = {
+            conmon_env_vars = [ "PATH=${lib.makeBinPath [ pkgs.nvidia-podman ]}" ];
+            runtimes.nvidia = [ "${pkgs.nvidia-podman}/bin/nvidia-container-runtime" ];
+          };
+        };
+      };
+
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.podman.serviceConfig = {
+        ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
+      };
+
+      systemd.sockets.podman.wantedBy = [ "sockets.target" ];
+      systemd.sockets.podman.socketConfig.SocketGroup = "podman";
+
+      systemd.tmpfiles.packages = [
+        # The /run/podman rule interferes with our podman group, so we remove
+        # it and let the systemd socket logic take care of it.
+        (pkgs.runCommand "podman-tmpfiles-nixos" { package = cfg.package; } ''
+          mkdir -p $out/lib/tmpfiles.d/
+          grep -v 'D! /run/podman 0700 root root' \
+            <$package/lib/tmpfiles.d/podman.conf \
+            >$out/lib/tmpfiles.d/podman.conf
+        '') ];
+
+      systemd.tmpfiles.rules =
+        lib.optionals cfg.dockerSocket.enable [
+          "L! /run/docker.sock - - - - /run/podman/podman.sock"
+        ];
+
+      users.groups.podman = {};
+
+      assertions = [
+        {
+          assertion = cfg.dockerCompat -> !config.virtualisation.docker.enable;
+          message = "Option dockerCompat conflicts with docker";
+        }
+        {
+          assertion = cfg.dockerSocket.enable -> !config.virtualisation.docker.enable;
+          message = ''
+            The options virtualisation.podman.dockerSocket.enable and virtualisation.docker.enable conflict, because only one can serve the socket.
+          '';
+        }
+      ];
+    }
+  ]);
+}
diff --git a/nixos/modules/virtualisation/podman/dnsname.nix b/nixos/modules/virtualisation/podman/dnsname.nix
new file mode 100644
index 00000000000..beef1975507
--- /dev/null
+++ b/nixos/modules/virtualisation/podman/dnsname.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib)
+    mkOption
+    mkIf
+    types
+    ;
+
+  cfg = config.virtualisation.podman;
+
+in
+{
+  options = {
+    virtualisation.podman = {
+
+      defaultNetwork.dnsname.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable DNS resolution in the default podman network.
+        '';
+      };
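+      # e.g. virtualisation.podman.defaultNetwork.dnsname.enable = true;
+      # lets containers on the default network resolve one another by name.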
+
+    };
+  };
+
+  config = {
+    virtualisation.containers.containersConf.cniPlugins = mkIf cfg.defaultNetwork.dnsname.enable [ pkgs.dnsname-cni ];
+    virtualisation.podman.defaultNetwork.extraPlugins =
+      lib.optional cfg.defaultNetwork.dnsname.enable {
+        type = "dnsname";
+        domainName = "dns.podman";
+        capabilities.aliases = true;
+      };
+  };
+}
diff --git a/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix b/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix
new file mode 100644
index 00000000000..a0e7e433164
--- /dev/null
+++ b/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib)
+    mkOption
+    types
+    ;
+
+  cfg = config.virtualisation.podman.networkSocket;
+
+in
+{
+  options.virtualisation.podman.networkSocket = {
+    server = mkOption {
+      type = types.enum [ "ghostunnel" ];
+    };
+  };
+
+  config = lib.mkIf (cfg.enable && cfg.server == "ghostunnel") {
+
+    services.ghostunnel = {
+      enable = true;
+      servers."podman-socket" = {
+        inherit (cfg.tls) cert key cacert;
+        listen = "${cfg.listenAddress}:${toString cfg.port}";
+        target = "unix:/run/podman/podman.sock";
+        allowAll = lib.mkDefault true;
+      };
+    };
+    systemd.services.ghostunnel-server-podman-socket.serviceConfig.SupplementaryGroups = ["podman"];
+
+  };
+
+  meta.maintainers = lib.teams.podman.members ++ [ lib.maintainers.roberth ];
+}
diff --git a/nixos/modules/virtualisation/podman/network-socket.nix b/nixos/modules/virtualisation/podman/network-socket.nix
new file mode 100644
index 00000000000..94d8da9d2b6
--- /dev/null
+++ b/nixos/modules/virtualisation/podman/network-socket.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib)
+    mkOption
+    types
+    ;
+
+  cfg = config.virtualisation.podman.networkSocket;
+
+in
+{
+  imports = [
+    ./network-socket-ghostunnel.nix
+  ];
+
+  options.virtualisation.podman.networkSocket = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Make the Podman and Docker compatibility API available over the network
+        with TLS client certificate authentication.
+
+        This allows Docker clients to connect with the equivalents of the Docker
+        CLI <code>-H</code> and <code>--tls*</code> family of options.
+
+        For certificate setup, see https://docs.docker.com/engine/security/protect-access/
+
+        This option is independent of <xref linkend="opt-virtualisation.podman.dockerSocket.enable"/>.
+      '';
+    };
+
+    server = mkOption {
+      type = types.enum [];
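+      # Note: this enum starts empty and is extended by server implementation
+      # modules; the ghostunnel module merges "ghostunnel" into it.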
+      description = ''
+        Choice of TLS proxy server.
+      '';
+      example = "ghostunnel";
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to open the port in the firewall.
+      '';
+    };
+
+    tls.cacert = mkOption {
+      type = types.path;
+      description = ''
+        Path to CA certificate to use for client authentication.
+      '';
+    };
+
+    tls.cert = mkOption {
+      type = types.path;
+      description = ''
+        Path to certificate describing the server.
+      '';
+    };
+
+    tls.key = mkOption {
+      type = types.path;
+      description = ''
+        Path to the private key corresponding to the server certificate.
+
+        Use a string for this setting. Otherwise it will be copied to the Nix
+        store first, where it is readable by any system process.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 2376;
+      description = ''
+        TCP port number for receiving TLS connections.
+      '';
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = ''
+        Interface address for receiving TLS connections.
+      '';
+    };
+  };
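+  # A hedged example (the certificate paths are hypothetical):
+  #
+  #   virtualisation.podman.networkSocket = {
+  #     enable = true;
+  #     server = "ghostunnel";
+  #     tls.cacert = "/var/lib/podman-tls/ca.pem";
+  #     tls.cert = "/var/lib/podman-tls/server-cert.pem";
+  #     tls.key = "/var/lib/podman-tls/server-key.pem";
+  #   };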
+
+  config = {
+    networking.firewall.allowedTCPPorts =
+      lib.optional (cfg.enable && cfg.openFirewall) cfg.port;
+  };
+
+  meta.maintainers = lib.teams.podman.members ++ [ lib.maintainers.roberth ];
+}
diff --git a/nixos/modules/virtualisation/proxmox-image.nix b/nixos/modules/virtualisation/proxmox-image.nix
new file mode 100644
index 00000000000..c537d5aed44
--- /dev/null
+++ b/nixos/modules/virtualisation/proxmox-image.nix
@@ -0,0 +1,169 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options.proxmox = {
+    qemuConf = {
+      # essential configs
+      boot = mkOption {
+        type = types.str;
+        default = "";
+        example = "order=scsi0;net0";
+        description = ''
+          Default boot device. PVE will try all devices in its default order if this value is empty.
+        '';
+      };
+      scsihw = mkOption {
+        type = types.str;
+        default = "virtio-scsi-pci";
+        example = "lsi";
+        description = ''
+          SCSI controller type. Must be one of the supported values given in
+          <link xlink:href="https://pve.proxmox.com/wiki/Qemu/KVM_Virtual_Machines"/>
+        '';
+      };
+      virtio0 = mkOption {
+        type = types.str;
+        default = "local-lvm:vm-9999-disk-0";
+        example = "ceph:vm-123-disk-0";
+        description = ''
+          Configuration for the default virtio disk. It can be used as a cue for PVE to autodetect the target storage.
+          This parameter is required by PVE even if it isn't used.
+        '';
+      };
+      ostype = mkOption {
+        type = types.str;
+        default = "l26";
+        description = ''
+          Guest OS type
+        '';
+      };
+      cores = mkOption {
+        type = types.ints.positive;
+        default = 1;
+        description = ''
+          Guest core count
+        '';
+      };
+      memory = mkOption {
+        type = types.ints.positive;
+        default = 1024;
+        description = ''
+          Guest memory in MB
+        '';
+      };
+
+      # optional configs
+      name = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}";
+        description = ''
+          VM name
+        '';
+      };
+      net0 = mkOption {
+        type = types.commas;
+        default = "virtio=00:00:00:00:00:00,bridge=vmbr0,firewall=1";
+        description = ''
+          Configuration for the default interface. When restoring from VMA, check the
+          "unique" box to ensure device mac is randomized.
+        '';
+      };
+      serial0 = mkOption {
+        type = types.str;
+        default = "socket";
+        example = "/dev/ttyS0";
+        description = ''
+          Create a serial device inside the VM (n is 0 to 3), and pass through a host serial device (e.g. /dev/ttyS0),
+          or create a unix socket on the host side (use qm terminal to open a terminal connection).
+        '';
+      };
+      agent = mkOption {
+        type = types.bool;
+        apply = x: if x then "1" else "0";
+        default = true;
+        description = ''
+          Expect guest to have qemu agent running
+        '';
+      };
+    };
+    qemuExtraConf = mkOption {
+      type = with types; attrsOf (oneOf [ str int ]);
+      default = {};
+      example = literalExpression ''{
+        cpu = "host";
+        onboot = 1;
+      }'';
+      description = ''
+        Additional options appended to qemu-server.conf
+      '';
+    };
+    filenameSuffix = mkOption {
+      type = types.str;
+      default = config.proxmox.qemuConf.name;
+      example = "999-nixos_template";
+      description = ''
+        Filename of the image will be vzdump-qemu-''${filenameSuffix}.vma.zst.
+        This will also determine the default name of the VM on restoring the VMA.
+        Start this value with a number if you want the VMA to be detected as a backup of
+        any specific VMID.
+      '';
+    };
+  };
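+  # A hedged build sketch (the -I nixos-config path is hypothetical):
+  #
+  #   nix-build '<nixpkgs/nixos>' -I nixos-config=./proxmox.nix \
+  #     -A config.system.build.VMA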
+
+  config = let
+    cfg = config.proxmox;
+    cfgLine = name: value: ''
+      ${name}: ${builtins.toString value}
+    '';
+    cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
+      # generated by NixOS
+      ${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
+      #qmdump#map:virtio0:drive-virtio0:local-lvm:raw:
+    '';
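+    # For illustration: cfgLine renders each attribute as "name: value", so
+    # { cores = 1; memory = 1024; } becomes the qemu-server.conf lines
+    #   cores: 1
+    #   memory: 1024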
+  in {
+    system.build.VMA = import ../../lib/make-disk-image.nix {
+      name = "proxmox-${cfg.filenameSuffix}";
+      postVM = let
+        # Build qemu with PVE's patch that adds support for the VMA format
+        vma = pkgs.qemu_kvm.overrideAttrs ( super: {
+          patches = let
+            rev = "cc707c362ea5c8d832aac270d1ffa7ac66a8908f";
+            path = "debian/patches/pve/0025-PVE-Backup-add-vma-backup-format-code.patch";
+            vma-patch = pkgs.fetchpatch {
+              url = "https://git.proxmox.com/?p=pve-qemu.git;a=blob_plain;hb=${rev};f=${path}";
+              sha256 = "1z467xnmfmry3pjy7p34psd5xdil9x0apnbvfz8qbj0bf9fgc8zf";
+            };
+          in super.patches ++ [ vma-patch ];
+          buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
+        });
+      in
+      ''
+        ${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
+          -c ${cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)}/qemu-server.conf drive-virtio0=$diskImage
+        rm $diskImage
+        ${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
+        mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
+      '';
+      format = "raw";
+      inherit config lib pkgs;
+    };
+
+    boot = {
+      growPartition = true;
+      kernelParams = [ "console=ttyS0" ];
+      loader.grub.device = lib.mkDefault "/dev/vda";
+      loader.timeout = 0;
+      initrd.availableKernelModules = [ "uas" "virtio_blk" "virtio_pci" ];
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    services.qemuGuest.enable = lib.mkDefault true;
+  };
+}
diff --git a/nixos/modules/virtualisation/proxmox-lxc.nix b/nixos/modules/virtualisation/proxmox-lxc.nix
new file mode 100644
index 00000000000..3913b474afb
--- /dev/null
+++ b/nixos/modules/virtualisation/proxmox-lxc.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options.proxmoxLXC = {
+    privileged = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to enable privileged mounts
+      '';
+    };
+    manageNetwork = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Whether to manage network interfaces through nix options.
+        When false, systemd-networkd is enabled to accept network
+        configuration from proxmox.
+      '';
+    };
+  };
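+  # A hedged build sketch (the -I nixos-config path is hypothetical):
+  #
+  #   nix-build '<nixpkgs/nixos>' -I nixos-config=./lxc.nix \
+  #     -A config.system.build.tarball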
+
+  config =
+    let
+      cfg = config.proxmoxLXC;
+    in
+    {
+      system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
+        storeContents = [{
+          object = config.system.build.toplevel;
+          symlink = "none";
+        }];
+
+        contents = [{
+          source = config.system.build.toplevel + "/init";
+          target = "/sbin/init";
+        }];
+
+        extraCommands = "mkdir -p root etc/systemd/network";
+      };
+
+      boot = {
+        isContainer = true;
+        loader.initScript.enable = true;
+      };
+
+      networking = mkIf (!cfg.manageNetwork) {
+        useDHCP = false;
+        useHostResolvConf = false;
+        useNetworkd = true;
+      };
+
+      services.openssh = {
+        enable = mkDefault true;
+        startWhenNeeded = mkDefault true;
+      };
+
+      systemd.mounts = mkIf (!cfg.privileged)
+        [{ where = "/sys/kernel/debug"; enable = false; }];
+
+    };
+}
diff --git a/nixos/modules/virtualisation/qemu-guest-agent.nix b/nixos/modules/virtualisation/qemu-guest-agent.nix
new file mode 100644
index 00000000000..39273e523e8
--- /dev/null
+++ b/nixos/modules/virtualisation/qemu-guest-agent.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.qemuGuest;
+in {
+
+  options.services.qemuGuest = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = "Whether to enable the qemu guest agent.";
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.qemu_kvm.ga;
+        defaultText = literalExpression "pkgs.qemu_kvm.ga";
+        description = "The QEMU guest agent package.";
+      };
+  };
+
+  config = mkIf cfg.enable (
+      mkMerge [
+    {
+
+      services.udev.extraRules = ''
+        SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd" ENV{SYSTEMD_WANTS}="qemu-guest-agent.service"
+      '';
+
+      systemd.services.qemu-guest-agent = {
+        description = "Run the QEMU Guest Agent";
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/qemu-ga --statedir /run/qemu-ga";
+          Restart = "always";
+          RestartSec = 0;
+          # Runtime directory and mode
+          RuntimeDirectory = "qemu-ga";
+          RuntimeDirectoryMode = "0755";
+        };
+      };
+    }
+  ]
+  );
+}
diff --git a/nixos/modules/virtualisation/qemu-vm.nix b/nixos/modules/virtualisation/qemu-vm.nix
new file mode 100644
index 00000000000..51438935894
--- /dev/null
+++ b/nixos/modules/virtualisation/qemu-vm.nix
@@ -0,0 +1,1016 @@
+# This module creates a virtual machine from the NixOS configuration.
+# Building the `config.system.build.vm' attribute gives you a command
+# that starts a KVM/QEMU VM running the NixOS configuration defined in
+# `config'.  The Nix store is shared read-only with the host, which
+# makes (re)building VMs very efficient.  However, it also means you
+# can't reconfigure the guest inside the guest - you need to rebuild
+# the VM in the host.  On the other hand, the root filesystem is a
+# read/writable disk image persistent across VM reboots.
+
+{ config, lib, pkgs, options, ... }:
+
+with lib;
+
+let
+
+  qemu-common = import ../../lib/qemu-common.nix { inherit lib pkgs; };
+
+  cfg = config.virtualisation;
+
+  qemu = cfg.qemu.package;
+
+  consoles = lib.concatMapStringsSep " " (c: "console=${c}") cfg.qemu.consoles;
+
+  driveOpts = { ... }: {
+
+    options = {
+
+      file = mkOption {
+        type = types.str;
+        description = "The file image used for this drive.";
+      };
+
+      driveExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = "Extra options passed to drive flag.";
+      };
+
+      deviceExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = "Extra options passed to device flag.";
+      };
+
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description =
+          "A name for the drive. Must be unique in the drives list. Not passed to qemu.";
+      };
+
+    };
+
+  };
+
+  driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }:
+    let
+      drvId = "drive${toString idx}";
+      mkKeyValue = generators.mkKeyValueDefault {} "=";
+      mkOpts = opts: concatStringsSep "," (mapAttrsToList mkKeyValue opts);
+      driveOpts = mkOpts (driveExtraOpts // {
+        index = idx;
+        id = drvId;
+        "if" = "none";
+        inherit file;
+      });
+      deviceOpts = mkOpts (deviceExtraOpts // {
+        drive = drvId;
+      });
+      device =
+        if cfg.qemu.diskInterface == "scsi" then
+          "-device lsi53c895a -device scsi-hd,${deviceOpts}"
+        else
+          "-device virtio-blk-pci,${deviceOpts}";
+    in
+      "-drive ${driveOpts} ${device}";
+
+  drivesCmdLine = drives: concatStringsSep "\\\n    " (imap1 driveCmdline drives);
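+  # For illustration: driveCmdline 1 { file = "root.qcow2"; ... } renders
+  # roughly as
+  #   -drive file=root.qcow2,id=drive1,if=none,index=1 -device virtio-blk-pci,drive=drive1
+  # (assuming the default virtio disk interface).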
+
+
+  # Creates a device name from a 1-based numerical index, e.g.
+  # * `driveDeviceName 1` -> `/dev/vda`
+  # * `driveDeviceName 2` -> `/dev/vdb`
+  driveDeviceName = idx:
+    let letter = elemAt lowerChars (idx - 1);
+    in if cfg.qemu.diskInterface == "scsi" then
+      "/dev/sd${letter}"
+    else
+      "/dev/vd${letter}";
+
+  lookupDriveDeviceName = driveName: driveList:
+    (findSingle (drive: drive.name == driveName)
+      (throw "Drive ${driveName} not found")
+      (throw "Multiple drives named ${driveName}") driveList).device;
+
+  addDeviceNames =
+    imap1 (idx: drive: drive // { device = driveDeviceName idx; });
+
+  efiPrefix =
+    if pkgs.stdenv.hostPlatform.isx86 then "${pkgs.OVMF.fd}/FV/OVMF"
+    else if pkgs.stdenv.isAarch64 then "${pkgs.OVMF.fd}/FV/AAVMF"
+    else throw "No EFI firmware available for platform";
+  efiFirmware = "${efiPrefix}_CODE.fd";
+  efiVarsDefault = "${efiPrefix}_VARS.fd";
+
+  # Shell script to start the VM.
+  startVM =
+    ''
+      #! ${pkgs.runtimeShell}
+
+      set -e
+
+      NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}")
+
+      if ! test -e "$NIX_DISK_IMAGE"; then
+          ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
+            ${toString config.virtualisation.diskSize}M
+      fi
+
+      # Create a directory for storing temporary data of the running VM.
+      if [ -z "$TMPDIR" ] || [ -z "$USE_TMPDIR" ]; then
+          TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
+      fi
+
+      ${lib.optionalString cfg.useNixStoreImage
+      ''
+        # Create a writable copy/snapshot of the store image.
+        ${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${storeImage}/nixos.qcow2 "$TMPDIR"/store.img
+      ''}
+
+      # Create a directory for exchanging data with the VM.
+      mkdir -p "$TMPDIR/xchg"
+
+      ${lib.optionalString cfg.useBootLoader
+      ''
+        # Create a writable copy/snapshot of the boot disk.
+        # A writable boot disk can be booted from automatically.
+        ${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${bootDisk}/disk.img "$TMPDIR/disk.img"
+
+        NIX_EFI_VARS=$(readlink -f "''${NIX_EFI_VARS:-${cfg.efiVars}}")
+
+        ${lib.optionalString cfg.useEFIBoot
+        ''
+          # VM needs writable EFI vars
+          if ! test -e "$NIX_EFI_VARS"; then
+            cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS"
+            chmod 0644 "$NIX_EFI_VARS"
+          fi
+        ''}
+      ''}
+
+      cd "$TMPDIR"
+
+      ${lib.optionalString (cfg.emptyDiskImages != []) "idx=0"}
+      ${flip concatMapStrings cfg.emptyDiskImages (size: ''
+        if ! test -e "empty$idx.qcow2"; then
+            ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M"
+        fi
+        idx=$((idx + 1))
+      '')}
+
+      # Start QEMU.
+      exec ${qemu-common.qemuBinary qemu} \
+          -name ${config.system.name} \
+          -m ${toString config.virtualisation.memorySize} \
+          -smp ${toString config.virtualisation.cores} \
+          -device virtio-rng-pci \
+          ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
+          ${concatStringsSep " \\\n    "
+            (mapAttrsToList
+              (tag: share: "-virtfs local,path=${share.source},security_model=none,mount_tag=${tag}")
+              config.virtualisation.sharedDirectories)} \
+          ${drivesCmdLine config.virtualisation.qemu.drives} \
+          ${concatStringsSep " \\\n    " config.virtualisation.qemu.options} \
+          $QEMU_OPTS \
+          "$@"
+    '';
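+  # The script above is installed as `result/bin/run-<name>-vm` by
+  # `system.build.vm` (defined below); QEMU_OPTS and QEMU_NET_OPTS can be set
+  # at run time to append extra QEMU and SLiRP options.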
+
+
+  regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.additionalPaths; };
+
+
+  # Generate a hard disk image containing a /boot partition and GRUB
+  # in the MBR.  Used when the `useBootLoader' option is set.
+  # Uses `runInLinuxVM` to create the image in a throwaway VM.
+  # See note [Disk layout with `useBootLoader`].
+  # FIXME: use nixos/lib/make-disk-image.nix.
+  bootDisk =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "nixos-boot-disk"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/disk.img
+              ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M"
+              ${if cfg.useEFIBoot then ''
+                efiVars=$out/efi-vars.fd
+                cp ${efiVarsDefault} $efiVars
+                chmod 0644 $efiVars
+              '' else ""}
+            '';
+          buildInputs = [ pkgs.util-linux ];
+          QEMU_OPTS = "-nographic -serial stdio -monitor none"
+                      + lib.optionalString cfg.useEFIBoot (
+                        " -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
+                      + " -drive if=pflash,format=raw,unit=1,file=$efiVars");
+        }
+        ''
+          # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility
+          ${pkgs.gptfdisk}/bin/sgdisk \
+            --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
+            --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \
+            --attributes=1:set:1 \
+            --attributes=2:set:2 \
+            --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \
+            --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+            --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+            --hybrid 2 \
+            --recompute-chs /dev/vda
+
+          ${optionalString (config.boot.loader.grub.device != "/dev/vda")
+            # In this throwaway VM, we only have the /dev/vda disk, but the
+            # actual VM described by `config` (used by `switch-to-configuration`
+            # below) may set `boot.loader.grub.device` to a different device
+            # that's nonexistent in the throwaway VM.
+            # Create a symlink for that device, so that the `grub-install`
+            # by `switch-to-configuration` will hit /dev/vda anyway.
+            ''
+              ln -s /dev/vda ${config.boot.loader.grub.device}
+            ''
+          }
+
+          ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
+          export MTOOLS_SKIP_CHECK=1
+          ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
+
+          # Mount /boot; load necessary modules first.
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true
+          ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true
+          mkdir /boot
+          mount /dev/vda2 /boot
+
+          ${optionalString config.boot.loader.efi.canTouchEfiVariables ''
+            mount -t efivarfs efivarfs /sys/firmware/efi/efivars
+          ''}
+
+          # This is needed for GRUB 0.97, which doesn't know about virtio devices.
+          mkdir /boot/grub
+          echo '(hd0) /dev/vda' > /boot/grub/device.map
+
+          # This is needed for systemd-boot to find ESP, and udev is not available here to create this
+          mkdir -p /dev/block
+          ln -s /dev/vda2 /dev/block/254:2
+
+          # Set up system profile (normally done by nixos-rebuild / nix-env --set)
+          mkdir -p /nix/var/nix/profiles
+          ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link
+          ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system
+
+          # Install bootloader
+          touch /etc/NIXOS
+          export NIXOS_INSTALL_BOOTLOADER=1
+          ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /boot
+        '' # */
+    );
+
+  storeImage = import ../../lib/make-disk-image.nix {
+    inherit pkgs config lib;
+    additionalPaths = [ regInfo ];
+    format = "qcow2";
+    onlyNixStore = true;
+    partitionTableType = "none";
+    installBootLoader = false;
+    diskSize = "auto";
+    additionalSpace = "0M";
+    copyChannel = false;
+  };
+
+in
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    (mkRenamedOptionModule [ "virtualisation" "pathsInNixDB" ] [ "virtualisation" "additionalPaths" ])
+  ];
+
+  options = {
+
+    virtualisation.fileSystems = options.fileSystems;
+
+    virtualisation.memorySize =
+      mkOption {
+        type = types.ints.positive;
+        default = 1024;
+        description =
+          ''
+            The memory size in megabytes of the virtual machine.
+          '';
+      };
+
+    virtualisation.msize =
+      mkOption {
+        type = types.ints.positive;
+        default = 16384;
+        description =
+          ''
+            The msize (maximum packet size) option passed to 9p file systems, in
+            bytes. Increasing this can improve performance significantly,
+            at the cost of higher RAM usage.
+          '';
+      };
+
+    virtualisation.diskSize =
+      mkOption {
+        type = types.nullOr types.ints.positive;
+        default = 1024;
+        description =
+          ''
+            The disk size in megabytes of the virtual machine.
+          '';
+      };
+
+    virtualisation.diskImage =
+      mkOption {
+        type = types.str;
+        default = "./${config.system.name}.qcow2";
+        defaultText = literalExpression ''"./''${config.system.name}.qcow2"'';
+        description =
+          ''
+            Path to the disk image containing the root filesystem.
+            The image will be created on startup if it does not
+            exist.
+          '';
+      };
+
+    virtualisation.bootDevice =
+      mkOption {
+        type = types.path;
+        example = "/dev/vda";
+        description =
+          ''
+            The disk to be used for the root filesystem.
+          '';
+      };
+
+    virtualisation.emptyDiskImages =
+      mkOption {
+        type = types.listOf types.ints.positive;
+        default = [];
+        description =
+          ''
+            Additional disk images to provide to the VM. The value is
+            a list of sizes in megabytes, one per disk. These disks are
+            writable by the VM.
+          '';
+      };
+
+    virtualisation.graphics =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Whether to run QEMU with a graphics window, or in nographic mode.
+            The serial console is enabled either way, but this setting changes
+            the preferred console.
+          '';
+      };
+
+    virtualisation.resolution =
+      mkOption {
+        type = options.services.xserver.resolutions.type.nestedTypes.elemType;
+        default = { x = 1024; y = 768; };
+        description =
+          ''
+            The resolution of the virtual machine display.
+          '';
+      };
+
+    virtualisation.cores =
+      mkOption {
+        type = types.ints.positive;
+        default = 1;
+        description =
+          ''
+            Specify the number of cores the guest is permitted to use.
+            The number can be higher than the available cores on the
+            host system.
+          '';
+      };
+
+    virtualisation.sharedDirectories =
+      mkOption {
+        type = types.attrsOf
+          (types.submodule {
+            options.source = mkOption {
+              type = types.str;
+              description = "The path of the directory to share, can be a shell variable";
+            };
+            options.target = mkOption {
+              type = types.path;
+              description = "The mount point of the directory inside the virtual machine";
+            };
+          });
+        default = { };
+        example = {
+          my-share = { source = "/path/to/be/shared"; target = "/mnt/shared"; };
+        };
+        description =
+          ''
+            An attribute set of directories that will be shared with the
+            virtual machine using VirtFS (9P filesystem over VirtIO).
+            The attribute name will be used as the 9P mount tag.
+          '';
+      };
+
+    virtualisation.additionalPaths =
+      mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description =
+          ''
+            A list of paths whose closure should be made available to
+            the VM.
+
+            When 9p is used, the closure is registered in the Nix
+            database in the VM. All other paths in the host Nix store
+            appear in the guest Nix store as well, but are considered
+            garbage (because they are not registered in the Nix
+            database of the guest).
+
+            When <option>virtualisation.useNixStoreImage</option> is
+            set, the closure is copied to the Nix store image.
+          '';
+      };
+
+    virtualisation.forwardPorts = mkOption {
+      type = types.listOf
+        (types.submodule {
+          options.from = mkOption {
+            type = types.enum [ "host" "guest" ];
+            default = "host";
+            description =
+              ''
+                Controls the direction in which the ports are mapped:
+
+                - <literal>"host"</literal> means traffic from the host ports
+                is forwarded to the given guest port.
+
+                - <literal>"guest"</literal> means traffic from the guest ports
+                is forwarded to the given host port.
+              '';
+          };
+          options.proto = mkOption {
+            type = types.enum [ "tcp" "udp" ];
+            default = "tcp";
+            description = "The protocol to forward.";
+          };
+          options.host.address = mkOption {
+            type = types.str;
+            default = "";
+            description = "The IPv4 address of the host.";
+          };
+          options.host.port = mkOption {
+            type = types.port;
+            description = "The host port to be mapped.";
+          };
+          options.guest.address = mkOption {
+            type = types.str;
+            default = "";
+            description = "The IPv4 address on the guest VLAN.";
+          };
+          options.guest.port = mkOption {
+            type = types.port;
+            description = "The guest port to be mapped.";
+          };
+        });
+      default = [];
+      example = lib.literalExpression
+        ''
+        [ # forward local port 2222 -> 22, to ssh into the VM
+          { from = "host"; host.port = 2222; guest.port = 22; }
+
+          # forward local port 80 -> 10.0.2.10:80 in the VLAN
+          { from = "guest";
+            guest.address = "10.0.2.10"; guest.port = 80;
+            host.address = "127.0.0.1"; host.port = 80;
+          }
+        ]
+        '';
+      description =
+        ''
+          When using SLiRP user networking (the default), this option allows
+          forwarding ports to/from the host/guest.
+
+          <warning><para>
+            If the NixOS firewall on the virtual machine is enabled, you also
+            have to open the guest ports to enable the traffic between host and
+            guest.
+          </para></warning>
+
+          <note><para>Currently QEMU supports only IPv4 forwarding.</para></note>
+        '';
+    };
+
+    virtualisation.vlans =
+      mkOption {
+        type = types.listOf types.ints.unsigned;
+        default = [ 1 ];
+        example = [ 1 2 ];
+        description =
+          ''
+            Virtual networks to which the VM is connected.  Each
+            number <replaceable>N</replaceable> in this list causes
+            the VM to have a virtual Ethernet interface attached to a
+            separate virtual network on which it will be assigned IP
+            address
+            <literal>192.168.<replaceable>N</replaceable>.<replaceable>M</replaceable></literal>,
+            where <replaceable>M</replaceable> is the index of this VM
+            in the list of VMs.
+          '';
+      };
+
+    virtualisation.writableStore =
+      mkOption {
+        type = types.bool;
+        default = true; # FIXME
+        description =
+          ''
+            If enabled, the Nix store in the VM is made writable by
+            layering an overlay filesystem on top of the host's Nix
+            store.
+          '';
+      };
+
+    virtualisation.writableStoreUseTmpfs =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          ''
+            Use a tmpfs for the writable store instead of writing to the VM's
+            own filesystem.
+          '';
+      };
+
+    networking.primaryIPAddress =
+      mkOption {
+        type = types.str;
+        default = "";
+        internal = true;
+        description = "Primary IP address used in /etc/hosts.";
+      };
+
+    virtualisation.qemu = {
+      package =
+        mkOption {
+          type = types.package;
+          default = pkgs.qemu_kvm;
+          example = "pkgs.qemu_test";
+          description = "QEMU package to use.";
+        };
+
+      options =
+        mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "-vga std" ];
+          description = "Options passed to QEMU.";
+        };
+
+      consoles = mkOption {
+        type = types.listOf types.str;
+        default = let
+          consoles = [ "${qemu-common.qemuSerialDevice},115200n8" "tty0" ];
+        in if cfg.graphics then consoles else reverseList consoles;
+        example = [ "console=tty1" ];
+        description = ''
+          The output console devices to pass to the kernel command line via the
+          <literal>console</literal> parameter, the primary console is the last
+          item of this list.
+
+          By default it enables both serial console and
+          <literal>tty0</literal>. The preferred console (last one) is based on
+          the value of <option>virtualisation.graphics</option>.
+        '';
+      };
+
+      networkingOptions =
+        mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [
+            "-net nic,netdev=user.0,model=virtio"
+            "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+          ];
+          description = ''
+            Networking-related command-line options that should be passed to qemu.
+            The default is to use userspace networking (SLiRP).
+
+            If you override this option, be advised to keep
+            ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the example)
+            to keep the default runtime behaviour.
+          '';
+        };
+
+      drives =
+        mkOption {
+          type = types.listOf (types.submodule driveOpts);
+          description = "Drives passed to qemu.";
+          apply = addDeviceNames;
+        };
+
+      diskInterface =
+        mkOption {
+          type = types.enum [ "virtio" "scsi" "ide" ];
+          default = "virtio";
+          example = "scsi";
+          description = "The interface used for the virtual hard disks.";
+        };
+
+      guestAgent.enable =
+        mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Enable the Qemu guest agent.
+          '';
+        };
+
+      virtioKeyboard =
+        mkOption {
+          type = types.bool;
+          default = true;
+          description = ''
+            Enable the virtio-keyboard device.
+          '';
+        };
+    };
+
+    virtualisation.useNixStoreImage =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Build and use a disk image for the Nix store, instead of
+          accessing the host's store through 9p.
+
+          For applications which do a lot of reads from the store,
+          this can drastically improve performance, but at the cost of
+          disk space and image build time.
+        '';
+      };
+
+    virtualisation.useBootLoader =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will be booted using the
+            regular boot loader (i.e., GRUB 1 or 2).  This allows
+            testing of the boot loader.  If
+            disabled (the default), the VM directly boots the NixOS
+            kernel and initial ramdisk, bypassing the boot loader
+            altogether.
+          '';
+      };
+
+    virtualisation.useEFIBoot =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          ''
+            If enabled, the virtual machine will provide an EFI boot
+            manager.
+            useEFIBoot is ignored if useBootLoader == false.
+          '';
+      };
+
+    virtualisation.efiVars =
+      mkOption {
+        type = types.str;
+        default = "./${config.system.name}-efi-vars.fd";
+        defaultText = literalExpression ''"./''${config.system.name}-efi-vars.fd"'';
+        description =
+          ''
+            Path to the NVRAM image containing UEFI variables.  It will be
+            created on startup if it does not exist.
+          '';
+      };
+
+    virtualisation.bios =
+      mkOption {
+        type = types.nullOr types.package;
+        default = null;
+        description =
+          ''
+            An alternate BIOS (such as <package>qboot</package>) with which to start the VM.
+            Should contain a file named <literal>bios.bin</literal>.
+            If <literal>null</literal>, QEMU's builtin SeaBIOS will be used.
+          '';
+      };
+
+  };
+
+  config = {
+
+    assertions =
+      lib.concatLists (lib.flip lib.imap cfg.forwardPorts (i: rule:
+        [
+          { assertion = rule.from == "guest" -> rule.proto == "tcp";
+            message =
+              ''
+                Invalid virtualisation.forwardPorts.<entry ${toString i}>.proto:
+                  Guest forwarding supports only TCP connections.
+              '';
+          }
+          { assertion = rule.from == "guest" -> lib.hasPrefix "10.0.2." rule.guest.address;
+            message =
+              ''
+                Invalid virtualisation.forwardPorts.<entry ${toString i}>.guest.address:
+                  The address must be in the default VLAN (10.0.2.0/24).
+              '';
+          }
+        ]));
+
+    # Note [Disk layout with `useBootLoader`]
+    #
+    # If `useBootLoader = true`, we configure 2 drives:
+    # `/dev/?da` for the root disk, and `/dev/?db` for the boot disk
+    # which has the `/boot` partition and the boot loader.
+    # Concretely:
+    #
+    # * The second drive's image `disk.img` is created in `bootDisk = ...`
+    #   using a throwaway VM. Note that there the disk is always `/dev/vda`,
+    #   even though in the final VM it will be at `/dev/*b`.
+    # * The disks are attached in `virtualisation.qemu.drives`.
+    #   Their order makes them appear as devices `a`, `b`, etc.
+    # * `fileSystems."/boot"` is adjusted to be on device `b`.
+
+    # If `useBootLoader`, GRUB goes to the second disk, see
+    # note [Disk layout with `useBootLoader`].
+    boot.loader.grub.device = mkVMOverride (
+      if cfg.useBootLoader
+        then driveDeviceName 2 # second disk
+        else cfg.bootDevice
+    );
+    boot.loader.grub.gfxmodeBios = with cfg.resolution; "${toString x}x${toString y}";
+
+    boot.initrd.extraUtilsCommands =
+      ''
+        # We need mke2fs in the initrd.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
+      '';
+
+    boot.initrd.postDeviceCommands =
+      ''
+        # If the disk image appears to be empty, run mke2fs to
+        # initialise.
+        FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
+        if test -z "$FSTYPE"; then
+            mke2fs -t ext4 ${cfg.bootDevice}
+        fi
+      '';
+
+    boot.initrd.postMountCommands =
+      ''
+        # Mark this as a NixOS machine.
+        mkdir -p $targetRoot/etc
+        echo -n > $targetRoot/etc/NIXOS
+
+        # Fix the permissions on /tmp.
+        chmod 1777 $targetRoot/tmp
+
+        mkdir -p $targetRoot/boot
+
+        ${optionalString cfg.writableStore ''
+          echo "mounting overlay filesystem on /nix/store..."
+          mkdir -p -m 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+          mount -t overlay overlay $targetRoot/nix/store \
+            -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
+        ''}
+      '';
+
+    # After booting, register the closure of the paths in
+    # `virtualisation.additionalPaths' in the Nix database in the VM.  This
+    # allows Nix operations to work in the VM.  The path to the
+    # registration file is passed through the kernel command line to
+    # allow `system.build.toplevel' to be included.  (If we had a direct
+    # reference to ${regInfo} here, then we would get a cyclic
+    # dependency.)
+    boot.postBootCommands =
+      ''
+        if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
+        fi
+      '';
+
+    boot.initrd.availableKernelModules =
+      optional cfg.writableStore "overlay"
+      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
+
+    virtualisation.bootDevice = mkDefault (driveDeviceName 1);
+
+    virtualisation.additionalPaths = [ config.system.build.toplevel ];
+
+    virtualisation.sharedDirectories = {
+      nix-store = mkIf (!cfg.useNixStoreImage) {
+        source = builtins.storeDir;
+        target = "/nix/store";
+      };
+      xchg = {
+        source = ''"$TMPDIR"/xchg'';
+        target = "/tmp/xchg";
+      };
+      shared = {
+        source = ''"''${SHARED_DIR:-$TMPDIR/xchg}"'';
+        target = "/tmp/shared";
+      };
+    };
+
+    virtualisation.qemu.networkingOptions =
+      let
+        forwardingOptions = flip concatMapStrings cfg.forwardPorts
+          ({ proto, from, host, guest }:
+            if from == "host"
+              then "hostfwd=${proto}:${host.address}:${toString host.port}-" +
+                   "${guest.address}:${toString guest.port},"
+              else "'guestfwd=${proto}:${guest.address}:${toString guest.port}-" +
+                   "cmd:${pkgs.netcat}/bin/nc ${host.address} ${toString host.port}',"
+          );
+      in
+      [
+        "-net nic,netdev=user.0,model=virtio"
+        "-netdev user,id=user.0,${forwardingOptions}\"$QEMU_NET_OPTS\""
+      ];
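+    # For illustration: a forwardPorts entry
+    #   { from = "host"; host.port = 2222; guest.port = 22; }
+    # renders as "hostfwd=tcp::2222-:22," inside the -netdev option string.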
+
+    # FIXME: Consolidate this one day.
+    virtualisation.qemu.options = mkMerge [
+      (mkIf cfg.qemu.virtioKeyboard [
+        "-device virtio-keyboard"
+      ])
+      (mkIf pkgs.stdenv.hostPlatform.isx86 [
+        "-usb" "-device usb-tablet,bus=usb-bus.0"
+      ])
+      (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [
+        "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet"
+      ])
+      (mkIf (!cfg.useBootLoader) [
+        "-kernel ${config.system.build.toplevel}/kernel"
+        "-initrd ${config.system.build.toplevel}/initrd"
+        ''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"''
+      ])
+      (mkIf cfg.useEFIBoot [
+        "-drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
+        "-drive if=pflash,format=raw,unit=1,file=$NIX_EFI_VARS"
+      ])
+      (mkIf (cfg.bios != null) [
+        "-bios ${cfg.bios}/bios.bin"
+      ])
+      (mkIf (!cfg.graphics) [
+        "-nographic"
+      ])
+    ];
+
+    virtualisation.qemu.drives = mkMerge [
+      [{
+        name = "root";
+        file = ''"$NIX_DISK_IMAGE"'';
+        driveExtraOpts.cache = "writeback";
+        driveExtraOpts.werror = "report";
+      }]
+      (mkIf cfg.useNixStoreImage [{
+        name = "nix-store";
+        file = ''"$TMPDIR"/store.img'';
+        deviceExtraOpts.bootindex = if cfg.useBootLoader then "3" else "2";
+      }])
+      (mkIf cfg.useBootLoader [
+        # The order of this list determines the device names, see
+        # note [Disk layout with `useBootLoader`].
+        {
+          name = "boot";
+          file = ''"$TMPDIR"/disk.img'';
+          driveExtraOpts.media = "disk";
+          deviceExtraOpts.bootindex = "1";
+        }
+      ])
+      (imap0 (idx: _: {
+        file = "$(pwd)/empty${toString idx}.qcow2";
+        driveExtraOpts.werror = "report";
+      }) cfg.emptyDiskImages)
+    ];
+
+    # Mount the host filesystem via 9P, and bind-mount the Nix store
+    # of the host into our own filesystem.  We use mkVMOverride to
+    # allow this module to be applied to "normal" NixOS system
+    # configuration, where the regular value for the `fileSystems'
+    # attribute should be disregarded for the purpose of building a VM
+    # test image (since those filesystems don't exist in the VM).
+    fileSystems =
+    let
+      mkSharedDir = tag: share:
+        {
+          name =
+            if tag == "nix-store" && cfg.writableStore
+              then "/nix/.ro-store"
+              else share.target;
+          value.device = tag;
+          value.fsType = "9p";
+          value.neededForBoot = true;
+          value.options =
+            [ "trans=virtio" "version=9p2000.L"  "msize=${toString cfg.msize}" ]
+            ++ lib.optional (tag == "nix-store") "cache=loose";
+        };
+    in
+      mkVMOverride (cfg.fileSystems //
+      {
+        "/".device = cfg.bootDevice;
+
+        "/tmp" = mkIf config.boot.tmpOnTmpfs
+          { device = "tmpfs";
+            fsType = "tmpfs";
+            neededForBoot = true;
+            # Sync with systemd's tmp.mount;
+            options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmpOnTmpfsSize}" ];
+          };
+
+        "/nix/${if cfg.writableStore then ".ro-store" else "store"}" =
+          mkIf cfg.useNixStoreImage
+            { device = "${lookupDriveDeviceName "nix-store" cfg.qemu.drives}";
+              neededForBoot = true;
+              options = [ "ro" ];
+            };
+
+        "/nix/.rw-store" = mkIf (cfg.writableStore && cfg.writableStoreUseTmpfs)
+          { fsType = "tmpfs";
+            options = [ "mode=0755" ];
+            neededForBoot = true;
+          };
+
+        "/boot" = mkIf cfg.useBootLoader
+          # see note [Disk layout with `useBootLoader`]
+          { device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
+            fsType = "vfat";
+            noCheck = true; # fsck fails on a r/o filesystem
+          };
+      } // lib.mapAttrs' mkSharedDir cfg.sharedDirectories);
+
+    swapDevices = mkVMOverride [ ];
+    boot.initrd.luks.devices = mkVMOverride {};
+
+    # Don't run ntpd in the guest.  It should get the correct time from KVM.
+    services.timesyncd.enable = false;
+
+    services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
+
+    system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; }
+      ''
+        mkdir -p $out/bin
+        ln -s ${config.system.build.toplevel} $out/system
+        ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${config.system.name}-vm
+      '';
+
+    # When building a regular system configuration, override whatever
+    # video driver the host uses.
+    services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
+    services.xserver.defaultDepth = mkVMOverride 0;
+    services.xserver.resolutions = mkVMOverride [ cfg.resolution ];
+    services.xserver.monitorSection =
+      ''
+        # Set a higher refresh rate so that resolutions > 800x600 work.
+        HorizSync 30-140
+        VertRefresh 50-160
+      '';
+
+    # Wireless won't work in the VM.
+    networking.wireless.enable = mkVMOverride false;
+    services.connman.enable = mkVMOverride false;
+
+    # Speed up booting by not waiting for ARP.
+    networking.dhcpcd.extraConfig = "noarp";
+
+    networking.usePredictableInterfaceNames = false;
+
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isEnabled "VIRTIO_BLK")
+        (isEnabled "VIRTIO_PCI")
+        (isEnabled "VIRTIO_NET")
+        (isEnabled "EXT4_FS")
+        (isEnabled "NET_9P_VIRTIO")
+        (isEnabled "9P_FS")
+        (isYes "BLK_DEV")
+        (isYes "PCI")
+        (isYes "NETDEVICES")
+        (isYes "NET_CORE")
+        (isYes "INET")
+        (isYes "NETWORK_FILESYSTEMS")
+      ] ++ optionals (!cfg.graphics) [
+        (isYes "SERIAL_8250_CONSOLE")
+        (isYes "SERIAL_8250")
+      ] ++ optionals (cfg.writableStore) [
+        (isEnabled "OVERLAY_FS")
+      ];
+
+  };
+
+  # uses types of services/x11/xserver.nix
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixos/modules/virtualisation/railcar.nix b/nixos/modules/virtualisation/railcar.nix
new file mode 100644
index 00000000000..e719e25650d
--- /dev/null
+++ b/nixos/modules/virtualisation/railcar.nix
@@ -0,0 +1,124 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.railcar;
+  generateUnit = name: containerConfig:
+    let
+      container = pkgs.ociTools.buildContainer {
+        args = [
+          (pkgs.writeShellScript "run.sh" containerConfig.cmd).outPath
+        ];
+      };
+    in
+      nameValuePair "railcar-${name}" {
+        enable = true;
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = ''
+            ${cfg.package}/bin/railcar -r ${cfg.stateDir} run ${name} -b ${container}
+          '';
+          Type = containerConfig.runType;
+        };
+      };
+  mount = with types; (submodule {
+    options = {
+      type = mkOption {
+        type = str;
+        default = "none";
+        description = ''
+          The type of the filesystem to be mounted.
+          Linux: filesystem types supported by the kernel as listed in
+          `/proc/filesystems` (e.g., "minix", "ext2", "ext3", "jfs", "xfs",
+          "reiserfs", "msdos", "proc", "nfs", "iso9660"). For bind mounts
+          (when options include either bind or rbind), the type is a dummy,
+          often "none" (not listed in /proc/filesystems).
+        '';
+      };
+      source = mkOption {
+        type = str;
+        description = "Source for the in-container mount";
+      };
+      options = mkOption {
+        type = listOf str;
+        default = [ "bind" ];
+        description = ''
+          Mount options of the filesystem to be used.
+
+          Supported options are listed in the mount(8) man page. Note that
+          both filesystem-independent and filesystem-specific options
+          are listed.
+        '';
+      };
+    };
+  });
+in
+{
+  options.services.railcar = {
+    enable = mkEnableOption "railcar";
+
+    containers = mkOption {
+      default = {};
+      description = "Declarative container configuration";
+      type = with types; attrsOf (submodule ({ name, config, ... }: {
+        options = {
+          cmd = mkOption {
+            type = types.lines;
+            description = "Command or script to run inside the container";
+          };
+
+          mounts = mkOption {
+            type = with types; attrsOf mount;
+            default = {};
+            description = ''
+              A set of mounts inside the container.
+
+              The defaults have been chosen for simple bind mounts, meaning
+              that you only need to provide the "source" parameter.
+            '';
+            example = { "/data" = { source = "/var/lib/data"; }; };
+          };
+
+          runType = mkOption {
+            type = types.str;
+            default = "oneshot";
+            description = "The systemd service run type";
+          };
+
+          os = mkOption {
+            type = types.str;
+            default = "linux";
+            description = "OS type of the container";
+          };
+
+          arch = mkOption {
+            type = types.str;
+            default = "x86_64";
+            description = "Computer architecture type of the container";
+          };
+        };
+      }));
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/railcar";
+      description = "Railcar persistent state directory";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.railcar;
+      defaultText = literalExpression "pkgs.railcar";
+      description = "Railcar package to use";
+    };
+  };
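+
+  # A minimal usage sketch (the container name and paths are illustrative,
+  # not part of the module):
+  #
+  #   services.railcar = {
+  #     enable = true;
+  #     containers.hello = {
+  #       cmd = "echo hello from railcar";
+  #       mounts."/data" = { source = "/var/lib/hello-data"; };
+  #     };
+  #   };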
+
+  config = mkIf cfg.enable {
+    systemd.services = flip mapAttrs' cfg.containers (name: containerConfig:
+      generateUnit name containerConfig
+    );
+  };
+}
+
diff --git a/nixos/modules/virtualisation/spice-usb-redirection.nix b/nixos/modules/virtualisation/spice-usb-redirection.nix
new file mode 100644
index 00000000000..255327f2622
--- /dev/null
+++ b/nixos/modules/virtualisation/spice-usb-redirection.nix
@@ -0,0 +1,26 @@
+{ config, pkgs, lib, ... }:
+{
+  options.virtualisation.spiceUSBRedirection.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = false;
+    description = ''
+      Install the SPICE USB redirection helper with setuid
+      privileges. This allows unprivileged users to pass USB devices
+      connected to this machine to libvirt VMs, both local and
+      remote. Note that this allows users arbitrary access to USB
+      devices.
+    '';
+  };
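+
+  # Usage sketch: enabling is a single switch; users can then redirect USB
+  # devices from SPICE clients such as virt-viewer or virt-manager.
+  #
+  #   virtualisation.spiceUSBRedirection.enable = true;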
+
+  config = lib.mkIf config.virtualisation.spiceUSBRedirection.enable {
+    environment.systemPackages = [ pkgs.spice-gtk ]; # For polkit actions
+    security.wrappers.spice-client-glib-usb-acl-helper = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_fowner+ep";
+      source = "${pkgs.spice-gtk}/bin/spice-client-glib-usb-acl-helper";
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.lheckemann ];
+}
diff --git a/nixos/modules/virtualisation/vagrant-guest.nix b/nixos/modules/virtualisation/vagrant-guest.nix
new file mode 100644
index 00000000000..263b1ebca08
--- /dev/null
+++ b/nixos/modules/virtualisation/vagrant-guest.nix
@@ -0,0 +1,58 @@
+# Minimal configuration that Vagrant depends on.
+
+{ config, pkgs, ... }:
+let
+  # Vagrant uses an insecure shared private key by default, but we
+  # don't use the authorizedKeys attribute under users because it should be
+  # removed on first boot and replaced with a random one. This script sets
+  # the correct permissions and installs the temporary key if no
+  # ~/.ssh/authorized_keys exists.
+  install-vagrant-ssh-key = pkgs.writeScriptBin "install-vagrant-ssh-key" ''
+    #!${pkgs.runtimeShell}
+    if [ ! -e ~/.ssh/authorized_keys ]; then
+      mkdir -m 0700 -p ~/.ssh
+      echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" >> ~/.ssh/authorized_keys
+      chmod 0600 ~/.ssh/authorized_keys
+    fi
+  '';
+in
+{
+  # Enable the OpenSSH daemon.
+  services.openssh.enable = true;
+
+  # Packages used by Vagrant
+  environment.systemPackages = with pkgs; [
+    findutils
+    iputils
+    nettools
+    netcat
+    nfs-utils
+    rsync
+  ];
+
+  users.extraUsers.vagrant = {
+    isNormalUser    = true;
+    createHome      = true;
+    description     = "Vagrant user account";
+    extraGroups     = [ "users" "wheel" ];
+    home            = "/home/vagrant";
+    password        = "vagrant";
+    useDefaultShell = true;
+    uid             = 1000;
+  };
+
+  systemd.services.install-vagrant-ssh-key = {
+    description = "Vagrant SSH key install (if needed)";
+    after = [ "fs.target" ];
+    wants = [ "fs.target" ];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${install-vagrant-ssh-key}/bin/install-vagrant-ssh-key";
+      User = "vagrant";
+      # So it won't be (needlessly) restarted:
+      RemainAfterExit = true;
+    };
+  };
+
+  security.sudo.wheelNeedsPassword = false;
+}
diff --git a/nixos/modules/virtualisation/vagrant-virtualbox-image.nix b/nixos/modules/virtualisation/vagrant-virtualbox-image.nix
new file mode 100644
index 00000000000..2a921894ab6
--- /dev/null
+++ b/nixos/modules/virtualisation/vagrant-virtualbox-image.nix
@@ -0,0 +1,60 @@
+# Vagrant + VirtualBox
+
+{ config, pkgs, ... }:
+
+{
+  imports = [
+    ./vagrant-guest.nix
+    ./virtualbox-image.nix
+  ];
+
+  virtualbox.params = {
+    audio = "none";
+    audioin = "off";
+    audioout = "off";
+    usb = "off";
+    usbehci = "off";
+  };
+  sound.enable = false;
+  documentation.man.enable = false;
+  documentation.nixos.enable = false;
+
+  users.extraUsers.vagrant.extraGroups = [ "vboxsf" ];
+
+  # Generate the box in the v1 format, which is much simpler to produce.
+  # https://www.vagrantup.com/docs/boxes/format.html
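+  #
+  # Sketch of consuming the result (the box name is illustrative; building
+  # assumes a NixOS configuration that imports this module):
+  #
+  #   nix-build '<nixpkgs/nixos>' -A config.system.build.vagrantVirtualbox
+  #   vagrant box add nixos ./result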
+  system.build.vagrantVirtualbox = pkgs.runCommand
+    "virtualbox-vagrant.box"
+    {}
+    ''
+      mkdir workdir
+      cd workdir
+
+      # 1. create the metadata.json file
+      echo '{"provider":"virtualbox"}' > metadata.json
+
+      # 2. create a default Vagrantfile config
+      cat <<VAGRANTFILE > Vagrantfile
+      Vagrant.configure("2") do |config|
+        config.vm.base_mac = "0800275F0936"
+      end
+      VAGRANTFILE
+
+      # 3. add the exported VM files
+      tar xvf ${config.system.build.virtualBoxOVA}/*.ova
+
+      # 4. move the ovf to the fixed location
+      mv *.ovf box.ovf
+
+      # 5. generate OVF manifest file
+      rm *.mf
+      touch box.mf
+      for fname in *; do
+        checksum=$(sha256sum $fname | cut -d' ' -f 1)
+        echo "SHA256($fname)= $checksum" >> box.mf
+      done
+
+      # 6. compress everything back together
+      tar --owner=0 --group=0 --sort=name --numeric-owner -czf $out .
+    '';
+}
diff --git a/nixos/modules/virtualisation/virtualbox-guest.nix b/nixos/modules/virtualisation/virtualbox-guest.nix
new file mode 100644
index 00000000000..7b55b3b9759
--- /dev/null
+++ b/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -0,0 +1,93 @@
+# Module for VirtualBox guests.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.virtualbox.guest;
+  kernel = config.boot.kernelPackages;
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.virtualbox.guest = {
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = "Whether to enable the VirtualBox service and other guest additions.";
+    };
+
+    x11 = mkOption {
+      default = true;
+      type = types.bool;
+      description = "Whether to enable x11 graphics";
+    };
+  };
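+
+  # Typical guest configuration, a sketch:
+  #
+  #   virtualisation.virtualbox.guest.enable = true;
+  #   # x11 defaults to true; set it to false for headless guests.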
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+    assertions = [{
+      assertion = pkgs.stdenv.hostPlatform.isx86;
+      message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    }];
+
+    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.supportedFilesystems = [ "vboxsf" ];
+    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+
+    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+
+    systemd.services.virtualbox =
+      { description = "VirtualBox Guest Services";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "dev-vboxguest.device" ];
+        after = [ "dev-vboxguest.device" ];
+
+        unitConfig.ConditionVirtualization = "oracle";
+
+        serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
+      };
+
+    services.udev.extraRules =
+      ''
+        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+        # should restrict this to logged-in users.
+        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+        # Allow systemd dependencies on vboxguest.
+        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+      '';
+  } (mkIf cfg.x11 {
+    services.xserver.videoDrivers = [ "vmware" "virtualbox" "modesetting" ];
+
+    services.xserver.config =
+      ''
+        Section "InputDevice"
+          Identifier "VBoxMouse"
+          Driver "vboxmouse"
+        EndSection
+      '';
+
+    services.xserver.serverLayoutSection =
+      ''
+        InputDevice "VBoxMouse"
+      '';
+
+    services.xserver.displayManager.sessionCommands =
+      ''
+        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
+          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
+      '';
+  })]);
+
+}
diff --git a/nixos/modules/virtualisation/virtualbox-host.nix b/nixos/modules/virtualisation/virtualbox-host.nix
new file mode 100644
index 00000000000..2acf54aae2e
--- /dev/null
+++ b/nixos/modules/virtualisation/virtualbox-host.nix
@@ -0,0 +1,168 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.virtualbox.host;
+
+  virtualbox = cfg.package.override {
+    inherit (cfg) enableHardening headless enableWebService;
+    extensionPack = if cfg.enableExtensionPack then pkgs.virtualboxExtpack else null;
+  };
+
+  kernelModules = config.boot.kernelPackages.virtualbox.override {
+    inherit virtualbox;
+  };
+
+in
+
+{
+  options.virtualisation.virtualbox.host = {
+    enable = mkEnableOption "VirtualBox" // {
+      description = ''
+        Whether to enable VirtualBox.
+
+        <note><para>
+          In order to pass USB devices from the host to the guests, the user
+          needs to be in the <literal>vboxusers</literal> group.
+        </para></note>
+      '';
+    };
+
+    enableExtensionPack = mkEnableOption "VirtualBox extension pack" // {
+      description = ''
+        Whether to install the Oracle Extension Pack for VirtualBox.
+
+        <important><para>
+          You must set <literal>nixpkgs.config.allowUnfree = true</literal> in
+          order to use this. This requires you to accept the VirtualBox PUEL.
+        </para></important>
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.virtualbox;
+      defaultText = literalExpression "pkgs.virtualbox";
+      description = ''
+        Which VirtualBox package to use.
+      '';
+    };
+
+    addNetworkInterface = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Automatically set up a vboxnet0 host-only network interface.
+      '';
+    };
+
+    enableHardening = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Enable hardened VirtualBox, which ensures that only the binaries in the
+        system path get access to the devices exposed by the kernel modules,
+        rather than all users in the vboxusers group.
+
+        <important><para>
+          Disabling this can put your system's security at risk, as local users
+          in the vboxusers group can tamper with the VirtualBox device files.
+        </para></important>
+      '';
+    };
+
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Use a VirtualBox installation without the GUI and Qt dependency. Useful
+        on servers, or when virtual machines are controlled only via SSH.
+      '';
+    };
+
+    enableWebService = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Build the VirtualBox web service tool (vboxwebsrv), which allows managing VMs via web frontends. Useful for headless servers.
+      '';
+    };
+  };
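+
+  # Example host configuration (a sketch; the user name is illustrative):
+  #
+  #   virtualisation.virtualbox.host.enable = true;
+  #   # Needed for USB pass-through, per the note above:
+  #   users.users.alice.extraGroups = [ "vboxusers" ];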
+
+  config = mkIf cfg.enable (mkMerge [{
+    warnings = mkIf (config.nixpkgs.config.virtualbox.enableExtensionPack or false)
+      ["'nixpkgs.virtualbox.enableExtensionPack' has no effect, please use 'virtualisation.virtualbox.host.enableExtensionPack'"];
+    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
+    boot.extraModulePackages = [ kernelModules ];
+    environment.systemPackages = [ virtualbox ];
+
+    security.wrappers = let
+      mkSuid = program: {
+        source = "${virtualbox}/libexec/virtualbox/${program}";
+        owner = "root";
+        group = "vboxusers";
+        setuid = true;
+      };
+    in mkIf cfg.enableHardening
+      (builtins.listToAttrs (map (x: { name = x; value = mkSuid x; }) [
+      "VBoxHeadless"
+      "VBoxNetAdpCtl"
+      "VBoxNetDHCP"
+      "VBoxNetNAT"
+      "VBoxSDL"
+      "VBoxVolInfo"
+      "VirtualBoxVM"
+    ]));
+
+    users.groups.vboxusers.gid = config.ids.gids.vboxusers;
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
+        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+        SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+      '';
+
+    # Since we lack the right setuid/setcap binaries, set up a host-only network by default.
+  } (mkIf cfg.addNetworkInterface {
+    systemd.services.vboxnet0 =
+      { description = "VirtualBox vboxnet0 Interface";
+        requires = [ "dev-vboxnetctl.device" ];
+        after = [ "dev-vboxnetctl.device" ];
+        wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
+        path = [ virtualbox ];
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.PrivateTmp = true;
+        environment.VBOX_USER_HOME = "/tmp";
+        script =
+          ''
+            if ! [ -e /sys/class/net/vboxnet0 ]; then
+              VBoxManage hostonlyif create
+              cat /tmp/VBoxSVC.log >&2
+            fi
+          '';
+        postStop =
+          ''
+            VBoxManage hostonlyif remove vboxnet0
+          '';
+      };
+
+    networking.interfaces.vboxnet0.ipv4.addresses = [{ address = "192.168.56.1"; prefixLength = 24; }];
+    # Make sure NetworkManager won't assume this interface being up
+    # means we have internet access.
+    networking.networkmanager.unmanaged = ["vboxnet0"];
+  }) (mkIf config.networking.useNetworkd {
+    systemd.network.networks."40-vboxnet0".extraConfig = ''
+      [Link]
+      RequiredForOnline=no
+    '';
+  })
+
+]);
+}
diff --git a/nixos/modules/virtualisation/virtualbox-image.nix b/nixos/modules/virtualisation/virtualbox-image.nix
new file mode 100644
index 00000000000..1a0c4df42cb
--- /dev/null
+++ b/nixos/modules/virtualisation/virtualbox-image.nix
@@ -0,0 +1,215 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualbox;
+
+in {
+
+  options = {
+    virtualbox = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 50 * 1024;
+        description = ''
+          The size of the VirtualBox base image in MiB.
+        '';
+      };
+      baseImageFreeSpace = mkOption {
+        type = with types; int;
+        default = 30 * 1024;
+        description = ''
+          Free space in the VirtualBox base image in MiB.
+        '';
+      };
+      memorySize = mkOption {
+        type = types.int;
+        default = 1536;
+        description = ''
+          The amount of RAM the VirtualBox appliance can use in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-ova-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the VirtualBox appliance.
+        '';
+      };
+      vmName = mkOption {
+        type = types.str;
+        default = "NixOS ${config.system.nixos.label} (${pkgs.stdenv.hostPlatform.system})";
+        description = ''
+          The name of the VirtualBox appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.ova";
+        description = ''
+          The file name of the VirtualBox appliance.
+        '';
+      };
+      params = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = {
+          audio = "alsa";
+          rtcuseutc = "on";
+          usb = "off";
+        };
+        description = ''
+          Parameters passed to the VirtualBox appliance.
+
+          Run <literal>VBoxManage modifyvm --help</literal> to see more options.
+        '';
+      };
+      exportParams = mkOption {
+        type = with types; listOf (oneOf [ str int bool (listOf str) ]);
+        example = [
+          "--vsys" "0" "--vendor" "ACME Inc."
+        ];
+        default = [];
+        description = ''
+          Parameters passed to the VirtualBox export command.
+
+          Run <literal>VBoxManage export --help</literal> to see more options.
+        '';
+      };
+      extraDisk = mkOption {
+        description = ''
+          Optional extra disk/HDD configuration.
+          The disk is created as an ext4 partition on a separate VMDK file.
+        '';
+        default = null;
+        example = {
+          label = "storage";
+          mountPoint = "/home/demo/storage";
+          size = 100 * 1024;
+        };
+        type = types.nullOr (types.submodule {
+          options = {
+            size = mkOption {
+              type = types.int;
+              description = "Size in MiB";
+            };
+            label = mkOption {
+              type = types.str;
+              default = "vm-extra-storage";
+              description = "Label for the disk partition";
+            };
+            mountPoint = mkOption {
+              type = types.str;
+              description = "Path where to mount this disk.";
+            };
+          };
+        });
+      };
+    };
+  };
+
+  config = {
+
+    virtualbox.params = mkMerge [
+      (mapAttrs (name: mkDefault) {
+        acpi = "on";
+        vram = 32;
+        nictype1 = "virtio";
+        nic1 = "nat";
+        audiocontroller = "ac97";
+        audio = "alsa";
+        audioout = "on";
+        graphicscontroller = "vmsvga";
+        rtcuseutc = "on";
+        usb = "on";
+        usbehci = "on";
+        mouse = "usbtablet";
+      })
+      (mkIf (pkgs.stdenv.hostPlatform.system == "i686-linux") { pae = "on"; })
+    ];
+
+    system.build.virtualBoxOVA = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+
+      inherit pkgs lib config;
+      partitionTableType = "legacy";
+      diskSize = cfg.baseImageSize;
+      additionalSpace = "${toString cfg.baseImageFreeSpace}M";
+
+      postVM =
+        ''
+          export HOME=$PWD
+          export PATH=${pkgs.virtualbox}/bin:$PATH
+
+          echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
+          VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
+
+          ${optionalString (cfg.extraDisk != null) ''
+            echo "creating extra disk: data-disk.raw"
+            dataDiskImage=data-disk.raw
+            truncate -s ${toString cfg.extraDisk.size}M $dataDiskImage
+
+            parted --script $dataDiskImage -- \
+              mklabel msdos \
+              mkpart primary ext4 1MiB -1
+            eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
+            mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
+            echo "creating extra disk: data-disk.vmdk"
+            VBoxManage internalcommands createrawvmdk -filename data-disk.vmdk -rawdisk $dataDiskImage
+          ''}
+
+          echo "creating VirtualBox VM..."
+          vmName="${cfg.vmName}";
+          VBoxManage createvm --name "$vmName" --register \
+            --ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
+          VBoxManage modifyvm "$vmName" \
+            --memory ${toString cfg.memorySize} \
+            ${lib.cli.toGNUCommandLineShell { } cfg.params}
+          VBoxManage storagectl "$vmName" --name SATA --add sata --portcount 4 --bootable on --hostiocache on
+          VBoxManage storageattach "$vmName" --storagectl SATA --port 0 --device 0 --type hdd \
+            --medium disk.vmdk
+          ${optionalString (cfg.extraDisk != null) ''
+            VBoxManage storageattach "$vmName" --storagectl SATA --port 1 --device 0 --type hdd \
+            --medium data-disk.vmdk
+          ''}
+
+          echo "exporting VirtualBox VM..."
+          mkdir -p $out
+          fn="$out/${cfg.vmFileName}"
+          VBoxManage export "$vmName" --output "$fn" --options manifest ${escapeShellArgs cfg.exportParams}
+
+          rm -v $diskImage
+
+          mkdir -p $out/nix-support
+          echo "file ova $fn" >> $out/nix-support/hydra-build-products
+        '';
+    };
+
+    fileSystems = {
+      "/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+    } // (lib.optionalAttrs (cfg.extraDisk != null) {
+      ${cfg.extraDisk.mountPoint} = {
+        device = "/dev/disk/by-label/" + cfg.extraDisk.label;
+        autoResize = true;
+        fsType = "ext4";
+      };
+    });
+
+    boot.growPartition = true;
+    boot.loader.grub.device = "/dev/sda";
+
+    swapDevices = [{
+      device = "/var/swap";
+      size = 2048;
+    }];
+
+    virtualisation.virtualbox.guest.enable = true;
+
+  };
+}
diff --git a/nixos/modules/virtualisation/vmware-guest.nix b/nixos/modules/virtualisation/vmware-guest.nix
new file mode 100644
index 00000000000..3caed746ca9
--- /dev/null
+++ b/nixos/modules/virtualisation/vmware-guest.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vmware.guest;
+  open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
+  xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
+  ];
+
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption "VMWare Guest Support";
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Whether to disable X11-related features.";
+    };
+  };
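+
+  # Sketch for a headless VMware guest (no X11 integration):
+  #
+  #   virtualisation.vmware.guest = {
+  #     enable = true;
+  #     headless = true;
+  #   };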
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.hostPlatform.isx86;
+      message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } ];
+
+    boot.initrd.availableKernelModules = [ "mptspi" ];
+    boot.initrd.kernelModules = [ "vmw_pvscsi" ];
+
+    environment.systemPackages = [ open-vm-tools ];
+
+    systemd.services.vmware =
+      { description = "VMWare Guest Service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "display-manager.service" ];
+        unitConfig.ConditionVirtualization = "vmware";
+        serviceConfig.ExecStart = "${open-vm-tools}/bin/vmtoolsd";
+      };
+
+    # Mount the vmblock for drag-and-drop and copy-and-paste.
+    systemd.mounts = mkIf (!cfg.headless) [
+      {
+        description = "VMware vmblock fuse mount";
+        documentation = [ "https://github.com/vmware/open-vm-tools/blob/master/open-vm-tools/vmblock-fuse/design.txt" ];
+        unitConfig.ConditionVirtualization = "vmware";
+        what = "${open-vm-tools}/bin/vmware-vmblock-fuse";
+        where = "/run/vmblock-fuse";
+        type = "fuse";
+        options = "subtype=vmware-vmblock,default_permissions,allow_other";
+        wantedBy = [ "multi-user.target" ];
+      }
+    ];
+
+    security.wrappers.vmware-user-suid-wrapper = mkIf (!cfg.headless) {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${open-vm-tools}/bin/vmware-user-suid-wrapper";
+      };
+
+    environment.etc.vmware-tools.source = "${open-vm-tools}/etc/vmware-tools/*";
+
+    services.xserver = mkIf (!cfg.headless) {
+      videoDrivers = mkOverride 50 [ "vmware" ];
+      modules = [ xf86inputvmmouse ];
+
+      config = ''
+          Section "InputClass"
+            Identifier "VMMouse"
+            MatchDevicePath "/dev/input/event*"
+            MatchProduct "ImPS/2 Generic Wheel Mouse"
+            Driver "vmmouse"
+          EndSection
+        '';
+
+      displayManager.sessionCommands = ''
+          ${open-vm-tools}/bin/vmware-user-suid-wrapper
+        '';
+    };
+
+    services.udev.packages = [ open-vm-tools ];
+  };
+}
diff --git a/nixos/modules/virtualisation/vmware-image.nix b/nixos/modules/virtualisation/vmware-image.nix
new file mode 100644
index 00000000000..f6cd12e2bb7
--- /dev/null
+++ b/nixos/modules/virtualisation/vmware-image.nix
@@ -0,0 +1,91 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  boolToStr = value: if value then "on" else "off";
+  cfg = config.vmware;
+
+  subformats = [
+    "monolithicSparse"
+    "monolithicFlat"
+    "twoGbMaxExtentSparse"
+    "twoGbMaxExtentFlat"
+    "streamOptimized"
+  ];
+
+in {
+  options = {
+    vmware = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 2048;
+        description = ''
+          The size of the VMware base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-vmware-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = ''
+          The name of the derivation for the VMware appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vmdk";
+        description = ''
+          The file name of the VMware appliance.
+        '';
+      };
+      vmSubformat = mkOption {
+        type = types.enum subformats;
+        default = "monolithicSparse";
+        description = "Specifies which VMDK subformat to use.";
+      };
+      vmCompat6 = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = "Create a VMDK version 6 image (instead of version 4).";
+      };
+    };
+  };
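+
+  # Sketch: build a stream-optimized VMDK, suitable for OVF packaging
+  # (assumes a NixOS configuration that imports this module):
+  #
+  #   vmware.vmSubformat = "streamOptimized";
+  #
+  # and then build the config.system.build.vmwareImage attribute.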
+
+  config = {
+    system.build.vmwareImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o compat6=${boolToStr cfg.vmCompat6},subformat=${cfg.vmSubformat} -O vmdk $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.vmware.guest.enable = true;
+  };
+}
diff --git a/nixos/modules/virtualisation/waydroid.nix b/nixos/modules/virtualisation/waydroid.nix
new file mode 100644
index 00000000000..4fc798ff39f
--- /dev/null
+++ b/nixos/modules/virtualisation/waydroid.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.waydroid;
+  kernelPackages = config.boot.kernelPackages;
+  waydroidGbinderConf = pkgs.writeText "waydroid.conf" ''
+    [Protocol]
+    /dev/binder = aidl2
+    /dev/vndbinder = aidl2
+    /dev/hwbinder = hidl
+
+    [ServiceManager]
+    /dev/binder = aidl2
+    /dev/vndbinder = aidl2
+    /dev/hwbinder = hidl
+  '';
+
+in
+{
+
+  options.virtualisation.waydroid = {
+    enable = mkEnableOption "Waydroid";
+  };
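+
+  # Usage sketch: enable the module, then (imperatively) fetch an Android
+  # image with `waydroid init` and start a session with `waydroid session start`.
+  #
+  #   virtualisation.waydroid.enable = true;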
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
+      message = "Waydroid needs user namespace support to work properly";
+    };
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isEnabled "ANDROID_BINDER_IPC")
+      (isEnabled "ANDROID_BINDERFS")
+      (isEnabled "ASHMEM")
+    ];
+
+    /* NOTE: we always set this flag, even if CONFIG_PSI_DEFAULT_DISABLED is
+      not set, because reading the kernel config is not always possible, and on
+      kernels where PSI is already enabled by default the flag is a no-op.
+    */
+    boot.kernelParams = [ "psi=1" ];
+
+    environment.etc."gbinder.d/waydroid.conf".source = waydroidGbinderConf;
+
+    environment.systemPackages = with pkgs; [ waydroid ];
+
+    networking.firewall.trustedInterfaces = [ "waydroid0" ];
+
+    virtualisation.lxc.enable = true;
+
+    systemd.services.waydroid-container = {
+      description = "Waydroid Container";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [ getent iptables iproute kmod nftables util-linux which ];
+
+      unitConfig = {
+        ConditionPathExists = "/var/lib/waydroid/lxc/waydroid";
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.waydroid}/bin/waydroid container start";
+        ExecStop = "${pkgs.waydroid}/bin/waydroid container stop";
+        ExecStopPost = "${pkgs.waydroid}/bin/waydroid session stop";
+      };
+    };
+  };
+
+}
diff --git a/nixos/modules/virtualisation/xe-guest-utilities.nix b/nixos/modules/virtualisation/xe-guest-utilities.nix
new file mode 100644
index 00000000000..25ccbaebc07
--- /dev/null
+++ b/nixos/modules/virtualisation/xe-guest-utilities.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.xe-guest-utilities;
+in {
+  options = {
+    services.xe-guest-utilities = {
+      enable = mkEnableOption "the Xen guest utilities daemon";
+    };
+  };
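+
+  # Usage sketch, for NixOS guests running on XenServer/XCP-ng hosts:
+  #
+  #   services.xe-guest-utilities.enable = true;
+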
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.xe-guest-utilities ];
+    systemd.tmpfiles.rules = [ "d /run/xenstored 0755 - - -" ];
+
+    systemd.services.xe-daemon = {
+      description = "xen daemon file";
+      wantedBy    = [ "multi-user.target" ];
+      after = [ "xe-linux-distribution.service" ];
+      requires = [ "proc-xen.mount" ];
+      path = [ pkgs.coreutils pkgs.iproute2 ];
+      serviceConfig = {
+        PIDFile = "/run/xe-daemon.pid";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-daemon -p /run/xe-daemon.pid";
+        ExecStop = "${pkgs.procps}/bin/pkill -TERM -F /run/xe-daemon.pid";
+      };
+    };
+
+    systemd.services.xe-linux-distribution = {
+      description = "xen linux distribution service";
+      wantedBy    = [ "multi-user.target" ];
+      before = [ "xend.service" ];
+      path = [ pkgs.xe-guest-utilities pkgs.coreutils pkgs.gawk pkgs.gnused ];
+      serviceConfig = {
+        Type = "simple";
+        RemainAfterExit = "yes";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-linux-distribution /var/cache/xe-linux-distribution";
+      };
+    };
+
+    systemd.mounts = [
+      { description = "Mount /proc/xen files";
+        what = "xenfs";
+        where = "/proc/xen";
+        type = "xenfs";
+        unitConfig = {
+          ConditionPathExists = "/proc/xen";
+          RefuseManualStop = "true";
+        };
+      }
+    ];
+  };
+}
diff --git a/nixos/modules/virtualisation/xen-dom0.nix b/nixos/modules/virtualisation/xen-dom0.nix
new file mode 100644
index 00000000000..975eed10cd2
--- /dev/null
+++ b/nixos/modules/virtualisation/xen-dom0.nix
@@ -0,0 +1,453 @@
+# Xen hypervisor (Dom0) support.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.xen;
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "xen" "qemu" ] "You don't need this option anymore, it will work without it.")
+    (mkRenamedOptionModule [ "virtualisation" "xen" "qemu-package" ] [ "virtualisation" "xen" "package-qemu" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    virtualisation.xen.enable =
+      mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          ''
+            Setting this option enables the Xen hypervisor, a
+            virtualisation technology that allows multiple virtual
+            machines, known as <emphasis>domains</emphasis>, to run
+            concurrently on the physical machine.  NixOS runs as the
+            privileged <emphasis>Domain 0</emphasis>.  This option
+            requires a reboot to take effect.
+          '';
+      };
+
+    virtualisation.xen.package = mkOption {
+      type = types.package;
+      defaultText = literalExpression "pkgs.xen";
+      example = literalExpression "pkgs.xen-light";
+      description = ''
+        The package providing the Xen binaries.
+      '';
+      relatedPackages = [ "xen" "xen-light" ];
+    };
+
+    virtualisation.xen.package-qemu = mkOption {
+      type = types.package;
+      defaultText = literalExpression "pkgs.xen";
+      example = literalExpression "pkgs.qemu_xen-light";
+      description = ''
+        The package providing the QEMU binaries used for the dom0 QEMU and xendomains.
+      '';
+      relatedPackages = [ "xen"
+                          { name = "qemu_xen-light"; comment = "For use with pkgs.xen-light."; }
+                        ];
+    };
+
+    virtualisation.xen.bootParams =
+      mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description =
+          ''
+            Parameters passed to the Xen hypervisor at boot time.
+          '';
+      };
+
+    virtualisation.xen.domain0MemorySize =
+      mkOption {
+        default = 0;
+        example = 512;
+        type = types.addCheck types.int (n: n >= 0);
+        description =
+          ''
+            Amount of memory (in MiB) allocated to Domain 0 on boot.
+            If set to 0, all memory is assigned to Domain 0.
+          '';
+      };
+
+    virtualisation.xen.bridge = {
+        name = mkOption {
+          default = "xenbr0";
+          type = types.str;
+          description = ''
+              Name of the bridge that the Xen domUs connect to.
+            '';
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "172.16.0.1";
+          description = ''
+            IPv4 address of the bridge.
+          '';
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= 32);
+          default = 16;
+          description = ''
+            Subnet mask of the bridge interface, specified as the number of
+            bits in the prefix (e.g. <literal>24</literal>).
+            A DHCP server will provide IP addresses for the whole remaining
+            subnet.
+          '';
+        };
+
+        forwardDns = mkOption {
+          type = types.bool;
+          default = false;
+          description = ''
+            If set to <literal>true</literal>, DNS queries from the hosts
+            connected to the bridge will be forwarded to the DNS servers
+            specified in /etc/resolv.conf.
+          '';
+        };
+
+      };
+
+    virtualisation.xen.stored =
+      mkOption {
+        type = types.path;
+        description =
+          ''
+            Xen Store daemon to use. Defaults to the oxenstored from the Xen package.
+          '';
+      };
+
+    virtualisation.xen.domains = {
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description =
+            ''
+              Options defined here will override the defaults for xendomains.
+              The default options can be seen in the file included from
+              /etc/default/xendomains.
+            '';
+          };
+      };
+
+    virtualisation.xen.trace = mkEnableOption "Xen tracing";
+
+  };
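+
+  # Example dom0 configuration (a sketch; the values are illustrative):
+  #
+  #   virtualisation.xen = {
+  #     enable = true;
+  #     domain0MemorySize = 2048;
+  #     bridge = {
+  #       address = "172.16.0.1";
+  #       prefixLength = 16;
+  #     };
+  #   };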
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isx86_64;
+      message = "Xen currently not supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.boot.loader.grub.enable && (config.boot.loader.grub.efiSupport == false);
+      message = "Xen currently does not support EFI boot";
+    } ];
+
+    virtualisation.xen.package = mkDefault pkgs.xen;
+    virtualisation.xen.package-qemu = mkDefault pkgs.xen;
+    virtualisation.xen.stored = mkDefault "${cfg.package}/bin/oxenstored";
+
+    environment.systemPackages = [ cfg.package ];
+
+    boot.kernelModules =
+      [ "xen-evtchn" "xen-gntdev" "xen-gntalloc" "xen-blkback" "xen-netback"
+        "xen-pciback" "evtchn" "gntdev" "netbk" "blkbk" "xen-scsibk"
+        "usbbk" "pciback" "xen-acpi-processor" "blktap2" "tun" "netxen_nic"
+        "xen_wdt" "xen-acpi-processor" "xen-privcmd" "xen-scsiback"
+        "xenfs"
+      ];
+
+    # The xenfs module is needed in system.activationScripts.xen, but
+    # the modprobe command there fails silently. Include xenfs in the
+    # initrd as a work around.
+    boot.initrd.kernelModules = [ "xenfs" ];
+
+    # The radeonfb kernel module causes the screen to go black as soon
+    # as it's loaded, so don't load it.
+    boot.blacklistedKernelModules = [ "radeonfb" ];
+
+    # Increase the number of loopback devices from the default (8),
+    # which is way too small because every VM virtual disk requires a
+    # loopback device.
+    boot.extraModprobeConfig =
+      ''
+        options loop max_loop=64
+      '';
+
+    virtualisation.xen.bootParams = [] ++
+      optionals cfg.trace [ "loglvl=all" "guest_loglvl=all" ] ++
+      optional (cfg.domain0MemorySize != 0) "dom0_mem=${toString cfg.domain0MemorySize}M";
+
+    system.extraSystemBuilderCmds =
+      ''
+        ln -s ${cfg.package}/boot/xen.gz $out/xen.gz
+        echo "${toString cfg.bootParams}" > $out/xen-params
+      '';
+
+    # Mount the /proc/xen pseudo-filesystem.
+    system.activationScripts.xen =
+      ''
+        if [ -d /proc/xen ]; then
+            ${pkgs.kmod}/bin/modprobe xenfs 2> /dev/null
+            ${pkgs.util-linux}/bin/mountpoint -q /proc/xen || \
+                ${pkgs.util-linux}/bin/mount -t xenfs none /proc/xen
+        fi
+      '';
+
+    # Domain 0 requires a pvops-enabled kernel.
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isYes "XEN")
+        (isYes "X86_IO_APIC")
+        (isYes "ACPI")
+        (isYes "XEN_DOM0")
+        (isYes "PCI_XEN")
+        (isYes "XEN_DEV_EVTCHN")
+        (isYes "XENFS")
+        (isYes "XEN_COMPAT_XENFS")
+        (isYes "XEN_SYS_HYPERVISOR")
+        (isYes "XEN_GNTDEV")
+        (isYes "XEN_BACKEND")
+        (isModule "XEN_NETDEV_BACKEND")
+        (isModule "XEN_BLKDEV_BACKEND")
+        (isModule "XEN_PCIDEV_BACKEND")
+        (isYes "XEN_BALLOON")
+        (isYes "XEN_SCRUB_PAGES")
+      ];
+
+
+    environment.etc =
+      {
+        "xen/xl.conf".source = "${cfg.package}/etc/xen/xl.conf";
+        "xen/scripts".source = "${cfg.package}/etc/xen/scripts";
+        "default/xendomains".text = ''
+          source ${cfg.package}/etc/default/xendomains
+
+          ${cfg.domains.extraConfig}
+        '';
+      }
+      // optionalAttrs (builtins.compareVersions cfg.package.version "4.10" >= 0) {
+        # Since version 4.10, oxenstored requires /etc/xen/oxenstored.conf to start.
+        "xen/oxenstored.conf".source = "${cfg.package}/etc/xen/oxenstored.conf";
+      };
+
+    # Xen provides udev rules.
+    services.udev.packages = [ cfg.package ];
+
+    services.udev.path = [ pkgs.bridge-utils pkgs.iproute2 ];
+
+    systemd.services.xen-store = {
+      description = "Xen Store Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "xen-store.socket" ];
+      requires = [ "xen-store.socket" ];
+      preStart = ''
+        export XENSTORED_ROOTDIR="/var/lib/xenstored"
+        rm -f "$XENSTORED_ROOTDIR"/tdb* &>/dev/null
+
+        mkdir -p /var/run
+        mkdir -p /var/log/xen # Running xl requires /var/log/xen and /var/lib/xen,
+        mkdir -p /var/lib/xen # so we create them here unconditionally.
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = if (builtins.compareVersions cfg.package.version "4.8" < 0) then
+        { ExecStart = ''
+            ${cfg.stored}${optionalString cfg.trace " -T /var/log/xen/xenstored-trace.log"} --no-fork
+            '';
+        } else {
+          ExecStart = ''
+            ${cfg.package}/etc/xen/scripts/launch-xenstore
+            '';
+          Type            = "notify";
+          RemainAfterExit = true;
+          NotifyAccess    = "all";
+        };
+      postStart = ''
+        ${optionalString (builtins.compareVersions cfg.package.version "4.8" < 0) ''
+          time=0
+          timeout=30
+          # Wait for xenstored to actually come up, timing out after 30 seconds
+          while [ $time -lt $timeout ] && ! `${cfg.package}/bin/xenstore-read -s / >/dev/null 2>&1` ; do
+              time=$(($time+1))
+              sleep 1
+          done
+
+          # Exit if we timed out
+          if ! [ $time -lt $timeout ] ; then
+              echo "Could not start Xenstore Daemon"
+              exit 1
+          fi
+        ''}
+        echo "executing xen-init-dom0"
+        ${cfg.package}/lib/xen/bin/xen-init-dom0
+        '';
+    };
+
+    systemd.sockets.xen-store = {
+      description = "XenStore Socket for userspace API";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = [ "/var/run/xenstored/socket" "/var/run/xenstored/socket_ro" ];
+        SocketMode = "0660";
+        SocketUser = "root";
+        SocketGroup = "root";
+      };
+    };
+
+
+    systemd.services.xen-console = {
+      description = "Xen Console Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-store.service" ];
+      requires = [ "xen-store.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        ${optionalString cfg.trace "mkdir -p /var/log/xen"}
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/xenconsoled\
+            ${optionalString ((builtins.compareVersions cfg.package.version "4.8" >= 0)) " -i"}\
+            ${optionalString cfg.trace " --log=all --log-dir=/var/log/xen"}
+          '';
+      };
+    };
+
+
+    systemd.services.xen-qemu = {
+      description = "Xen Qemu Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-console.service" ];
+      requires = [ "xen-store.service" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package-qemu}/${cfg.package-qemu.qemu-system-i386} \
+           -xen-attach -xen-domid 0 -name dom0 -M xenpv \
+           -nographic -monitor /dev/null -serial /dev/null -parallel /dev/null
+        '';
+    };
+
+
+    systemd.services.xen-watchdog = {
+      description = "Xen Watchdog Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-qemu.service" "xen-domains.service" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/xenwatchdogd 30 15";
+      serviceConfig.Type = "forking";
+      serviceConfig.RestartSec = "1";
+      serviceConfig.Restart = "on-failure";
+    };
+
+
+    systemd.services.xen-bridge = {
+      description = "Xen bridge";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "xen-domains.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        touch /var/run/xen/dnsmasq.pid
+        touch /var/run/xen/dnsmasq.etherfile
+        touch /var/run/xen/dnsmasq.leasefile
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Usable\ range`
+        export XEN_BRIDGE_IP_RANGE_START="${"\${data[1]//[[:blank:]]/}"}"
+        export XEN_BRIDGE_IP_RANGE_END="${"\${data[2]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ mask`
+        export XEN_BRIDGE_NETMASK="${"\${data[1]//[[:blank:]]/}"}"
+
+        echo "${cfg.bridge.address} host gw dns" > /var/run/xen/dnsmasq.hostsfile
+
+        cat <<EOF > /var/run/xen/dnsmasq.conf
+        no-daemon
+        pid-file=/var/run/xen/dnsmasq.pid
+        interface=${cfg.bridge.name}
+        except-interface=lo
+        bind-interfaces
+        auth-zone=xen.local,$XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength}
+        domain=xen.local
+        addn-hosts=/var/run/xen/dnsmasq.hostsfile
+        expand-hosts
+        strict-order
+        no-hosts
+        bogus-priv
+        ${optionalString (!cfg.bridge.forwardDns) ''
+          no-resolv
+          no-poll
+          auth-server=dns.xen.local,${cfg.bridge.name}
+        ''}
+        filterwin2k
+        clear-on-reload
+        domain-needed
+        dhcp-hostsfile=/var/run/xen/dnsmasq.etherfile
+        dhcp-authoritative
+        dhcp-range=$XEN_BRIDGE_IP_RANGE_START,$XEN_BRIDGE_IP_RANGE_END
+        dhcp-no-override
+        no-ping
+        dhcp-leasefile=/var/run/xen/dnsmasq.leasefile
+        EOF
+
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+
+        ${pkgs.bridge-utils}/bin/brctl addbr ${cfg.bridge.name}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} ${cfg.bridge.address}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} netmask $XEN_BRIDGE_NETMASK
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} up
+      '';
+      serviceConfig.ExecStart = "${pkgs.dnsmasq}/bin/dnsmasq --conf-file=/var/run/xen/dnsmasq.conf";
+      postStop = ''
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} down
+        ${pkgs.bridge-utils}/bin/brctl delbr ${cfg.bridge.name}
+
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+      '';
+    };
+
+
+    systemd.services.xen-domains = {
+      description = "Xen domains - automatically starts, saves and restores Xen domains";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-bridge.service" "xen-qemu.service" ];
+      requires = [ "xen-bridge.service" "xen-qemu.service" ];
+      ## To prevent a race between dhcpcd and xend's bridge setup script
+      ## (which renames eth* to peth* and recreates eth* as a virtual
+      ## device), start dhcpcd after xend.
+      before = [ "dhcpd.service" ];
+      restartIfChanged = false;
+      serviceConfig.RemainAfterExit = "yes";
+      path = [ cfg.package cfg.package-qemu ];
+      environment.XENDOM_CONFIG = "${cfg.package}/etc/sysconfig/xendomains";
+      preStart = "mkdir -p /var/lock/subsys -m 755";
+      serviceConfig.ExecStart = "${cfg.package}/etc/init.d/xendomains start";
+      serviceConfig.ExecStop = "${cfg.package}/etc/init.d/xendomains stop";
+    };
+
+  };
+}
diff --git a/nixos/modules/virtualisation/xen-domU.nix b/nixos/modules/virtualisation/xen-domU.nix
new file mode 100644
index 00000000000..c00b984c2ce
--- /dev/null
+++ b/nixos/modules/virtualisation/xen-domU.nix
@@ -0,0 +1,19 @@
+# Common configuration for Xen DomU NixOS virtual machines.
+
+{ ... }:
+
+{
+  boot.loader.grub.version = 2;
+  boot.loader.grub.device = "nodev";
+
+  boot.initrd.kernelModules =
+    [ "xen-blkfront" "xen-tpmfront" "xen-kbdfront" "xen-fbfront"
+      "xen-netfront" "xen-pcifront" "xen-scsifront"
+    ];
+
+  # Send syslog messages to the Xen console.
+  services.syslogd.tty = "hvc0";
+
+  # Don't run ntpd, since we should get the correct time from Dom0.
+  services.timesyncd.enable = false;
+}