summary refs log tree commit diff
path: root/pkgs/build-support
diff options
context:
space:
mode:
Diffstat (limited to 'pkgs/build-support')
-rw-r--r--pkgs/build-support/build-fhs-userenv/env.nix1
-rw-r--r--pkgs/build-support/cc-wrapper/cc-wrapper.sh2
-rw-r--r--pkgs/build-support/cc-wrapper/default.nix19
-rwxr-xr-x[-rw-r--r--]pkgs/build-support/cc-wrapper/ld-solaris-wrapper.sh2
-rw-r--r--pkgs/build-support/cc-wrapper/ld-wrapper.sh2
-rw-r--r--pkgs/build-support/cc-wrapper/utils.sh24
-rw-r--r--pkgs/build-support/docker/default.nix457
-rw-r--r--pkgs/build-support/docker/examples.nix108
-rw-r--r--pkgs/build-support/docker/pull.nix10
-rwxr-xr-x[-rw-r--r--]pkgs/build-support/dotnetbuildhelpers/patch-fsharp-targets.sh0
-rw-r--r--pkgs/build-support/emacs/buffer.nix47
-rw-r--r--pkgs/build-support/emacs/wrapper.nix3
-rw-r--r--pkgs/build-support/fetchadc/default.nix10
-rw-r--r--pkgs/build-support/fetchegg/default.nix8
-rw-r--r--pkgs/build-support/fetchfile/builder.sh11
-rw-r--r--pkgs/build-support/fetchfile/default.nix7
-rw-r--r--pkgs/build-support/fetchgit/default.nix12
-rwxr-xr-xpkgs/build-support/fetchgit/nix-prefetch-git39
-rw-r--r--pkgs/build-support/fetchgx/default.nix30
-rw-r--r--pkgs/build-support/fetchhg/default.nix4
-rw-r--r--pkgs/build-support/fetchmtn/default.nix9
-rw-r--r--pkgs/build-support/fetchsvn/default.nix9
-rw-r--r--pkgs/build-support/fetchurl/boot.nix1
-rw-r--r--pkgs/build-support/fetchurl/default.nix8
-rw-r--r--pkgs/build-support/gcc-cross-wrapper/builder.sh2
-rwxr-xr-x[-rw-r--r--]pkgs/build-support/gcc-wrapper-old/ld-solaris-wrapper.sh2
-rw-r--r--pkgs/build-support/grsecurity/default.nix8
-rw-r--r--pkgs/build-support/kernel/cpio-clean.pl17
-rw-r--r--pkgs/build-support/kernel/make-initrd.nix7
-rw-r--r--pkgs/build-support/kernel/make-initrd.sh3
-rw-r--r--pkgs/build-support/kernel/modules-closure.nix4
-rw-r--r--pkgs/build-support/ocaml/default.nix10
-rw-r--r--pkgs/build-support/replace-dependency.nix2
-rw-r--r--pkgs/build-support/rust/default.nix3
-rw-r--r--pkgs/build-support/rust/fetchcargo.nix2
-rw-r--r--pkgs/build-support/setup-hooks/multiple-outputs.sh11
-rw-r--r--pkgs/build-support/setup-hooks/win-dll-link.sh2
-rw-r--r--pkgs/build-support/singularity-tools/default.nix100
-rw-r--r--pkgs/build-support/substitute/substitute-all.nix4
-rw-r--r--pkgs/build-support/trivial-builders.nix60
-rw-r--r--pkgs/build-support/vm/default.nix50
-rw-r--r--pkgs/build-support/vm/windows/bootstrap.nix4
-rw-r--r--pkgs/build-support/vm/windows/controller/default.nix6
-rw-r--r--pkgs/build-support/vm/windows/default.nix2
44 files changed, 774 insertions, 348 deletions
diff --git a/pkgs/build-support/build-fhs-userenv/env.nix b/pkgs/build-support/build-fhs-userenv/env.nix
index f69338cb16c..b30e1362aba 100644
--- a/pkgs/build-support/build-fhs-userenv/env.nix
+++ b/pkgs/build-support/build-fhs-userenv/env.nix
@@ -89,6 +89,7 @@ let
 
       # symlink other core stuff
       ln -s /host/etc/localtime localtime
+      ln -s /host/etc/zoneinfo zoneinfo
       ln -s /host/etc/machine-id machine-id
       ln -s /host/etc/os-release os-release
 
diff --git a/pkgs/build-support/cc-wrapper/cc-wrapper.sh b/pkgs/build-support/cc-wrapper/cc-wrapper.sh
index 03f068d8298..3ccdc34db5b 100644
--- a/pkgs/build-support/cc-wrapper/cc-wrapper.sh
+++ b/pkgs/build-support/cc-wrapper/cc-wrapper.sh
@@ -24,7 +24,7 @@ nonFlagArgs=0
 [[ "@prog@" = *++ ]] && isCpp=1 || isCpp=0
 cppInclude=1
 
-params=("$@")
+expandResponseParams "$@"
 n=0
 while [ $n -lt ${#params[*]} ]; do
     p=${params[n]}
diff --git a/pkgs/build-support/cc-wrapper/default.nix b/pkgs/build-support/cc-wrapper/default.nix
index 8a746ea016e..c8e3d8b4cc8 100644
--- a/pkgs/build-support/cc-wrapper/default.nix
+++ b/pkgs/build-support/cc-wrapper/default.nix
@@ -46,7 +46,20 @@ stdenv.mkDerivation {
   inherit cc shell libc_bin libc_dev libc_lib binutils_bin coreutils_bin;
   gnugrep_bin = if nativeTools then "" else gnugrep;
 
-  passthru = { inherit libc nativeTools nativeLibc nativePrefix isGNU isClang; };
+  passthru = {
+    inherit libc nativeTools nativeLibc nativePrefix isGNU isClang;
+
+    emacsBufferSetup = pkgs: ''
+      ; We should handle propagation here too
+      (mapc (lambda (arg)
+        (when (file-directory-p (concat arg "/include"))
+          (setenv "NIX_CFLAGS_COMPILE" (concat (getenv "NIX_CFLAGS_COMPILE") " -isystem " arg "/include")))
+        (when (file-directory-p (concat arg "/lib"))
+          (setenv "NIX_LDFLAGS" (concat (getenv "NIX_LDFLAGS") " -L" arg "/lib")))
+        (when (file-directory-p (concat arg "/lib64"))
+          (setenv "NIX_LDFLAGS" (concat (getenv "NIX_LDFLAGS") " -L" arg "/lib64")))) '(${concatStringsSep " " (map (pkg: "\"${pkg}\"") pkgs)}))
+    '';
+  };
 
   buildCommand =
     ''
@@ -239,10 +252,10 @@ stdenv.mkDerivation {
 
       # some linkers on some platforms don't support specific -z flags
       hardening_unsupported_flags=""
-      if [[ "$($ldPath/ld -z now 2>&1 || true)" =~ "unknown option" ]]; then
+      if [[ "$($ldPath/ld -z now 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
         hardening_unsupported_flags+=" bindnow"
       fi
-      if [[ "$($ldPath/ld -z relro 2>&1 || true)" =~ "unknown option" ]]; then
+      if [[ "$($ldPath/ld -z relro 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
         hardening_unsupported_flags+=" relro"
       fi
 
diff --git a/pkgs/build-support/cc-wrapper/ld-solaris-wrapper.sh b/pkgs/build-support/cc-wrapper/ld-solaris-wrapper.sh
index 5a7b92b5ad7..263ea5408e9 100644..100755
--- a/pkgs/build-support/cc-wrapper/ld-solaris-wrapper.sh
+++ b/pkgs/build-support/cc-wrapper/ld-solaris-wrapper.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!@shell@
 
 set -e
 set -u
diff --git a/pkgs/build-support/cc-wrapper/ld-wrapper.sh b/pkgs/build-support/cc-wrapper/ld-wrapper.sh
index 44d9a047936..056cfa92053 100644
--- a/pkgs/build-support/cc-wrapper/ld-wrapper.sh
+++ b/pkgs/build-support/cc-wrapper/ld-wrapper.sh
@@ -16,7 +16,7 @@ source @out@/nix-support/utils.sh
 
 
 # Optionally filter out paths not referring to the store.
-params=("$@")
+expandResponseParams "$@"
 if [ "$NIX_ENFORCE_PURITY" = 1 -a -n "$NIX_STORE" \
         -a \( -z "$NIX_IGNORE_LD_THROUGH_GCC" -o -z "$NIX_LDFLAGS_SET" \) ]; then
     rest=()
diff --git a/pkgs/build-support/cc-wrapper/utils.sh b/pkgs/build-support/cc-wrapper/utils.sh
index 3ab512d85c4..aba5f3295a9 100644
--- a/pkgs/build-support/cc-wrapper/utils.sh
+++ b/pkgs/build-support/cc-wrapper/utils.sh
@@ -22,3 +22,27 @@ badPath() {
         "${p:0:4}" != "/tmp" -a \
         "${p:0:${#NIX_BUILD_TOP}}" != "$NIX_BUILD_TOP"
 }
+
+expandResponseParams() {
+    local inparams=("$@")
+    local n=0
+    local p
+    params=()
+    while [ $n -lt ${#inparams[*]} ]; do
+        p=${inparams[n]}
+        case $p in
+            @*)
+                if [ -e "${p:1}" ]; then
+                    args=$(<"${p:1}")
+                    eval 'for arg in '${args//$/\\$}'; do params+=("$arg"); done'
+                else
+                    params+=("$p")
+                fi
+                ;;
+            *)
+                params+=("$p")
+                ;;
+        esac
+        n=$((n + 1))
+    done
+}
diff --git a/pkgs/build-support/docker/default.nix b/pkgs/build-support/docker/default.nix
index 4c5378ea73f..27575053954 100644
--- a/pkgs/build-support/docker/default.nix
+++ b/pkgs/build-support/docker/default.nix
@@ -1,12 +1,36 @@
-{ stdenv, lib, callPackage, runCommand, writeReferencesToFile, writeText, vmTools, writeScript
-, docker, shadow, utillinux, coreutils, jshon, e2fsprogs, go, pigz }:
+{
+  callPackage,
+  coreutils,
+  docker,
+  e2fsprogs,
+  findutils,
+  go,
+  jshon,
+  lib,
+  pkgs,
+  pigz,
+  runCommand,
+  rsync,
+  shadow,
+  stdenv,
+  storeDir ? builtins.storeDir,
+  utillinux,
+  vmTools,
+  writeReferencesToFile,
+  writeScript,
+  writeText,
+}:
 
 # WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.
-  
+
 rec {
 
+  examples = import ./examples.nix {
+    inherit pkgs buildImage pullImage shadowSetup;
+  };
+
   pullImage = callPackage ./pull.nix {};
-  
+
   # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
  # And we cannot untar it, because then we cannot preserve permissions etc.
   tarsum = runCommand "tarsum" {
@@ -23,110 +47,138 @@ rec {
 
     cp tarsum $out
   '';
-  
+
   # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
-  mergeDrvs = { drvs, onlyDeps ? false }:
+  mergeDrvs = {
+    derivations,
+    onlyDeps ? false
+  }:
     runCommand "merge-drvs" {
-      inherit drvs onlyDeps;
+      inherit derivations onlyDeps;
     } ''
-      if [ -n "$onlyDeps" ]; then
-        echo $drvs > $out
+      if [[ -n "$onlyDeps" ]]; then
+        echo $derivations > $out
         exit 0
       fi
-        
+
       mkdir $out
-      for drv in $drvs; do
-        echo Merging $drv
-        if [ -d "$drv" ]; then
-          cp -drf --preserve=mode -f $drv/* $out/
+      for derivation in $derivations; do
+        echo "Merging $derivation..."
+        if [[ -d "$derivation" ]]; then
+          # If it's a directory, copy all of its contents into $out.
+          cp -drf --preserve=mode -f $derivation/* $out/
         else
+          # Otherwise treat the derivation as a tarball and extract it
+          # into $out.
          tar -C $out -xpf $derivation || true
         fi
       done
     '';
-  
-  shellScript = text:
-    writeScript "script.sh" ''
-      #!${stdenv.shell}
-      set -e
-      export PATH=${coreutils}/bin:/bin
-
-      ${text}
-    '';
 
+  # Helper for setting up the base files for managing users and
+  # groups, only if such files don't exist already. It is suitable for
+  # being used in a runAsRoot script.
   shadowSetup = ''
     export PATH=${shadow}/bin:$PATH
     mkdir -p /etc/pam.d
-    if [ ! -f /etc/passwd ]; then
+    if [[ ! -f /etc/passwd ]]; then
       echo "root:x:0:0::/root:/bin/sh" > /etc/passwd
       echo "root:!x:::::::" > /etc/shadow
     fi
-    if [ ! -f /etc/group ]; then
+    if [[ ! -f /etc/group ]]; then
       echo "root:x:0:" > /etc/group
       echo "root:x::" > /etc/gshadow
     fi
-    if [ ! -f /etc/pam.d/other ]; then
+    if [[ ! -f /etc/pam.d/other ]]; then
       cat > /etc/pam.d/other <<EOF
-account sufficient pam_unix.so
-auth sufficient pam_rootok.so
-password requisite pam_unix.so nullok sha512
-session required pam_unix.so
-EOF
+    account sufficient pam_unix.so
+    auth sufficient pam_rootok.so
+    password requisite pam_unix.so nullok sha512
+    session required pam_unix.so
+    EOF
     fi
-    if [ ! -f /etc/login.defs ]; then
+    if [[ ! -f /etc/login.defs ]]; then
       touch /etc/login.defs
     fi
   '';
 
-  runWithOverlay = { name , fromImage ? null, fromImageName ? null, fromImageTag ? null
-                   , diskSize ? 1024, preMount ? "", postMount ? "", postUmount ? "" }:
+  # Run commands in a virtual machine.
+  runWithOverlay = {
+    name,
+    fromImage ? null,
+    fromImageName ? null,
+    fromImageTag ? null,
+    diskSize ? 1024,
+    preMount ? "",
+    postMount ? "",
+    postUmount ? ""
+  }:
     vmTools.runInLinuxVM (
       runCommand name {
-        preVM = vmTools.createEmptyImage { size = diskSize; fullName = "docker-run-disk"; };
-
+        preVM = vmTools.createEmptyImage {
+          size = diskSize;
+          fullName = "docker-run-disk";
+        };
         inherit fromImage fromImageName fromImageTag;
-        
-        buildInputs = [ utillinux e2fsprogs jshon ];
+
+        buildInputs = [ utillinux e2fsprogs jshon rsync ];
       } ''
       rm -rf $out
-      
+
       mkdir disk
       mkfs /dev/${vmTools.hd}
       mount /dev/${vmTools.hd} disk
       cd disk
 
-      if [ -n "$fromImage" ]; then
-        echo Unpacking base image
+      if [[ -n "$fromImage" ]]; then
+        echo "Unpacking base image..."
         mkdir image
         tar -C image -xpf "$fromImage"
 
-        if [ -z "$fromImageName" ]; then
-          fromImageName=$(jshon -k < image/repositories|head -n1)
+        # If the image name isn't set, read it from the image repository json.
+        if [[ -z "$fromImageName" ]]; then
+          fromImageName=$(jshon -k < image/repositories | head -n 1)
+          echo "From-image name wasn't set. Read $fromImageName."
         fi
-        if [ -z "$fromImageTag" ]; then
-          fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
+
+        # If the tag isn't set, use the name as an index into the json
+        # and read the first key found.
+        if [[ -z "$fromImageTag" ]]; then
+          fromImageTag=$(jshon -e $fromImageName -k < image/repositories \
+                         | head -n1)
+          echo "From-image tag wasn't set. Read $fromImageTag."
         fi
-        parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
+
+        # Use the name and tag to get the parent ID field.
+        parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
+                   < image/repositories)
       fi
 
+      # Unpack all of the parent layers into the image.
       lowerdir=""
-      while [ -n "$parentID" ]; do
-        echo Unpacking layer $parentID
+      while [[ -n "$parentID" ]]; do
+        echo "Unpacking layer $parentID"
         mkdir -p image/$parentID/layer
         tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
         rm image/$parentID/layer.tar
 
         find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
 
+        # Get the next lower directory and continue the loop.
         lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
-        parentID=$(cat image/$parentID/json|(jshon -e parent -u 2>/dev/null || true))
+        parentID=$(cat image/$parentID/json \
+                  | (jshon -e parent -u 2>/dev/null || true))
       done
 
       mkdir work
       mkdir layer
       mkdir mnt
 
-      ${preMount}
+      ${lib.optionalString (preMount != "") ''
+        # Execute pre-mount steps
+        echo "Executing pre-mount steps..."
+        ${preMount}
+      ''}
 
       if [ -n "$lowerdir" ]; then
         mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
@@ -134,13 +186,19 @@ EOF
         mount --bind layer mnt
       fi
 
-      ${postMount}
- 
+      ${lib.optionalString (postMount != "") ''
+        # Execute post-mount steps
+        echo "Executing post-mount steps..."
+        ${postMount}
+      ''}
+
       umount mnt
 
-      pushd layer
-      find . -type c -exec bash -c 'name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"' \;
-      popd
+      (
+        cd layer
+        cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
+        find . -type c -exec bash -c "$cmd" \;
+      )
 
       ${postUmount}
       '');
@@ -150,76 +208,148 @@ EOF
       inherit name fromImage fromImageName fromImageTag diskSize;
 
       postMount = ''
-        echo Packing raw image
+        echo "Packing raw image..."
         tar -C mnt --mtime=0 -cf $out .
       '';
     };
-    
-  mkPureLayer = { baseJson, contents ? null, extraCommands ? "" }:
-    runCommand "docker-layer" {
+
+
+  # Create an executable shell script which has the coreutils in its
+  # PATH. Since root scripts are executed in a blank environment, even
+  # things like `ls` or `echo` will be missing.
+  shellScript = name: text:
+    writeScript name ''
+      #!${stdenv.shell}
+      set -e
+      export PATH=${coreutils}/bin:/bin
+      ${text}
+    '';
+
+  # Create a "layer" (set of files).
+  mkPureLayer = {
+    # Name of the layer
+    name,
+    # JSON containing configuration and metadata for this layer.
+    baseJson,
+    # Files to add to the layer.
+    contents ? null,
+    # Additional commands to run on the layer before it is tar'd up.
+    extraCommands ? ""
+  }:
+    runCommand "docker-layer-${name}" {
       inherit baseJson contents extraCommands;
 
-      buildInputs = [ jshon ];
-    } ''
+      buildInputs = [ jshon rsync ];
+    }
+    ''
       mkdir layer
-      if [ -n "$contents" ]; then
-        echo Adding contents
-        for c in $contents; do
-          cp -drf $c/* layer/
-          chmod -R ug+w layer/
+      if [[ -n "$contents" ]]; then
+        echo "Adding contents..."
+        for item in $contents; do
+          echo "Adding $item"
+          rsync -ak $item/ layer/
         done
+      else
+        echo "No contents to add to layer."
+      fi
+
+      if [[ -n $extraCommands ]]; then
+        (cd layer; eval "$extraCommands")
       fi
 
-      pushd layer
-      ${extraCommands}
-      popd
-      
-      echo Packing layer
+      # Tar up the layer and throw it into 'layer.tar'.
+      echo "Packing layer..."
       mkdir $out
       tar -C layer --mtime=0 -cf $out/layer.tar .
-      ts=$(${tarsum} < $out/layer.tar)
-      cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
+
+      # Compute a checksum of the tarball.
+      echo "Computing layer checksum..."
+      tarsum=$(${tarsum} < $out/layer.tar)
+
+      # Add a 'checksum' field to the JSON, with the value set to the
+      # checksum of the tarball.
+      cat ${baseJson} | jshon -s "$tarsum" -i checksum > $out/json
+
+      # Indicate to docker that we're using schema version 1.0.
       echo -n "1.0" > $out/VERSION
+
+      echo "Finished building layer '${name}'"
     '';
 
-  mkRootLayer = { runAsRoot, baseJson, fromImage ? null, fromImageName ? null, fromImageTag ? null
-                , diskSize ? 1024, contents ? null, extraCommands ? "" }:
-    let runAsRootScript = writeScript "run-as-root.sh" runAsRoot;
+  # Make a "root" layer; required if we need to execute commands as a
+  # privileged user on the image. The commands themselves will be
+  # performed in a virtual machine sandbox.
+  mkRootLayer = {
+    # Name of the image.
+    name,
+    # Script to run as root. Bash.
+    runAsRoot,
+    # Files to add to the layer. If null, an empty layer will be created.
+    contents ? null,
+    # JSON containing configuration and metadata for this layer.
+    baseJson,
+    # Existing image onto which to append the new layer.
+    fromImage ? null,
+    # Name of the image we're appending onto.
+    fromImageName ? null,
+    # Tag of the image we're appending onto.
+    fromImageTag ? null,
+    # How much disk to allocate for the temporary virtual machine.
+    diskSize ? 1024,
+    # Commands (bash) to run on the layer; these do not require sudo.
+    extraCommands ? ""
+  }:
+    # Generate an executable script from the `runAsRoot` text.
+    let runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
     in runWithOverlay {
-      name = "docker-layer";
-      
+      name = "docker-layer-${name}";
+
       inherit fromImage fromImageName fromImageTag diskSize;
 
-      preMount = lib.optionalString (contents != null) ''
-        echo Adding contents
-        for c in ${builtins.toString contents}; do
-          cp -drf $c/* layer/
-          chmod -R ug+w layer/
+      preMount = lib.optionalString (contents != null && contents != []) ''
+        echo "Adding contents..."
+        for item in ${toString contents}; do
+          echo "Adding $item..."
+          rsync -ak $item/ layer/
         done
       '';
 
       postMount = ''
-        mkdir -p mnt/{dev,proc,sys,nix/store}
+        mkdir -p mnt/{dev,proc,sys} mnt${storeDir}
+
+        # Mount /dev, /sys and the nix store as shared folders.
         mount --rbind /dev mnt/dev
         mount --rbind /sys mnt/sys
-        mount --rbind /nix/store mnt/nix/store
+        mount --rbind ${storeDir} mnt${storeDir}
 
+        # Execute the run as root script. See 'man unshare' for
+        # details on what's going on here; basically this command
+        # means that the runAsRootScript will be executed in a nearly
+        # completely isolated environment.
         unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}
-        umount -R mnt/dev mnt/sys mnt/nix/store
-        rmdir --ignore-fail-on-non-empty mnt/dev mnt/proc mnt/sys mnt/nix/store mnt/nix
+
+        # Unmount directories and remove them.
+        umount -R mnt/dev mnt/sys mnt${storeDir}
+        rmdir --ignore-fail-on-non-empty \
+          mnt/dev mnt/proc mnt/sys mnt${storeDir} \
+          mnt$(dirname ${storeDir})
       '';
- 
+
       postUmount = ''
-        pushd layer
-        ${extraCommands}
-        popd
+        (cd layer; eval "${extraCommands}")
 
-        echo Packing layer
+        echo "Packing layer..."
         mkdir $out
         tar -C layer --mtime=0 -cf $out/layer.tar .
+
+        # Compute the tar checksum and add it to the output json.
+        echo "Computing checksum..."
         ts=$(${tarsum} < $out/layer.tar)
         cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
+        # Indicate to docker that we're using schema version 1.0.
         echo -n "1.0" > $out/VERSION
+
+        echo "Finished building layer '${name}'"
       '';
     };
 
@@ -229,105 +359,148 @@ EOF
   # 4. compute the layer id
   # 5. put the layer in the image
   # 6. repack the image
-  buildImage = args@{ name, tag ? "latest"
-               , fromImage ? null, fromImageName ? null, fromImageTag ? null
-               , contents ? null, config ? null, runAsRoot ? null
-               , diskSize ? 1024, extraCommands ? "" }:
+  buildImage = args@{
+    # Image name.
+    name,
+    # Image tag.
+    tag ? "latest",
+    # Parent image, to append to.
+    fromImage ? null,
+    # Name of the parent image; will be read from the image otherwise.
+    fromImageName ? null,
+    # Tag of the parent image; will be read from the image otherwise.
+    fromImageTag ? null,
+    # Files to put on the image (a nix store path or list of paths).
+    contents ? null,
+    # Docker config; e.g. what command to run on the container.
+    config ? null,
+    # Optional bash script to run on the files prior to fixturizing the layer.
+    extraCommands ? "",
+    # Optional bash script to run as root on the image when provisioning.
+    runAsRoot ? null,
+    # Size of the virtual machine disk to provision when building the image.
+    diskSize ? 1024,
+  }:
 
     let
-
       baseName = baseNameOf name;
 
+      # Create a JSON blob of the configuration. Set the date to unix zero.
       baseJson = writeText "${baseName}-config.json" (builtins.toJSON {
-          created = "1970-01-01T00:00:01Z";
-          architecture = "amd64";
-          os = "linux";
-          config = config;
+        created = "1970-01-01T00:00:01Z";
+        architecture = "amd64";
+        os = "linux";
+        config = config;
       });
 
-      layer = (if runAsRoot == null
-               then mkPureLayer { inherit baseJson contents extraCommands; }
-               else mkRootLayer { inherit baseJson fromImage fromImageName fromImageTag contents runAsRoot diskSize extraCommands; });
-      result = runCommand "${baseName}.tar.gz" {
-        buildInputs = [ jshon pigz ];
-
+      layer =
+        if runAsRoot == null
+        then mkPureLayer {
+          name = baseName;
+          inherit baseJson contents extraCommands;
+        } else mkRootLayer {
+          name = baseName;
+          inherit baseJson fromImage fromImageName fromImageTag
+                  contents runAsRoot diskSize extraCommands;
+        };
+      result = runCommand "docker-image-${baseName}.tar.gz" {
+        buildInputs = [ jshon pigz coreutils findutils ];
         imageName = name;
         imageTag = tag;
         inherit fromImage baseJson;
-
         layerClosure = writeReferencesToFile layer;
-
-        passthru = {
-          buildArgs = args;
-        };
+        passthru.buildArgs = args;
+        passthru.layer = layer;
       } ''
+        # Print tar contents:
+        # 1: Interpreted as relative to the root directory
+        # 2: With no trailing slashes on directories
+        # This is useful for ensuring that the output matches the
+        # values generated by the "find" command
+        ls_tar() {
+          for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
+            if [[ "$f" != "." ]]; then
+              echo "/$f"
+            fi
+          done
+        }
+
         mkdir image
         touch baseFiles
-        if [ -n "$fromImage" ]; then
-          echo Unpacking base image
+        if [[ -n "$fromImage" ]]; then
+          echo "Unpacking base image..."
           tar -C image -xpf "$fromImage"
-          
-          if [ -z "$fromImageName" ]; then
+
+          if [[ -z "$fromImageName" ]]; then
             fromImageName=$(jshon -k < image/repositories|head -n1)
           fi
-          if [ -z "$fromImageTag" ]; then
-            fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
+          if [[ -z "$fromImageTag" ]]; then
+            fromImageTag=$(jshon -e $fromImageName -k \
+                           < image/repositories|head -n1)
           fi
-          parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
-          
+          parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
+                     < image/repositories)
+
           for l in image/*/layer.tar; do
-            tar -tf $l >> baseFiles
+            ls_tar $l >> baseFiles
           done
         fi
 
         chmod -R ug+rw image
-        
+
         mkdir temp
         cp ${layer}/* temp/
         chmod ug+w temp/*
 
+        echo "$(dirname ${storeDir})" >> layerFiles
+        echo '${storeDir}' >> layerFiles
         for dep in $(cat $layerClosure); do
-          find $dep -path "${layer}" -prune -o -print >> layerFiles
+          find $dep >> layerFiles
         done
 
-        if [ -s layerFiles ]; then
-          # FIXME: might not be /nix/store
-          echo '/nix' >> layerFiles
-          echo '/nix/store' >> layerFiles
-        fi
+        echo "Adding layer..."
+        # Record the contents of the tarball with ls_tar.
+        ls_tar temp/layer.tar >> baseFiles
+
+        # Get the files in the new layer which were *not* present in
+        # the old layer, and record them as newFiles.
+        comm <(sort -n baseFiles|uniq) \
+             <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
+        # Append the new files to the layer.
+        tar -rpf temp/layer.tar --mtime=0 --no-recursion --files-from newFiles
 
-        echo Adding layer
-        tar -tf temp/layer.tar >> baseFiles
-        sed 's/^\.//' -i baseFiles
-        comm <(sort -u baseFiles) <(sort -u layerFiles) -1 -3 > newFiles
-        tar -rpf temp/layer.tar --mtime=0 --no-recursion --files-from newFiles 2>/dev/null || true
+        echo "Adding meta..."
 
-        echo Adding meta
-        
-        if [ -n "$parentID" ]; then
+        # If we have a parentID, add it to the json metadata.
+        if [[ -n "$parentID" ]]; then
           cat temp/json | jshon -s "$parentID" -i parent > tmpjson
           mv tmpjson temp/json
         fi
-        
+
+        # Take the sha256 sum of the generated json and use it as the layer ID.
+        # Compute the size and add it to the json under the 'Size' field.
         layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
         size=$(stat --printf="%s" temp/layer.tar)
         cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
         mv tmpjson temp/json
 
+        # Use the temp folder we've been working on to create a new image.
         mv temp image/$layerID
-        
+
+        # Store the json under the name image/repositories.
         jshon -n object \
           -n object -s "$layerID" -i "$imageTag" \
           -i "$imageName" > image/repositories
 
+        # Make the image read-only.
         chmod -R a-w image
 
-        echo Cooking the image
+        echo "Cooking the image..."
         tar -C image --mtime=0 -c . | pigz -nT > $out
+
+        echo "Finished."
       '';
 
     in
-
-      result;
-
+    result;
 }
diff --git a/pkgs/build-support/docker/examples.nix b/pkgs/build-support/docker/examples.nix
new file mode 100644
index 00000000000..05b4a9b4f2d
--- /dev/null
+++ b/pkgs/build-support/docker/examples.nix
@@ -0,0 +1,108 @@
+# Examples of using the docker tools to build packages.
+#
+# This file defines several docker images. In order to use an image,
+# build its derivation with `nix-build`, and then load the result with
+# `docker load`. For example:
+#
+#  $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
+#  $ docker load < result
+
+{ pkgs, buildImage, pullImage, shadowSetup }:
+
+rec {
+  # 1. basic example
+  bash = buildImage {
+    name = "bash";
+    contents = pkgs.bashInteractive;
+  };
+
+  # 2. service example, layered on another image
+  redis = buildImage {
+    name = "redis";
+    tag = "latest";
+
+    # for example's sake, we can layer redis on top of bash or debian
+    fromImage = bash;
+    # fromImage = debian;
+
+    contents = pkgs.redis;
+    runAsRoot = ''
+      mkdir -p /data
+    '';
+
+    config = {
+      Cmd = [ "/bin/redis-server" ];
+      WorkingDir = "/data";
+      Volumes = {
+        "/data" = {};
+      };
+    };
+  };
+
+  # 3. another service example
+  nginx = let
+    nginxPort = "80";
+    nginxConf = pkgs.writeText "nginx.conf" ''
+      user nginx nginx;
+      daemon off;
+      error_log /dev/stdout info;
+      pid /dev/null;
+      events {}
+      http {
+        access_log /dev/stdout;
+        server {
+          listen ${nginxPort};
+          index index.html;
+          location / {
+            root ${nginxWebRoot};
+          }
+        }
+      }
+    '';
+    nginxWebRoot = pkgs.writeTextDir "index.html" ''
+      <html><body><h1>Hello from NGINX</h1></body></html>
+    '';
+  in
+  buildImage {
+    name = "nginx-container";
+    contents = pkgs.nginx;
+
+    runAsRoot = ''
+      #!${pkgs.stdenv.shell}
+      ${shadowSetup}
+      groupadd --system nginx
+      useradd --system --gid nginx nginx
+    '';
+
+    config = {
+      Cmd = [ "nginx" "-c" nginxConf ];
+      ExposedPorts = {
+        "${nginxPort}/tcp" = {};
+      };
+    };
+  };
+
+  # 4. example of pulling an image. could be used as a base for other images
+  #
+  # ***** Currently broken, getting 404s. Perhaps the docker API has changed?
+  #
+  #
+  # debian = pullImage {
+  #   imageName = "debian";
+  #   imageTag = "jessie";
+  #   # this hash will need change if the tag is updated at docker hub
+  #   sha256 = "18kd495lc2k35h03bpcbdjnix17nlqbwf6nmq3sb161blf0dk14q";
+  # };
+
+  # 5. example of multiple contents, emacs and vi happily coexisting
+  editors = buildImage {
+    name = "editors";
+    contents = [
+      pkgs.coreutils
+      pkgs.bash
+      pkgs.emacs
+      pkgs.vim
+      pkgs.nano
+    ];
+  };
+}
diff --git a/pkgs/build-support/docker/pull.nix b/pkgs/build-support/docker/pull.nix
index a5e7acaf159..0e1b147f6e1 100644
--- a/pkgs/build-support/docker/pull.nix
+++ b/pkgs/build-support/docker/pull.nix
@@ -26,17 +26,11 @@ let layer = stdenv.mkDerivation {
   outputHash = sha256;
   outputHashMode = "recursive";
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-
+  impureEnvVars = lib.fetchers.proxyImpureEnvVars ++ [
     # This variable allows the user to pass additional options to curl
     "NIX_CURL_FLAGS"
   ];
-  
+
   # Doing the download on a remote machine just duplicates network
   # traffic, so don't do that.
   preferLocalBuild = true;
diff --git a/pkgs/build-support/dotnetbuildhelpers/patch-fsharp-targets.sh b/pkgs/build-support/dotnetbuildhelpers/patch-fsharp-targets.sh
index 3f81cc73e80..3f81cc73e80 100644..100755
--- a/pkgs/build-support/dotnetbuildhelpers/patch-fsharp-targets.sh
+++ b/pkgs/build-support/dotnetbuildhelpers/patch-fsharp-targets.sh
diff --git a/pkgs/build-support/emacs/buffer.nix b/pkgs/build-support/emacs/buffer.nix
new file mode 100644
index 00000000000..e366fd1f739
--- /dev/null
+++ b/pkgs/build-support/emacs/buffer.nix
@@ -0,0 +1,47 @@
+# Functions to build elisp files to locally configure emacs buffers.
+# See https://github.com/shlevy/nix-buffer
+
+{ lib, writeText, inherit-local }:
+
+{
+  withPackages = pkgs: let
+      extras = map (x: x.emacsBufferSetup pkgs) (builtins.filter (builtins.hasAttr "emacsBufferSetup") pkgs);
+    in writeText "dir-locals.el" ''
+      (require 'inherit-local "${inherit-local}/share/emacs/site-lisp/elpa/inherit-local-${inherit-local.version}/inherit-local.elc")
+
+      ; Only set up nixpkgs buffer handling when we have some buffers active
+      (defvar nixpkgs--buffer-count 0)
+      (when (eq nixpkgs--buffer-count 0)
+        ; When generating a new temporary buffer (one whose name starts with a space), do inherit-local inheritance and make it a nixpkgs buffer
+        (defun nixpkgs--around-generate (orig name)
+          (if (eq (aref name 0) ?\s)
+              (let ((buf (funcall orig name)))
+                (when (inherit-local-inherit-child buf)
+                  (with-current-buffer buf
+                    (make-local-variable 'kill-buffer-hook)
+                    (setq nixpkgs--buffer-count (1+ nixpkgs--buffer-count))
+                    (add-hook 'kill-buffer-hook 'nixpkgs--decrement-buffer-count)))
+                buf)
+            (funcall orig name)))
+        (advice-add 'generate-new-buffer :around #'nixpkgs--around-generate)
+        ; When we have no more nixpkgs buffers, tear down the buffer handling
+        (defun nixpkgs--decrement-buffer-count ()
+          (setq nixpkgs--buffer-count (1- nixpkgs--buffer-count))
+          (when (eq nixpkgs--buffer-count 0)
+            (advice-remove 'generate-new-buffer #'nixpkgs--around-generate)
+            (fmakunbound 'nixpkgs--around-generate)
+            (fmakunbound 'nixpkgs--decrement-buffer-count))))
+      (setq nixpkgs--buffer-count (1+ nixpkgs--buffer-count))
+      (make-local-variable 'kill-buffer-hook)
+      (add-hook 'kill-buffer-hook 'nixpkgs--decrement-buffer-count)
+
+      ; Add packages to PATH and exec-path
+      (make-local-variable 'process-environment)
+      (put 'process-environment 'permanent-local t)
+      (inherit-local 'process-environment)
+      (setenv "PATH" (concat "${lib.makeSearchPath "bin" pkgs}:" (getenv "PATH")))
+      (inherit-local-permanent exec-path (append '(${builtins.concatStringsSep " " (map (p: "\"${p}/bin\"") pkgs)}) exec-path))
+
+      ${lib.concatStringsSep "\n" extras}
+    '';
+}
diff --git a/pkgs/build-support/emacs/wrapper.nix b/pkgs/build-support/emacs/wrapper.nix
index 45931e6914a..b13def07bb8 100644
--- a/pkgs/build-support/emacs/wrapper.nix
+++ b/pkgs/build-support/emacs/wrapper.nix
@@ -85,13 +85,14 @@ stdenv.mkDerivation {
      done
 
      siteStart="$out/share/emacs/site-lisp/site-start.el"
+     siteStartByteCompiled="$siteStart"c
 
      # A dependency may have brought the original siteStart, delete it and
      # create our own
      # Begin the new site-start.el by loading the original, which sets some
      # NixOS-specific paths. Paths are searched in the reverse of the order
      # they are specified in, so user and system profile paths are searched last.
-     rm -f $siteStart
+     rm -f $siteStart $siteStartByteCompiled
      cat >"$siteStart" <<EOF
 (load-file "$emacs/share/emacs/site-lisp/site-start.el")
 (add-to-list 'load-path "$out/share/emacs/site-lisp")
diff --git a/pkgs/build-support/fetchadc/default.nix b/pkgs/build-support/fetchadc/default.nix
index ac7a442de31..4d759e6f7f1 100644
--- a/pkgs/build-support/fetchadc/default.nix
+++ b/pkgs/build-support/fetchadc/default.nix
@@ -1,15 +1,5 @@
 { stdenv, curl, adc_user, adc_pass }:
 
-let
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-  ];
-in
-
 { # Path to fetch.
   path
 
diff --git a/pkgs/build-support/fetchegg/default.nix b/pkgs/build-support/fetchegg/default.nix
index 223d2098c77..3e0d5d566ad 100644
--- a/pkgs/build-support/fetchegg/default.nix
+++ b/pkgs/build-support/fetchegg/default.nix
@@ -17,12 +17,6 @@ stdenv.mkDerivation {
 
   eggName = name;
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-  ];
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
 }
 
diff --git a/pkgs/build-support/fetchfile/builder.sh b/pkgs/build-support/fetchfile/builder.sh
deleted file mode 100644
index b849491fc5a..00000000000
--- a/pkgs/build-support/fetchfile/builder.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-source $stdenv/setup
-
-echo "copying $pathname into $out..."
-
-cp "$pathname" "$out" || exit 1
-
-actual=$(md5sum -b $out | cut -c1-32)
-if test "$actual" != "$md5"; then
-    echo "hash is $actual, expected $md5"
-    exit 1
-fi
diff --git a/pkgs/build-support/fetchfile/default.nix b/pkgs/build-support/fetchfile/default.nix
deleted file mode 100644
index bdddfab2b4d..00000000000
--- a/pkgs/build-support/fetchfile/default.nix
+++ /dev/null
@@ -1,7 +0,0 @@
-{stdenv}: {pathname, md5}: stdenv.mkDerivation {
-  name = baseNameOf (toString pathname);
-  builder = ./builder.sh;
-  pathname = pathname;
-  md5 = md5;
-  id = md5;
-}
diff --git a/pkgs/build-support/fetchgit/default.nix b/pkgs/build-support/fetchgit/default.nix
index 7f98c97fc55..e40b460d390 100644
--- a/pkgs/build-support/fetchgit/default.nix
+++ b/pkgs/build-support/fetchgit/default.nix
@@ -26,7 +26,7 @@ in
    Cloning branches will make the hash check fail when there is an update.
    But not all patches we want can be accessed by tags.
 
-   The workaround is getting the last n commits so that it's likly that they
+   The workaround is getting the last n commits so that it's likely that they
    still contain the hash we want.
 
    for now : increase depth iteratively (TODO)
@@ -56,13 +56,9 @@ stdenv.mkDerivation {
 
   GIT_SSL_CAINFO = "${cacert}/etc/ssl/certs/ca-bundle.crt";
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy" "GIT_PROXY_COMMAND" "SOCKS_SERVER"
-    ];
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars ++ [
+    "GIT_PROXY_COMMAND" "SOCKS_SERVER"
+  ];
 
   preferLocalBuild = true;
 }
diff --git a/pkgs/build-support/fetchgit/nix-prefetch-git b/pkgs/build-support/fetchgit/nix-prefetch-git
index 705d84c648b..3d656eba5ff 100755
--- a/pkgs/build-support/fetchgit/nix-prefetch-git
+++ b/pkgs/build-support/fetchgit/nix-prefetch-git
@@ -291,8 +291,8 @@ _clone_user_rev() {
     pushd "$dir" >/dev/null
     fullRev=$( (git rev-parse "$rev" 2>/dev/null || git rev-parse "refs/heads/$branchName") | tail -n1)
     humanReadableRev=$(git describe "$fullRev" 2> /dev/null || git describe --tags "$fullRev" 2> /dev/null || echo -- none --)
-    commitDate=$(git show --no-patch --pretty=%ci "$fullRev")
-    commitDateStrict8601=$(git show --no-patch --pretty=%cI "$fullRev")
+    commitDate=$(git show -1 --no-patch --pretty=%ci "$fullRev")
+    commitDateStrict8601=$(git show -1 --no-patch --pretty=%cI "$fullRev")
     popd >/dev/null
 
     # Allow doing additional processing before .git removal
@@ -322,6 +322,18 @@ clone_user_rev() {
     fi
 }
 
+json_escape() {
+    local s="$1"
+    s="${s//\\/\\\\}" # \
+    s="${s//\"/\\\"}" # "
+    s="${s//^H/\\\b}" # \b (backspace)
+    s="${s//^L/\\\f}" # \f (form feed)
+    s="${s//
+/\\\n}" # \n (newline)
+    s="${s//^M/\\\r}" # \r (carriage return)
+    s="${s//   /\\t}" # \t (tab)
+    echo "$s"
+}
 
 print_results() {
     hash="$1"
@@ -338,17 +350,15 @@ print_results() {
         fi
     fi
     if test -n "$hash"; then
-        echo "{"
-        echo "  \"url\": \"$url\","
-        echo "  \"rev\": \"$fullRev\","
-        echo "  \"date\": \"$commitDateStrict8601\","
-        echo -n "  \"$hashType\": \"$hash\""
-        if test -n "$fetchSubmodules"; then
-            echo ","
-            echo -n "  \"fetchSubmodules\": true"
-        fi
-        echo ""
-        echo "}"
+        cat <<EOF
+{
+  "url": "$(json_escape "$url")",
+  "rev": "$(json_escape "$fullRev")",
+  "date": "$(json_escape "$commitDateStrict8601")",
+  "$(json_escape "$hashType")": "$(json_escape "$hash")",
+  "fetchSubmodules": $([[ -n "$fetchSubmodules" ]] && echo true || echo false)
+}
+EOF
     fi
 }
 
@@ -396,8 +406,7 @@ else
         finalPath=$(nix-store --add-fixed --recursive "$hashType" "$tmpFile")
 
         if test -n "$expHash" -a "$expHash" != "$hash"; then
-            print_metadata
-            echo "hash mismatch for URL \`$url'" >&2
+            echo "hash mismatch for URL \`$url'. Got \`$hash'; expected \`$expHash'." >&2
             exit 1
         fi
     fi
diff --git a/pkgs/build-support/fetchgx/default.nix b/pkgs/build-support/fetchgx/default.nix
new file mode 100644
index 00000000000..c72bbec6632
--- /dev/null
+++ b/pkgs/build-support/fetchgx/default.nix
@@ -0,0 +1,30 @@
+{ stdenv, gx, gx-go, go, cacert }:
+
+{ name, src, sha256 }:
+
+stdenv.mkDerivation {
+  name = "${name}-gxdeps";
+  inherit src;
+
+  buildInputs = [ go gx gx-go ];
+
+  outputHashAlgo = "sha256";
+  outputHashMode = "recursive";
+  outputHash = sha256;
+
+  phases = [ "unpackPhase" "buildPhase" "installPhase" ];
+
+  SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";
+
+  buildPhase = ''
+    export GOPATH=$(pwd)/vendor
+    mkdir vendor
+    gx install
+  '';
+
+  installPhase = ''
+    mv vendor $out
+  '';
+
+  preferLocalBuild = true;
+}
diff --git a/pkgs/build-support/fetchhg/default.nix b/pkgs/build-support/fetchhg/default.nix
index 214e9b1387b..79f610166a7 100644
--- a/pkgs/build-support/fetchhg/default.nix
+++ b/pkgs/build-support/fetchhg/default.nix
@@ -6,9 +6,7 @@ stdenv.mkDerivation {
   builder = ./builder.sh;
   buildInputs = [mercurial];
 
-  impureEnvVars = [
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-  ];
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
 
   # Nix <= 0.7 compatibility.
   id = md5;
diff --git a/pkgs/build-support/fetchmtn/default.nix b/pkgs/build-support/fetchmtn/default.nix
index daead9ad6d2..1dc14e8cab1 100644
--- a/pkgs/build-support/fetchmtn/default.nix
+++ b/pkgs/build-support/fetchmtn/default.nix
@@ -19,12 +19,7 @@ stdenv.mkDerivation {
   dbs = defaultDBMirrors ++ dbs;
   inherit branch cacheDB name selector;
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-    ];
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
+
 }
 
diff --git a/pkgs/build-support/fetchsvn/default.nix b/pkgs/build-support/fetchsvn/default.nix
index 90dc13439a0..85ec52c4bde 100644
--- a/pkgs/build-support/fetchsvn/default.nix
+++ b/pkgs/build-support/fetchsvn/default.nix
@@ -33,13 +33,6 @@ stdenv.mkDerivation {
   
   inherit url rev sshSupport openssh ignoreExternals;
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-    ];
-
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
   preferLocalBuild = true;
 }
diff --git a/pkgs/build-support/fetchurl/boot.nix b/pkgs/build-support/fetchurl/boot.nix
index c007281e87b..722fd2566ef 100644
--- a/pkgs/build-support/fetchurl/boot.nix
+++ b/pkgs/build-support/fetchurl/boot.nix
@@ -16,5 +16,4 @@ import <nix/fetchurl.nix> {
     let m = builtins.match "mirror://([a-z]+)/(.*)" url; in
     if m == null then url
     else builtins.head (mirrors.${builtins.elemAt m 0}) + (builtins.elemAt m 1);
-
 }
diff --git a/pkgs/build-support/fetchurl/default.nix b/pkgs/build-support/fetchurl/default.nix
index a7c76737e1a..00f485ce697 100644
--- a/pkgs/build-support/fetchurl/default.nix
+++ b/pkgs/build-support/fetchurl/default.nix
@@ -20,13 +20,7 @@ let
   # "gnu", etc.).
   sites = builtins.attrNames mirrors;
 
-  impureEnvVars = [
-    # We borrow these environment variables from the caller to allow
-    # easy proxy configuration.  This is impure, but a fixed-output
-    # derivation like fetchurl is allowed to do so since its result is
-    # by definition pure.
-    "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
-
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars ++ [
     # This variable allows the user to pass additional options to curl
     "NIX_CURL_FLAGS"
 
diff --git a/pkgs/build-support/gcc-cross-wrapper/builder.sh b/pkgs/build-support/gcc-cross-wrapper/builder.sh
index 1bdda969653..b729144b860 100644
--- a/pkgs/build-support/gcc-cross-wrapper/builder.sh
+++ b/pkgs/build-support/gcc-cross-wrapper/builder.sh
@@ -8,7 +8,7 @@ mkdir $out/nix-support
 cflagsCompile="-B$out/bin/"
 
 if test -z "$nativeLibc" -a -n "$libc"; then
-    cflagsCompile="$cflagsCompile -B$gccLibs/lib -B$libc/lib/ -isystem $libc/include"
+    cflagsCompile="$cflagsCompile -B$gccLibs/lib -B$libc/lib/ -isystem $libc_dev/include"
     ldflags="$ldflags -L$libc/lib"
     # Get the proper dynamic linker for glibc and uclibc. 
     dlinker=`eval 'echo $libc/lib/ld*.so.?'`
diff --git a/pkgs/build-support/gcc-wrapper-old/ld-solaris-wrapper.sh b/pkgs/build-support/gcc-wrapper-old/ld-solaris-wrapper.sh
index 5a7b92b5ad7..263ea5408e9 100644..100755
--- a/pkgs/build-support/gcc-wrapper-old/ld-solaris-wrapper.sh
+++ b/pkgs/build-support/gcc-wrapper-old/ld-solaris-wrapper.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!@shell@
 
 set -e
 set -u
diff --git a/pkgs/build-support/grsecurity/default.nix b/pkgs/build-support/grsecurity/default.nix
index 19aa57ccd99..ccd46e20654 100644
--- a/pkgs/build-support/grsecurity/default.nix
+++ b/pkgs/build-support/grsecurity/default.nix
@@ -21,8 +21,12 @@ assert (kernel.version == grsecPatch.kver);
 
 overrideDerivation (kernel.override {
   inherit modDirVersion;
-  kernelPatches = [ grsecPatch ] ++ kernelPatches ++ (kernel.kernelPatches or []);
-  inherit extraConfig;
+  kernelPatches = lib.unique ([ grsecPatch ] ++ kernelPatches ++ (kernel.kernelPatches or []));
+  extraConfig = ''
+    GRKERNSEC y
+    PAX y
+    ${extraConfig}
+  '';
   ignoreConfigErrors = true;
 }) (attrs: {
   nativeBuildInputs = (lib.chooseDevOutputs [ gmp libmpc mpfr ]) ++ (attrs.nativeBuildInputs or []);
diff --git a/pkgs/build-support/kernel/cpio-clean.pl b/pkgs/build-support/kernel/cpio-clean.pl
deleted file mode 100644
index ddc6435a5a8..00000000000
--- a/pkgs/build-support/kernel/cpio-clean.pl
+++ /dev/null
@@ -1,17 +0,0 @@
-use strict;
-
-# Make inode number, link info and mtime consistent in order to get a consistent hash.
-#
-# Author: Alexander Kjeldaas <ak@formalprivacy.com>
-
-use Archive::Cpio;
-
-my $cpio = Archive::Cpio->new;
-my $IN = \*STDIN;
-my $ino = 1;
-$cpio->read_with_handler($IN, sub {
-        my ($e) = @_;
-        $e->{mtime} = 1;
-	$cpio->write_one(\*STDOUT, $e);
-    });
-$cpio->write_trailer(\*STDOUT);
diff --git a/pkgs/build-support/kernel/make-initrd.nix b/pkgs/build-support/kernel/make-initrd.nix
index 895160616b7..092ab4586b3 100644
--- a/pkgs/build-support/kernel/make-initrd.nix
+++ b/pkgs/build-support/kernel/make-initrd.nix
@@ -12,10 +12,10 @@
 # `contents = {object = ...; symlink = /init;}' is a typical
 # argument.
 
-{ stdenv, perl, perlArchiveCpio, cpio, contents, ubootChooser, compressor, prepend }:
+{ stdenv, perl, cpio, contents, ubootChooser, compressor, prepend }:
 
 let
-  inputsFun = ubootName : [perl cpio perlArchiveCpio ]
+  inputsFun = ubootName : [ perl cpio ]
     ++ stdenv.lib.optional (ubootName != null) [ (ubootChooser ubootName) ];
   makeUInitrdFun = ubootName : (ubootName != null);
 in
@@ -30,12 +30,11 @@ stdenv.mkDerivation {
   objects = map (x: x.object) contents;
   symlinks = map (x: x.symlink) contents;
   suffices = map (x: if x ? suffix then x.suffix else "none") contents;
-  
+
   # For obtaining the closure of `contents'.
   exportReferencesGraph =
     map (x: [("closure-" + baseNameOf x.symlink) x.object]) contents;
   pathsFromGraph = ./paths-from-graph.pl;
-  cpioClean = ./cpio-clean.pl;
 
   crossAttrs = {
     nativeBuildInputs = inputsFun stdenv.cross.platform.uboot;
diff --git a/pkgs/build-support/kernel/make-initrd.sh b/pkgs/build-support/kernel/make-initrd.sh
index 89021caa583..0aeaedeb372 100644
--- a/pkgs/build-support/kernel/make-initrd.sh
+++ b/pkgs/build-support/kernel/make-initrd.sh
@@ -39,7 +39,8 @@ mkdir -p $out
 for PREP in $prepend; do
   cat $PREP >> $out/initrd
 done
-(cd root && find * -print0 | cpio -o -H newc -R 0:0 --null | perl $cpioClean | $compressor >> $out/initrd)
+(cd root && find * -print0 | xargs -0r touch -h -d '@1')
+(cd root && find * -print0 | sort -z | cpio -o -H newc -R +0:+0 --reproducible --null | $compressor >> $out/initrd)
 
 if [ -n "$makeUInitrd" ]; then
     mv $out/initrd $out/initrd.gz
diff --git a/pkgs/build-support/kernel/modules-closure.nix b/pkgs/build-support/kernel/modules-closure.nix
index 6ae844a6246..9940e611124 100644
--- a/pkgs/build-support/kernel/modules-closure.nix
+++ b/pkgs/build-support/kernel/modules-closure.nix
@@ -3,10 +3,10 @@
 # the modules identified by `rootModules', plus their dependencies.
 # Also generate an appropriate modules.dep.
 
-{ stdenv, kernel, nukeReferences, rootModules
+{ stdenvNoCC, kernel, nukeReferences, rootModules
 , kmod, allowMissing ? false }:
 
-stdenv.mkDerivation {
+stdenvNoCC.mkDerivation {
   name = kernel.name + "-shrunk";
   builder = ./modules-closure.sh;
   buildInputs = [ nukeReferences kmod ];
diff --git a/pkgs/build-support/ocaml/default.nix b/pkgs/build-support/ocaml/default.nix
index 50f7627568d..cc2001c66e2 100644
--- a/pkgs/build-support/ocaml/default.nix
+++ b/pkgs/build-support/ocaml/default.nix
@@ -1,4 +1,4 @@
-{ stdenv, writeText, ocaml, findlib, camlp4 }:
+{ stdenv, writeText, ocaml, findlib, ocamlbuild, camlp4 }:
 
 { name, version, buildInputs ? [],
   createFindlibDestdir ?  true,
@@ -9,26 +9,24 @@
   meta ? {}, ...
 }@args:
 let
-  ocaml_version = (builtins.parseDrvName ocaml.name).version;
   defaultMeta = {
     platforms = ocaml.meta.platforms or [];
   };
 in
   assert minimumSupportedOcamlVersion != null ->
-          stdenv.lib.versionOlder minimumSupportedOcamlVersion ocaml_version;
+          stdenv.lib.versionOlder minimumSupportedOcamlVersion ocaml.version;
 
 stdenv.mkDerivation (args // {
   name = "ocaml-${name}-${version}";
 
-  buildInputs = [ ocaml findlib camlp4 ] ++ buildInputs;
+  buildInputs = [ ocaml findlib ocamlbuild camlp4 ] ++ buildInputs;
 
   setupHook = if setupHook == null && hasSharedObjects
   then writeText "setupHook.sh" ''
-    export CAML_LD_LIBRARY_PATH="''${CAML_LD_LIBRARY_PATH}''${CAML_LD_LIBRARY_PATH:+:}''$1/lib/ocaml/${ocaml_version}/site-lib/${name}/"
+    export CAML_LD_LIBRARY_PATH="''${CAML_LD_LIBRARY_PATH}''${CAML_LD_LIBRARY_PATH:+:}''$1/lib/ocaml/${ocaml.version}/site-lib/${name}/"
     ''
   else setupHook;
 
-  inherit ocaml_version;
   inherit createFindlibDestdir;
   inherit dontStrip;
 
diff --git a/pkgs/build-support/replace-dependency.nix b/pkgs/build-support/replace-dependency.nix
index b0174ca24ab..15ab50bf397 100644
--- a/pkgs/build-support/replace-dependency.nix
+++ b/pkgs/build-support/replace-dependency.nix
@@ -22,7 +22,7 @@
 with lib;
 
 let
-  warn = if verbose then builtins.trace else (x:y:y);
+  warn = if verbose then builtins.trace else (x: y: y);
   references = import (runCommand "references.nix" { exportReferencesGraph = [ "graph" drv ]; } ''
     (echo {
     while read path
diff --git a/pkgs/build-support/rust/default.nix b/pkgs/build-support/rust/default.nix
index bbea045f637..a69ef5c6b07 100644
--- a/pkgs/build-support/rust/default.nix
+++ b/pkgs/build-support/rust/default.nix
@@ -6,6 +6,7 @@
 , logLevel ? ""
 , buildInputs ? []
 , cargoUpdateHook ? ""
+, cargoDepsHook ? ""
 , ... } @ args:
 
 let
@@ -28,6 +29,8 @@ in stdenv.mkDerivation (args // {
   configurePhase = args.configurePhase or "true";
 
   postUnpack = ''
+    eval "$cargoDepsHook"
+
     echo "Using cargo deps from $cargoDeps"
 
     cp -r "$cargoDeps" deps
diff --git a/pkgs/build-support/rust/fetchcargo.nix b/pkgs/build-support/rust/fetchcargo.nix
index 1b4983e3259..0c9625e5140 100644
--- a/pkgs/build-support/rust/fetchcargo.nix
+++ b/pkgs/build-support/rust/fetchcargo.nix
@@ -20,6 +20,6 @@ stdenv.mkDerivation {
   outputHashMode = "recursive";
   outputHash = sha256;
 
-  impureEnvVars = [ "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy" ];
+  impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
   preferLocalBuild = true;
 }
diff --git a/pkgs/build-support/setup-hooks/multiple-outputs.sh b/pkgs/build-support/setup-hooks/multiple-outputs.sh
index 2a0ebd52355..eafc770a8e1 100644
--- a/pkgs/build-support/setup-hooks/multiple-outputs.sh
+++ b/pkgs/build-support/setup-hooks/multiple-outputs.sh
@@ -16,6 +16,7 @@ _assignFirst() {
     echo "Error: _assignFirst found no valid variant!"
     return 1 # none found
 }
+
 # Same as _assignFirst, but only if "$1" = ""
 _overrideFirst() {
     if [ -z "${!1}" ]; then
@@ -37,9 +38,10 @@ _overrideFirst outputInclude "$outputDev"
 _overrideFirst outputLib "lib" "out"
 
 _overrideFirst outputDoc "doc" "out"
-_overrideFirst outputDocdev "docdev" REMOVE # documentation for developers
+_overrideFirst outputDevdoc "devdoc" REMOVE # documentation for developers
 # man and info pages are small and often useful to distribute with binaries
 _overrideFirst outputMan "man" "doc" "$outputBin"
+_overrideFirst outputDevman "devman" "devdoc" "$outputMan"
 _overrideFirst outputInfo "info" "doc" "$outputMan"
 
 
@@ -96,7 +98,8 @@ moveToOutput() {
         if [ "${!output}" = "$dstOut" ]; then continue; fi
         local srcPath
         for srcPath in "${!output}"/$patt; do
-            if [ ! -e "$srcPath" ]; then continue; fi
+            # apply to existing files/dirs, *including* broken symlinks
+            if [ ! -e "$srcPath" ] && [ ! -L "$srcPath" ]; then continue; fi
 
             if [ "$dstOut" = REMOVE ]; then
                 echo "Removing $srcPath"
@@ -136,11 +139,11 @@ _multioutDocs() {
 
     moveToOutput share/info "${!outputInfo}"
     moveToOutput share/doc "${!outputDoc}"
-    moveToOutput share/gtk-doc "${!outputDocdev}"
+    moveToOutput share/gtk-doc "${!outputDevdoc}"
 
     # the default outputMan is in $bin
     moveToOutput share/man "${!outputMan}"
-    moveToOutput share/man/man3 "${!outputDocdev}"
+    moveToOutput share/man/man3 "${!outputDevman}"
 }
 
 # Move development-only stuff to the desired outputs.
diff --git a/pkgs/build-support/setup-hooks/win-dll-link.sh b/pkgs/build-support/setup-hooks/win-dll-link.sh
index be63f69ca10..634a9d18f00 100644
--- a/pkgs/build-support/setup-hooks/win-dll-link.sh
+++ b/pkgs/build-support/setup-hooks/win-dll-link.sh
@@ -33,7 +33,7 @@ _linkDLLs() {
         # That DLL might have its own (transitive) dependencies,
         # so add also all DLLs from its directory to be sure.
         local dllPath2
-        for dllPath2 in "$dllPath" "$(dirname "$dllPath")"/*.dll; do
+        for dllPath2 in "$dllPath" "$(dirname $(readlink "$dllPath" || echo "$dllPath"))"/*.dll; do
             if [ -e ./"$(basename "$dllPath2")" ]; then continue; fi
             ln -sr "$dllPath2" .
             linkCount=$(($linkCount+1))
diff --git a/pkgs/build-support/singularity-tools/default.nix b/pkgs/build-support/singularity-tools/default.nix
new file mode 100644
index 00000000000..3c27b9fc1ad
--- /dev/null
+++ b/pkgs/build-support/singularity-tools/default.nix
@@ -0,0 +1,100 @@
+{ runCommand
+, stdenv
+, storeDir ? builtins.storeDir
+, writeScript
+, singularity
+, writeReferencesToFile
+, bash
+, vmTools
+, gawk
+, utillinux
+, e2fsprogs
+, squashfsTools }:
+
+rec {
+  shellScript = name: text:
+    writeScript name ''
+      #!${stdenv.shell}
+      set -e
+      ${text}
+    '';
+
+  mkLayer = {
+    name,
+    contents ? [],
+  }:
+    runCommand "singularity-layer-${name}" {
+      inherit contents;
+    } ''
+      mkdir $out
+      for f in $contents ; do
+        cp -ra $f $out/
+      done
+    '';
+
+  buildImage = {
+    name,
+    contents ? [],
+    diskSize ? 1024,
+    runScript ? "#!${stdenv.shell}\nexec /bin/sh",
+    runAsRoot ? null,
+    extraSpace ? 0
+  }:
+    let layer = mkLayer {
+          inherit name;
+          contents = contents ++ [ bash runScriptFile ];
+          };
+        runAsRootFile = shellScript "run-as-root.sh" runAsRoot;
+        runScriptFile = shellScript "run-script.sh" runScript;
+        result = vmTools.runInLinuxVM (
+          runCommand "singularity-image-${name}.img" {
+            buildInputs = [ singularity e2fsprogs utillinux gawk ];
+            layerClosure = writeReferencesToFile layer;
+            preVM = vmTools.createEmptyImage {
+              size = diskSize;
+              fullName = "singularity-run-disk";
+            };
+          }
+          ''
+            rm -rf $out
+            mkdir disk
+            mkfs -t ext3 -b 4096 /dev/${vmTools.hd}
+            mount /dev/${vmTools.hd} disk
+            cd disk
+
+            # Run root script
+            ${stdenv.lib.optionalString (runAsRoot != null) ''
+              mkdir -p ./${storeDir}
+              mount --rbind ${storeDir} ./${storeDir}
+              unshare -imnpuf --mount-proc chroot ./ ${runAsRootFile}
+              umount -R ./${storeDir}
+            ''}
+
+            # Build /bin and copy across closure
+            mkdir -p bin nix/store
+            for f in $(cat $layerClosure) ; do
+              cp -ar $f ./$f
+              for f in $f/bin/* ; do
+                if [ ! -e bin/$(basename $f) ] ; then
+                  ln -s $f bin/
+                fi
+              done
+            done
+
+            # Create runScript
+            ln -s ${runScriptFile} singularity
+
+            # Size calculation
+            cd ..
+            umount disk
+            size=$(resize2fs -P /dev/${vmTools.hd} | awk '{print $NF}')
+            mount /dev/${vmTools.hd} disk
+            cd disk
+
+            export PATH=$PATH:${e2fsprogs}/bin/
+            singularity create -s $((1 + size * 4 / 1024 + ${toString extraSpace})) $out
+            tar -c . | singularity import $out
+          '');
+
+    in result;
+}
diff --git a/pkgs/build-support/substitute/substitute-all.nix b/pkgs/build-support/substitute/substitute-all.nix
index 1022b25c4c9..7fd46f95f99 100644
--- a/pkgs/build-support/substitute/substitute-all.nix
+++ b/pkgs/build-support/substitute/substitute-all.nix
@@ -1,9 +1,9 @@
-{ stdenv }:
+{ stdenvNoCC }:
 
 args:
 
 # see the substituteAll in the nixpkgs documentation for usage and constaints
-stdenv.mkDerivation ({
+stdenvNoCC.mkDerivation ({
   name = if args ? name then args.name else baseNameOf (toString args.src);
   builder = ./substitute-all.sh;
   inherit (args) src;
diff --git a/pkgs/build-support/trivial-builders.nix b/pkgs/build-support/trivial-builders.nix
index 8775286b117..1529869aa33 100644
--- a/pkgs/build-support/trivial-builders.nix
+++ b/pkgs/build-support/trivial-builders.nix
@@ -1,16 +1,24 @@
-{ lib, stdenv, lndir }:
+{ lib, stdenv, stdenvNoCC, lndir }:
 
-rec {
+let
 
-  # Run the shell command `buildCommand' to produce a store path named
-  # `name'.  The attributes in `env' are added to the environment
-  # prior to running the command.
-  runCommand = name: env: buildCommand:
+  runCommand' = stdenv: name: env: buildCommand:
     stdenv.mkDerivation ({
       inherit name buildCommand;
       passAsFile = [ "buildCommand" ];
     } // env);
 
+in
+
+rec {
+
+  # Run the shell command `buildCommand' to produce a store path named
+  # `name'.  The attributes in `env' are added to the environment
+  # prior to running the command.
+  runCommand = runCommandNoCC;
+  runCommandNoCC = runCommand' stdenvNoCC;
+  runCommandCC = runCommand' stdenv;
+
 
   # Create a single file.
   writeTextFile =
@@ -97,12 +105,17 @@ rec {
       done < graph
     '';
 
+
   # Quickly create a set of symlinks to derivations.
   # entries is a list of attribute sets like { name = "name" ; path = "/nix/store/..."; }
-  linkFarm = name: entries: runCommand name {} ("mkdir -p $out; cd $out; \n" +
-    (lib.concatMapStrings (x: "ln -s '${x.path}' '${x.name}';\n") entries));
+  linkFarm = name: entries: runCommand name { preferLocalBuild = true; }
+    ("mkdir -p $out; cd $out; \n" +
+      (lib.concatMapStrings (x: "ln -s '${x.path}' '${x.name}';\n") entries));
+
 
-  # Require file
+  # Print an error message if the file with the specified name and
+  # hash doesn't exist in the Nix store. Do not use this function; it
+  # produces packages that cannot be built automatically.
   requireFile = { name ? null
                 , sha256 ? null
                 , sha1 ? null
@@ -115,8 +128,8 @@ rec {
     let msg =
       if message != null then message
       else ''
-        Unfortunately, we may not download file ${name_} automatically.
-        Please, go to ${url} to download it yourself, and add it to the Nix store
+        Unfortunately, we cannot download file ${name_} automatically.
+        Please go to ${url} to download it yourself, and add it to the Nix store
         using either
           nix-store --add-fixed ${hashAlgo} ${name_}
         or
@@ -143,30 +156,6 @@ rec {
       '';
     };
 
-  # Search in the environment if the same program exists with a set uid or
-  # set gid bit.  If it exists, run the first program found, otherwise run
-  # the default binary.
-  useSetUID = drv: path:
-    let
-      name = baseNameOf path;
-      bin = "${drv}${path}";
-    in assert name != "";
-      writeScript "setUID-${name}" ''
-        #!${stdenv.shell}
-        inode=$(stat -Lc %i ${bin})
-        for file in $(type -ap ${name}); do
-          case $(stat -Lc %a $file) in
-            ([2-7][0-7][0-7][0-7])
-              if test -r "$file".real; then
-                orig=$(cat "$file".real)
-                if test $inode = $(stat -Lc %i "$orig"); then
-                  exec "$file" "$@"
-                fi
-              fi;;
-          esac
-        done
-        exec ${bin} "$@"
-      '';
 
   # Copy a path to the Nix store.
   # Nix automatically copies files to the store before stringifying paths.
@@ -174,6 +163,7 @@ rec {
   # shortened to ${<path>}.
   copyPathToStore = builtins.filterSource (p: t: true);
 
+
   # Copy a list of paths to the Nix store.
   copyPathsToStore = builtins.map copyPathToStore;
 
diff --git a/pkgs/build-support/vm/default.nix b/pkgs/build-support/vm/default.nix
index 5b407f530dc..d03265c089a 100644
--- a/pkgs/build-support/vm/default.nix
+++ b/pkgs/build-support/vm/default.nix
@@ -1,6 +1,7 @@
 { pkgs
 , kernel ? pkgs.linux
 , img ? "bzImage"
+, storeDir ? builtins.storeDir
 , rootModules ?
     [ "virtio_pci" "virtio_blk" "virtio_balloon" "virtio_rng" "ext4" "unix" "9p" "9pnet_virtio" "rtc_cmos" ]
 }:
@@ -123,12 +124,13 @@ rec {
     mkdir -p /fs/dev
     mount -o bind /dev /fs/dev
 
-    mkdir -p /fs/dev /fs/dev/shm
+    mkdir -p /fs/dev/shm /fs/dev/pts
     mount -t tmpfs -o "mode=1777" none /fs/dev/shm
+    mount -t devpts none /fs/dev/pts
 
     echo "mounting Nix store..."
-    mkdir -p /fs/nix/store
-    mount -t 9p store /fs/nix/store -o trans=virtio,version=9p2000.L,cache=loose
+    mkdir -p /fs${storeDir}
+    mount -t 9p store /fs${storeDir} -o trans=virtio,version=9p2000.L,veryloose
 
     mkdir -p /fs/tmp /fs/run /fs/var
     mount -t tmpfs -o "mode=1777" none /fs/tmp
@@ -137,7 +139,7 @@ rec {
 
     echo "mounting host's temporary directory..."
     mkdir -p /fs/tmp/xchg
-    mount -t 9p xchg /fs/tmp/xchg -o trans=virtio,version=9p2000.L,cache=loose
+    mount -t 9p xchg /fs/tmp/xchg -o trans=virtio,version=9p2000.L,veryloose
 
     mkdir -p /fs/proc
     mount -t proc none /fs/proc
@@ -171,7 +173,7 @@ rec {
     # apparent KVM > 1.5.2 bug.
     ${pkgs.utillinux}/bin/hwclock -s
 
-    export NIX_STORE=/nix/store
+    export NIX_STORE=${storeDir}
     export NIX_BUILD_TOP=/tmp
     export TMPDIR=/tmp
     export PATH=/empty
@@ -219,7 +221,7 @@ rec {
       ${lib.optionalString (pkgs.stdenv.system == "x86_64-linux") "-cpu kvm64"} \
       -nographic -no-reboot \
       -device virtio-rng-pci \
-      -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
+      -virtfs local,path=${storeDir},security_model=none,mount_tag=store \
       -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
       -drive file=$diskImage,if=virtio,cache=unsafe,werror=report \
       -kernel ${kernel}/${img} \
@@ -261,9 +263,12 @@ rec {
       exit 1
     fi
 
-    eval "$postVM"
+    exitCode="$(cat xchg/in-vm-exit)"
+    if [ "$exitCode" != "0" ]; then
+      exit "$exitCode"
+    fi
 
-    exit $(cat xchg/in-vm-exit)
+    eval "$postVM"
   '';
 
 
@@ -294,7 +299,7 @@ rec {
 
   /* Run a derivation in a Linux virtual machine (using Qemu/KVM).  By
      default, there is no disk image; the root filesystem is a tmpfs,
-     and /nix/store is shared with the host (via the 9P protocol).
+     and the nix store is shared with the host (via the 9P protocol).
      Thus, any pure Nix derivation should run unmodified, e.g. the
      call
 
@@ -430,8 +435,8 @@ rec {
         chroot=$(type -tP chroot)
 
         # Make the Nix store available in /mnt, because that's where the RPMs live.
-        mkdir -p /mnt/nix/store
-        ${utillinux}/bin/mount -o bind /nix/store /mnt/nix/store
+        mkdir -p /mnt${storeDir}
+        ${utillinux}/bin/mount -o bind ${storeDir} /mnt${storeDir}
 
         # Newer distributions like Fedora 18 require /lib etc. to be
         # symlinked to /usr.
@@ -470,7 +475,7 @@ rec {
 
         rm /mnt/.debug
 
-        ${utillinux}/bin/umount /mnt/nix/store /mnt/tmp ${lib.optionalString unifiedSystemDir "/mnt/proc"}
+        ${utillinux}/bin/umount /mnt${storeDir} /mnt/tmp ${lib.optionalString unifiedSystemDir "/mnt/proc"}
         ${utillinux}/bin/umount /mnt
       '';
 
@@ -533,8 +538,7 @@ rec {
 
       # Hacky: RPM looks for <basename>.spec inside the tarball, so
       # strip off the hash.
-      stripHash "$src"
-      srcName="$strippedName"
+      srcName="$(stripHash "$src")"
       cp "$src" "$srcName" # `ln' doesn't work always work: RPM requires that the file is owned by root
 
       export HOME=/tmp/home
@@ -601,8 +605,8 @@ rec {
         done
 
         # Make the Nix store available in /mnt, because that's where the .debs live.
-        mkdir -p /mnt/inst/nix/store
-        ${utillinux}/bin/mount -o bind /nix/store /mnt/inst/nix/store
+        mkdir -p /mnt/inst${storeDir}
+        ${utillinux}/bin/mount -o bind ${storeDir} /mnt/inst${storeDir}
         ${utillinux}/bin/mount -o bind /proc /mnt/proc
         ${utillinux}/bin/mount -o bind /dev /mnt/dev
 
@@ -650,7 +654,7 @@ rec {
 
         rm /mnt/.debug
 
-        ${utillinux}/bin/umount /mnt/inst/nix/store
+        ${utillinux}/bin/umount /mnt/inst${storeDir}
         ${utillinux}/bin/umount /mnt/proc
         ${utillinux}/bin/umount /mnt/dev
         ${utillinux}/bin/umount /mnt
@@ -1882,22 +1886,22 @@ rec {
     };
 
     debian8i386 = {
-      name = "debian-8.5-jessie-i386";
-      fullName = "Debian 8.5 Jessie (i386)";
+      name = "debian-8.6-jessie-i386";
+      fullName = "Debian 8.6 Jessie (i386)";
       packagesList = fetchurl {
         url = mirror://debian/dists/jessie/main/binary-i386/Packages.xz;
-        sha256 = "f87a1ee673b335c28cb6ac87be61d6ef20f32dd847835c2bb7d400a00a464c7f";
+        sha256 = "b915c936233609af3ecf9272cd53fbdb2144d463e8472a30507aa112ef5e6a6b";
       };
       urlPrefix = mirror://debian;
       packages = commonDebianPackages;
     };
 
     debian8x86_64 = {
-      name = "debian-8.5-jessie-amd64";
-      fullName = "Debian 8.5 Jessie (amd64)";
+      name = "debian-8.6-jessie-amd64";
+      fullName = "Debian 8.6 Jessie (amd64)";
       packagesList = fetchurl {
         url = mirror://debian/dists/jessie/main/binary-amd64/Packages.xz;
-        sha256 = "df6aea15d5547ae8dc6d7ceadc8bf6499bc5a3907d13231f811bf3c1c22474ef";
+        sha256 = "8b80b6608a8fc72509b949efe1730077f0e8383b29c6aed5f86d9f9b51a631d8";
       };
       urlPrefix = mirror://debian;
       packages = commonDebianPackages;
diff --git a/pkgs/build-support/vm/windows/bootstrap.nix b/pkgs/build-support/vm/windows/bootstrap.nix
index ebea819b191..3b06d8f4749 100644
--- a/pkgs/build-support/vm/windows/bootstrap.nix
+++ b/pkgs/build-support/vm/windows/bootstrap.nix
@@ -1,5 +1,5 @@
 { stdenv, fetchurl, vmTools, writeScript, writeText, runCommand, makeInitrd
-, python, perl, coreutils, dosfstools, gzip, mtools, netcat, openssh, qemu
+, python, perl, coreutils, dosfstools, gzip, mtools, netcat-gnu, openssh, qemu
 , samba, socat, vde2, cdrkit, pathsFromGraph, gnugrep
 }:
 
@@ -10,7 +10,7 @@ with stdenv.lib;
 let
   controller = import ./controller {
     inherit stdenv writeScript vmTools makeInitrd;
-    inherit samba vde2 openssh socat netcat coreutils gzip gnugrep;
+    inherit samba vde2 openssh socat netcat-gnu coreutils gzip gnugrep;
   };
 
   mkCygwinImage = import ./cygwin-iso {
diff --git a/pkgs/build-support/vm/windows/controller/default.nix b/pkgs/build-support/vm/windows/controller/default.nix
index 06a0a229306..9009702113e 100644
--- a/pkgs/build-support/vm/windows/controller/default.nix
+++ b/pkgs/build-support/vm/windows/controller/default.nix
@@ -1,5 +1,5 @@
 { stdenv, writeScript, vmTools, makeInitrd
-, samba, vde2, openssh, socat, netcat, coreutils, gnugrep, gzip
+, samba, vde2, openssh, socat, netcat-gnu, coreutils, gnugrep, gzip
 }:
 
 { sshKey
@@ -79,7 +79,7 @@ let
     ${coreutils}/bin/chmod 600 /ssh.key
   '' + (if installMode then ''
     echo -n "Waiting for Windows installation to finish..."
-    while ! ${netcat}/bin/netcat -z 192.168.0.1 22; do
+    while ! ${netcat-gnu}/bin/netcat -z 192.168.0.1 22; do
       echo -n .
       # Print a dot every 10 seconds only to shorten line length.
       ${coreutils}/bin/sleep 10
@@ -118,7 +118,7 @@ let
     ${samba}/sbin/smbd -D
 
     echo -n "Waiting for Windows VM to become available..."
-    while ! ${netcat}/bin/netcat -z 192.168.0.1 22; do
+    while ! ${netcat-gnu}/bin/netcat -z 192.168.0.1 22; do
       echo -n .
       ${coreutils}/bin/sleep 1
     done
diff --git a/pkgs/build-support/vm/windows/default.nix b/pkgs/build-support/vm/windows/default.nix
index f9f1d75c70d..c668e7569a4 100644
--- a/pkgs/build-support/vm/windows/default.nix
+++ b/pkgs/build-support/vm/windows/default.nix
@@ -3,7 +3,7 @@ pkgs:
 let
   bootstrapper = import ./bootstrap.nix {
     inherit (pkgs) stdenv vmTools writeScript writeText runCommand makeInitrd;
-    inherit (pkgs) coreutils dosfstools gzip mtools netcat openssh qemu samba;
+    inherit (pkgs) coreutils dosfstools gzip mtools netcat-gnu openssh qemu samba;
     inherit (pkgs) socat vde2 fetchurl python perl cdrkit pathsFromGraph;
     inherit (pkgs) gnugrep;
   };