Diffstat (limited to 'pkgs/build-support/docker')
-rw-r--r--  pkgs/build-support/docker/default.nix              | 265
-rw-r--r--  pkgs/build-support/docker/detjson.py               |   2
-rw-r--r--  pkgs/build-support/docker/examples.nix             | 211
-rwxr-xr-x  pkgs/build-support/docker/nix-prefetch-docker      |  14
-rw-r--r--  pkgs/build-support/docker/nix-prefetch-docker.nix  |   8
-rw-r--r--  pkgs/build-support/docker/stream_layered_image.py  | 184
6 files changed, 519 insertions(+), 165 deletions(-)
diff --git a/pkgs/build-support/docker/default.nix b/pkgs/build-support/docker/default.nix
index bf815af6f7c..4bda09670ab 100644
--- a/pkgs/build-support/docker/default.nix
+++ b/pkgs/build-support/docker/default.nix
@@ -1,4 +1,5 @@
 {
+  bashInteractive,
   buildPackages,
   cacert,
   callPackage,
@@ -6,6 +7,7 @@
   coreutils,
   docker,
   e2fsprogs,
+  fakeroot,
   findutils,
   go,
   jq,
@@ -15,27 +17,30 @@
   moreutils,
   nix,
   pigz,
-  referencesByPopularity,
   rsync,
   runCommand,
   runtimeShell,
   shadow,
   skopeo,
-  stdenv,
   storeDir ? builtins.storeDir,
   substituteAll,
   symlinkJoin,
-  utillinux,
+  util-linux,
   vmTools,
   writeReferencesToFile,
   writeScript,
   writeText,
+  writeTextDir,
   writePython3,
+  system,  # Note: This is the cross system we're compiling for
 }:
 
-# WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.
 let
 
+  inherit (lib)
+    optionals
+    ;
+
   mkDbExtraCommand = contents: let
     contentsList = if builtins.isList contents then contents else [ contents ];
   in ''
@@ -48,7 +53,7 @@ let
     # A user is required by nix
     # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
     export USER=nobody
-    ${nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
+    ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
 
     mkdir -p nix/var/nix/gcroots/docker/
     for i in ${lib.concatStringsSep " " contentsList}; do
@@ -56,11 +61,18 @@ let
     done;
   '';
 
+  # The OCI Image specification recommends that configurations use values listed
+  # in the Go Language document for GOARCH.
+  # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
+  # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
+  # mapping from the go package.
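+  # For example, "x86_64-linux" maps to the GOARCH value "amd64", and
+  # "aarch64-linux" maps to "arm64".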
+  defaultArch = go.GOARCH;
+
 in
 rec {
 
   examples = callPackage ./examples.nix {
-    inherit buildImage pullImage shadowSetup buildImageWithNixDb;
+    inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb;
   };
 
   pullImage = let
@@ -72,12 +84,14 @@ rec {
     , imageDigest
     , sha256
     , os ? "linux"
-    , arch ? buildPackages.go.GOARCH
+    , arch ? defaultArch
 
      # This is used to set the name of the pulled image
    , finalImageName ? imageName
      # This is used to set the tag of the pulled image
     , finalImageTag ? "latest"
+      # This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
+    , tlsVerify ? true
 
     , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
     }:
@@ -86,7 +100,7 @@ rec {
       inherit imageDigest;
       imageName = finalImageName;
       imageTag = finalImageTag;
-      impureEnvVars = stdenv.lib.fetchers.proxyImpureEnvVars;
+      impureEnvVars = lib.fetchers.proxyImpureEnvVars;
       outputHashMode = "flat";
       outputHashAlgo = "sha256";
       outputHash = sha256;
@@ -97,11 +111,18 @@ rec {
       sourceURL = "docker://${imageName}@${imageDigest}";
       destNameTag = "${finalImageName}:${finalImageTag}";
     } ''
-      skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceURL" "docker-archive://$out:$destNameTag"
+      skopeo \
+        --src-tls-verify=${lib.boolToString tlsVerify} \
+        --insecure-policy \
+        --tmpdir=$TMPDIR \
+        --override-os ${os} \
+        --override-arch ${arch} \
+        copy "$sourceURL" "docker-archive://$out:$destNameTag" \
+        | cat  # pipe through cat to force-disable progress bar
     '';
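
A minimal usage sketch of pullImage with the new tlsVerify flag; the registry,
digest, and hash below are placeholders, and tlsVerify = false is only
appropriate for a trusted plain-HTTP registry:

    pulled = pkgs.dockerTools.pullImage {
      imageName = "registry.example.com/some-image";  # placeholder name
      imageDigest = "sha256:<digest of the image>";   # placeholder digest
      sha256 = pkgs.lib.fakeSha256;  # substitute the real output hash
      tlsVerify = false;  # skip TLS verification for this trusted registry
    };
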
 
   # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
-  # And we cannot untar it, because then we cannot preserve permissions ecc.
+  # And we cannot untar it, because then we cannot preserve permissions etc.
   tarsum = runCommand "tarsum" {
     nativeBuildInputs = [ go ];
   } ''
@@ -111,8 +132,9 @@ rec {
     cp ${./tarsum.go} tarsum.go
     export GOPATH=$(pwd)
     export GOCACHE="$TMPDIR/go-cache"
+    export GO111MODULE=off
     mkdir -p src/github.com/docker/docker/pkg
-    ln -sT ${docker.src}/components/engine/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
+    ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
     go build
 
     mkdir -p $out/bin
@@ -194,7 +216,7 @@ rec {
         };
         inherit fromImage fromImageName fromImageTag;
 
-        nativeBuildInputs = [ utillinux e2fsprogs jshon rsync jq ];
+        nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
       } ''
       mkdir disk
       mkfs /dev/${vmTools.hd}
@@ -340,7 +362,7 @@ rec {
       # Tar up the layer and throw it into 'layer.tar'.
       echo "Packing layer..."
       mkdir $out
-      tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee $out/layer.tar | tarsum)
+      tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
 
       # Add a 'checksum' field to the JSON, with the value set to the
       # checksum of the tarball.
@@ -410,7 +432,11 @@ rec {
         # details on what's going on here; basically this command
         # means that the runAsRootScript will be executed in a nearly
         # completely isolated environment.
-        unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}
+        #
+        # Ideally we would use --mount-proc=mnt/proc or similar, but this
+        # doesn't work. The workaround is to set up proc after unshare.
+        # See: https://github.com/karelzak/util-linux/issues/648
+        unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'
 
         # Unmount directories and remove them.
         umount -R mnt/dev mnt/sys mnt${storeDir}
@@ -425,7 +451,7 @@ rec {
         echo "Packing layer..."
         mkdir -p $out
         tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
-                    tee $out/layer.tar |
+                    tee -p $out/layer.tar |
                     ${tarsum}/bin/tarsum)
 
         cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
@@ -440,10 +466,10 @@ rec {
     let
       stream = streamLayeredImage args;
     in
-      runCommand "${name}.tar.gz" {
+      runCommand "${baseNameOf name}.tar.gz" {
         inherit (stream) imageName;
         passthru = { inherit (stream) imageTag; };
-        buildInputs = [ pigz ];
+        nativeBuildInputs = [ pigz ];
       } "${stream} | pigz -nT > $out";
 
   # 1. extract the base image
@@ -488,7 +514,7 @@ rec {
       baseJson = let
           pure = writeText "${baseName}-config.json" (builtins.toJSON {
             inherit created config;
-            architecture = buildPackages.go.GOARCH;
+            architecture = defaultArch;
             os = "linux";
           });
           impure = runCommand "${baseName}-config.json"
@@ -511,16 +537,16 @@ rec {
         };
       result = runCommand "docker-image-${baseName}.tar.gz" {
         nativeBuildInputs = [ jshon pigz coreutils findutils jq moreutils ];
-        # Image name and tag must be lowercase
+        # Image name must be lowercase
         imageName = lib.toLower name;
-        imageTag = if tag == null then "" else lib.toLower tag;
+        imageTag = if tag == null then "" else tag;
         inherit fromImage baseJson;
         layerClosure = writeReferencesToFile layer;
         passthru.buildArgs = args;
         passthru.layer = layer;
         passthru.imageTag =
           if tag != null
-            then lib.toLower tag
+            then tag
             else
               lib.head (lib.strings.splitString "-" (baseNameOf result.outPath));
         # Docker can't be made to run darwin binaries
@@ -674,6 +700,69 @@ rec {
     in
     result;
 
+  # Merge the tarballs of images built with buildImage into a single
+  # tarball that contains all images. Running `docker load` on the resulting
+  # tarball will load the images into the docker daemon.
+  mergeImages = images: runCommand "merge-docker-images"
+    {
+      inherit images;
+      nativeBuildInputs = [ pigz jq ];
+    } ''
+    mkdir image inputs
+    # Extract images
+    repos=()
+    manifests=()
+    for item in $images; do
+      name=$(basename $item)
+      mkdir inputs/$name
+      tar -I pigz -xf $item -C inputs/$name
+      if [ -f inputs/$name/repositories ]; then
+        repos+=(inputs/$name/repositories)
+      fi
+      if [ -f inputs/$name/manifest.json ]; then
+        manifests+=(inputs/$name/manifest.json)
+      fi
+    done
+    # Copy all layers from input images to output image directory
+    cp -R --no-clobber inputs/*/* image/
+    # Merge repositories objects and manifests
+    jq -s add "''${repos[@]}" > repositories
+    jq -s add "''${manifests[@]}" > manifest.json
+    # Replace output image repositories and manifest with merged versions
+    mv repositories image/repositories
+    mv manifest.json image/manifest.json
+    # Create tarball and gzip
+    tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
+  '';
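
A sketch of how mergeImages can be consumed; the two inline buildImage calls
and their names are hypothetical stand-ins for any buildImage results:

    merged = pkgs.dockerTools.mergeImages [
      (pkgs.dockerTools.buildImage {
        name = "image-a";  # hypothetical
        config.Cmd = [ "${pkgs.hello}/bin/hello" ];
      })
      (pkgs.dockerTools.buildImage {
        name = "image-b";  # hypothetical
        config.Cmd = [ "${pkgs.coreutils}/bin/env" ];
      })
    ];

Running 'docker load < result' on the merged tarball then loads both images
into the daemon in one step.
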
+
+
+  # Provide an /etc/passwd and /etc/group that contain root and nobody.
+  # Useful when packaging binaries that insist on using nss to look up
+  # username/groups (like nginx).
+  # /bin/sh need not exist here; it is provided by the separate binSh shim.
+  fakeNss = symlinkJoin {
+    name = "fake-nss";
+    paths = [
+      (writeTextDir "etc/passwd" ''
+        root:x:0:0:root user:/var/empty:/bin/sh
+        nobody:x:65534:65534:nobody:/var/empty:/bin/sh
+      '')
+      (writeTextDir "etc/group" ''
+        root:x:0:
+        nobody:x:65534:
+      '')
+      (runCommand "var-empty" {} ''
+        mkdir -p $out/var/empty
+      '')
+    ];
+  };
+
+  # This provides /bin/sh, pointing to bashInteractive.
+  binSh = runCommand "bin-sh" {} ''
+    mkdir -p $out/bin
+    ln -s ${bashInteractive}/bin/bash $out/bin/sh
+  '';
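
A sketch combining the two shims in a layered image (the image name is
hypothetical): fakeNss supplies /etc/passwd and /etc/group, while binSh
provides the /bin/sh that fakeNss deliberately leaves out:

    nginxImage = pkgs.dockerTools.buildLayeredImage {
      name = "nginx-with-shims";  # hypothetical
      tag = "latest";
      contents = [
        pkgs.dockerTools.fakeNss  # /etc/passwd, /etc/group, /var/empty
        pkgs.dockerTools.binSh    # /bin/sh -> bashInteractive
        pkgs.nginx
      ];
    };
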
+
   # Build an image and populate its nix database with the provided
   # contents. The main purpose is to be able to use nix commands in
   # the container.
@@ -695,6 +784,8 @@ rec {
     name,
    # Image tag; if null, Nix's output hash will be used
     tag ? null,
+    # Parent image to append the new layers to.
+    fromImage ? null,
     # Files to put on the image (a nix store path or list of paths).
     contents ? [],
     # Docker config; e.g. what command to run on the container.
@@ -704,56 +795,83 @@ rec {
     created ? "1970-01-01T00:00:01Z",
     # Optional bash script to run on the files prior to fixturizing the layer.
     extraCommands ? "",
+    # Optional bash script to run inside a fakeroot environment.
+    # Can be used to change the ownership of files in the customisation layer.
+    fakeRootCommands ? "",
     # We pick 100 to ensure there is plenty of room for extension. I
     # believe the actual maximum is 128.
-    maxLayers ? 100
+    maxLayers ? 100,
+    # Whether to include store paths in the image. You generally want to leave
+    # this on, but tooling may disable this to insert the store paths more
+    # efficiently via other means, such as bind mounting the host store.
+    includeStorePaths ? true,
   }:
     assert
       (lib.assertMsg (maxLayers > 1)
       "the maxLayers argument of dockerTools.buildLayeredImage function must be greather than 1 (current value: ${toString maxLayers})");
     let
+      baseName = baseNameOf name;
+
       streamScript = writePython3 "stream" {} ./stream_layered_image.py;
-      baseJson = writeText "${name}-base.json" (builtins.toJSON {
+      baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
          inherit config;
-         architecture = buildPackages.go.GOARCH;
+         architecture = defaultArch;
          os = "linux";
       });
-      customisationLayer = runCommand "${name}-customisation-layer" { inherit extraCommands; } ''
-        cp -r ${contentsEnv}/ $out
 
-        if [[ -n $extraCommands ]]; then
-          chmod u+w $out
-          (cd $out; eval "$extraCommands")
-        fi
-      '';
-      contentsEnv = symlinkJoin {
-        name = "${name}-bulk-layers";
-        paths = if builtins.isList contents
-          then contents
-          else [ contents ];
+      contentsList = if builtins.isList contents then contents else [ contents ];
+
+      # We store the customisation layer as a tarball, to make sure that
+      # things like permissions set by 'extraCommands' are not overridden
+      # by Nix. Then we precompute the sha256 for performance.
+      customisationLayer = symlinkJoin {
+        name = "${baseName}-customisation-layer";
+        paths = contentsList;
+        inherit extraCommands fakeRootCommands;
+        nativeBuildInputs = [ fakeroot ];
+        postBuild = ''
+          mv $out old_out
+          (cd old_out; eval "$extraCommands" )
+
+          mkdir $out
+
+          fakeroot bash -c '
+            source $stdenv/setup
+            cd old_out
+            eval "$fakeRootCommands"
+            tar \
+              --sort name \
+              --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
+              --hard-dereference \
+              -cf $out/layer.tar .
+          '
+
+          sha256sum $out/layer.tar \
+            | cut -f 1 -d ' ' \
+            > $out/checksum
+        '';
       };
 
-      # NOTE: the `closures` parameter is a list of closures to include.
-      # The TOP LEVEL store paths themselves will never be present in the
-      # resulting image. At this time (2020-06-18) none of these layers
-      # are appropriate to include, as they are all created as
-      # implementation details of dockerTools.
-      closures = [ baseJson contentsEnv ];
-      overallClosure = writeText "closure" (lib.concatStringsSep " " closures);
-      conf = runCommand "${name}-conf.json" {
-        inherit maxLayers created;
+      closureRoots = optionals includeStorePaths /* normally true */ (
+        [ baseJson ] ++ contentsList
+      );
+      overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);
+
+      # These derivations are only created as implementation details of docker-tools,
+      # so they'll be excluded from the created images.
+      unnecessaryDrvs = [ baseJson overallClosure ];
+
+      conf = runCommand "${baseName}-conf.json" {
+        inherit fromImage maxLayers created;
         imageName = lib.toLower name;
         passthru.imageTag =
           if tag != null
             then tag
             else
               lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
-        paths = referencesByPopularity overallClosure;
-        buildInputs = [ jq ];
+        paths = buildPackages.referencesByPopularity overallClosure;
+        nativeBuildInputs = [ jq ];
       } ''
-        paths() {
-          cat $paths ${lib.concatMapStringsSep " " (path: "| (grep -v ${path} || true)") (closures ++ [ overallClosure ])}
-        }
         ${if (tag == null) then ''
           outName="$(basename "$out")"
           outHash=$(echo "$outName" | cut -d - -f 1)
@@ -768,6 +886,33 @@ rec {
             created="$(date -Iseconds -d "$created")"
         fi
 
+        paths() {
+          cat $paths ${lib.concatMapStringsSep " "
+                         (path: "| (grep -v ${path} || true)")
+                         unnecessaryDrvs}
+        }
+
+        # Compute the number of layers that are already used by a potential
+        # 'fromImage' as well as the customization layer. Ensure that there is
+        # still at least one layer available to store the image contents.
+        usedLayers=0
+
+        # subtract number of base image layers
+        if [[ -n "$fromImage" ]]; then
+          (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
+        fi
+
+        # one layer will be taken up by the customisation layer
+        (( usedLayers += 1 ))
+
+        if ! (( $usedLayers < $maxLayers )); then
+          echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
+                    "'extraCommands', but only maxLayers=$maxLayers were" \
+                    "allowed. At least 1 layer is required to store contents."
+          exit 1
+        fi
+        availableLayers=$(( maxLayers - usedLayers ))
+
         # Create $maxLayers worth of Docker Layers, one layer per store path
         # unless there are more paths than $maxLayers. In that case, create
        # $maxLayers-1 for the most popular layers, and smush the remaining
@@ -785,26 +930,36 @@ rec {
                 | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
                 | map(select(length > 0))
             ' \
-              --argjson maxLayers "$(( maxLayers - 1 ))" # one layer will be taken up by the customisation layer
+              --argjson maxLayers "$availableLayers"
         )"
 
         cat ${baseJson} | jq '
           . + {
+            "store_dir": $store_dir,
+            "from_image": $from_image,
             "store_layers": $store_layers,
             "customisation_layer", $customisation_layer,
             "repo_tag": $repo_tag,
             "created": $created
           }
-          ' --argjson store_layers "$store_layers" \
+          ' --arg store_dir "${storeDir}" \
+            --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
+            --argjson store_layers "$store_layers" \
             --arg customisation_layer ${customisationLayer} \
             --arg repo_tag "$imageName:$imageTag" \
             --arg created "$created" |
           tee $out
       '';
-      result = runCommand "stream-${name}" {
+      result = runCommand "stream-${baseName}" {
         inherit (conf) imageName;
-        passthru = { inherit (conf) imageTag; };
-        buildInputs = [ makeWrapper ];
+        passthru = {
+          inherit (conf) imageTag;
+
+          # Distinguish tarballs and exes at the Nix level so functions that
+          # take images can know in advance how the image is supposed to be used.
+          isExe = true;
+        };
+        nativeBuildInputs = [ makeWrapper ];
       } ''
         makeWrapper ${streamScript} $out --add-flags ${conf}
       '';
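
Because the result is now an executable script rather than a tarball
(passthru.isExe = true), here is a sketch of the typical workflow, with a
hypothetical attribute name:

    helloStream = pkgs.dockerTools.streamLayeredImage {
      name = "hello-stream";  # hypothetical
      tag = "latest";
      config.Cmd = [ "${pkgs.hello}/bin/hello" ];
    };

The built script writes the image tarball to stdout, so it is consumed as
'$(nix-build -A helloStream) | docker load' instead of 'docker load < result'.
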
diff --git a/pkgs/build-support/docker/detjson.py b/pkgs/build-support/docker/detjson.py
index 439c2131387..fe82cbea11b 100644
--- a/pkgs/build-support/docker/detjson.py
+++ b/pkgs/build-support/docker/detjson.py
@@ -37,4 +37,4 @@ def main():
     json.dump(j, sys.stdout, sort_keys=True)
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
diff --git a/pkgs/build-support/docker/examples.nix b/pkgs/build-support/docker/examples.nix
index bc107471762..f890d0a77a2 100644
--- a/pkgs/build-support/docker/examples.nix
+++ b/pkgs/build-support/docker/examples.nix
@@ -7,7 +7,7 @@
 #  $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
 #  $ docker load < result
 
-{ pkgs, buildImage, pullImage, shadowSetup, buildImageWithNixDb }:
+{ pkgs, buildImage, buildLayeredImage, fakeNss, pullImage, shadowSetup, buildImageWithNixDb, pkgsCross }:
 
 rec {
   # 1. basic example
@@ -44,7 +44,7 @@ rec {
   nginx = let
     nginxPort = "80";
     nginxConf = pkgs.writeText "nginx.conf" ''
-      user nginx nginx;
+      user nobody nobody;
       daemon off;
       error_log /dev/stdout info;
       pid /dev/null;
@@ -64,10 +64,13 @@ rec {
       <html><body><h1>Hello from NGINX</h1></body></html>
     '';
   in
-  buildImage {
+  buildLayeredImage {
     name = "nginx-container";
     tag = "latest";
-    contents = pkgs.nginx;
+    contents = [
+      fakeNss
+      pkgs.nginx
+    ];
 
     extraCommands = ''
       # nginx still tries to read this directory even if error_log
@@ -75,12 +78,6 @@ rec {
       mkdir -p var/log/nginx
       mkdir -p var/cache/nginx
     '';
-    runAsRoot = ''
-      #!${pkgs.stdenv.shell}
-      ${shadowSetup}
-      groupadd --system nginx
-      useradd --system --gid nginx nginx
-    '';
 
     config = {
       Cmd = [ "nginx" "-c" nginxConf ];
@@ -94,7 +91,7 @@ rec {
   nixFromDockerHub = pullImage {
     imageName = "nixos/nix";
     imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
-    sha256 = "07q9y9r7fsd18sy95ybrvclpkhlal12d30ybnf089hq7v1hgxbi7";
+    sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
     finalImageTag = "2.2.1";
     finalImageName = "nix";
   };
@@ -191,7 +188,25 @@ rec {
     };
   };
 
-  # 12. example of running something as root on top of a parent image
+  # 12. Create a layered image on top of a layered image
+  layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
+    name = "layered-on-top-layered";
+    tag = "latest";
+    fromImage = layered-image;
+    extraCommands = ''
+      mkdir ./example-output
+      chmod 777 ./example-output
+    '';
+    config = {
+      Env = [ "PATH=${pkgs.coreutils}/bin/" ];
+      WorkingDir = "/example-output";
+      Cmd = [
+        "${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
+      ];
+    };
+  };
+
+  # 13. example of running something as root on top of a parent image
   # Regression test related to PR #52109
   runAsRootParentImage = buildImage {
     name = "runAsRootParentImage";
@@ -200,7 +215,7 @@ rec {
     fromImage = bash;
   };
 
-  # 13. example of 3 layers images This image is used to verify the
+  # 14. Example of a 3-layer image. This image is used to verify that the
   # order of layers is correct.
   # It allows validating that
   # - the layers of the parent are below
@@ -238,23 +253,36 @@ rec {
     '';
   };
 
-  # 14. Environment variable inheritance.
+  # 15. Environment variable inheritance.
   # Child image should inherit the parent's environment variables,
   # optionally overriding them.
-  environmentVariables = let
-    parent = pkgs.dockerTools.buildImage {
-      name = "parent";
-      tag = "latest";
-      config = {
-        Env = [
-          "FROM_PARENT=true"
-          "LAST_LAYER=parent"
-        ];
-      };
+  environmentVariablesParent = pkgs.dockerTools.buildImage {
+    name = "parent";
+    tag = "latest";
+    config = {
+      Env = [
+        "FROM_PARENT=true"
+        "LAST_LAYER=parent"
+      ];
     };
-  in pkgs.dockerTools.buildImage {
+  };
+
+  environmentVariables = pkgs.dockerTools.buildImage {
+    name = "child";
+    fromImage = environmentVariablesParent;
+    tag = "latest";
+    contents = [ pkgs.coreutils ];
+    config = {
+      Env = [
+        "FROM_CHILD=true"
+        "LAST_LAYER=child"
+      ];
+    };
+  };
+
+  environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
     name = "child";
-    fromImage = parent;
+    fromImage = environmentVariablesParent;
     tag = "latest";
     contents = [ pkgs.coreutils ];
     config = {
@@ -265,14 +293,14 @@ rec {
     };
   };
 
-  # 15. Create another layered image, for comparing layers with image 10.
+  # 16. Create another layered image, for comparing layers with image 10.
   another-layered-image = pkgs.dockerTools.buildLayeredImage {
     name = "another-layered-image";
     tag = "latest";
     config.Cmd = [ "${pkgs.hello}/bin/hello" ];
   };
 
-  # 16. Create a layered image with only 2 layers
+  # 17. Create a layered image with only 2 layers
   two-layered-image = pkgs.dockerTools.buildLayeredImage {
     name = "two-layered-image";
     tag = "latest";
@@ -281,7 +309,7 @@ rec {
     maxLayers = 2;
   };
 
-  # 17. Create a layered image with more packages than max layers.
+  # 18. Create a layered image with more packages than max layers.
   # coreutils and hello are part of the same layer
   bulk-layer = pkgs.dockerTools.buildLayeredImage {
     name = "bulk-layer";
@@ -292,27 +320,28 @@ rec {
     maxLayers = 2;
   };
 
-  # 18. Create a "layered" image without nix store layers. This is not
+  # 19. Create a layered image with a base image and more packages than max
+  # layers. coreutils and hello are part of the same layer
+  layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
+    name = "layered-bulk-layer";
+    tag = "latest";
+    fromImage = two-layered-image;
+    contents = with pkgs; [
+      coreutils hello
+    ];
+    maxLayers = 4;
+  };
+
+  # 20. Create a "layered" image without nix store layers. This is not
   # recommended, but can be useful for base images in rare cases.
   no-store-paths = pkgs.dockerTools.buildLayeredImage {
     name = "no-store-paths";
     tag = "latest";
     extraCommands = ''
-      chmod a+w bin
-
       # This removes sharing of busybox and is not recommended. We do this
       # to make the example suitable as a test case with working binaries.
       cp -r ${pkgs.pkgsStatic.busybox}/* .
     '';
-    contents = [
-      # This layer has no dependencies and its symlinks will be dereferenced
-      # when creating the customization layer.
-      (pkgs.runCommand "layer-to-flatten" {} ''
-        mkdir -p $out/bin
-        ln -s /bin/true $out/bin/custom-true
-      ''
-      )
-    ];
   };
 
   nixLayered = pkgs.dockerTools.buildLayeredImageWithNixDb {
@@ -335,7 +364,7 @@ rec {
     };
   };
 
-  # 19. Support files in the store on buildLayeredImage
+  # 21. Support files in the store on buildLayeredImage
   # See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
   filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
     name = "file-in-store";
@@ -355,7 +384,7 @@ rec {
     };
   };
 
-  # 20. Ensure that setting created to now results in a date which
+  # 22. Ensure that setting created to now results in a date which
   # isn't the epoch + 1 for layered images.
   unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
     name = "unstable-date-layered";
@@ -415,7 +444,101 @@ rec {
     pkgs.dockerTools.buildLayeredImage {
       name = "bash-layered-with-user";
       tag = "latest";
-      contents = [ pkgs.bash pkgs.coreutils (nonRootShadowSetup { uid = 999; user = "somebody"; }) ];
+      contents = [ pkgs.bash pkgs.coreutils ] ++ nonRootShadowSetup { uid = 999; user = "somebody"; };
     };
 
+  # basic example, with cross compilation
+  cross = let
+    # Cross compile for x86_64 if on aarch64
+    crossPkgs =
+      if pkgs.system == "aarch64-linux" then pkgsCross.gnu64
+      else pkgsCross.aarch64-multiplatform;
+  in crossPkgs.dockerTools.buildImage {
+    name = "hello-cross";
+    tag = "latest";
+    contents = crossPkgs.hello;
+  };
+
+  # layered image where a store path is itself a symlink
+  layeredStoreSymlink =
+  let
+    target = pkgs.writeTextDir "dir/target" "Content doesn't matter.";
+    symlink = pkgs.runCommandNoCC "symlink" {} "ln -s ${target} $out";
+  in
+    pkgs.dockerTools.buildLayeredImage {
+      name = "layeredstoresymlink";
+      tag = "latest";
+      contents = [ pkgs.bash symlink ];
+    } // { passthru = { inherit symlink; }; };
+
+  # image with registry/ prefix
+  prefixedImage = pkgs.dockerTools.buildImage {
+    name = "registry-1.docker.io/image";
+    tag = "latest";
+    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
+  };
+
+  # layered image with registry/ prefix
+  prefixedLayeredImage = pkgs.dockerTools.buildLayeredImage {
+    name = "registry-1.docker.io/layered-image";
+    tag = "latest";
+    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
+  };
+
+  # layered image with files owned by a user other than root
+  layeredImageWithFakeRootCommands = pkgs.dockerTools.buildLayeredImage {
+    name = "layered-image-with-fake-root-commands";
+    tag = "latest";
+    contents = [
+      pkgs.pkgsStatic.busybox
+    ];
+    fakeRootCommands = ''
+      mkdir -p ./home/jane
+      chown 1000 ./home/jane
+    '';
+  };
+
+  # tarball consisting of both bash and redis images
+  mergedBashAndRedis = pkgs.dockerTools.mergeImages [
+    bash
+    redis
+  ];
+
+  # tarball consisting of bash (without tag) and redis images
+  mergedBashNoTagAndRedis = pkgs.dockerTools.mergeImages [
+    bashNoTag
+    redis
+  ];
+
+  # tarball consisting of bash and a layered image whose /home/jane
+  # directory is owned by a non-root user
+  mergedBashFakeRoot = pkgs.dockerTools.mergeImages [
+    bash
+    layeredImageWithFakeRootCommands
+  ];
+
+  helloOnRoot = pkgs.dockerTools.streamLayeredImage {
+    name = "hello";
+    tag = "latest";
+    contents = [
+      (pkgs.buildEnv {
+        name = "hello-root";
+        paths = [ pkgs.hello ];
+      })
+    ];
+    config.Cmd = [ "hello" ];
+  };
+
+  helloOnRootNoStore = pkgs.dockerTools.streamLayeredImage {
+    name = "hello";
+    tag = "latest";
+    contents = [
+      (pkgs.buildEnv {
+        name = "hello-root";
+        paths = [ pkgs.hello ];
+      })
+    ];
+    config.Cmd = [ "hello" ];
+    includeStorePaths = false;
+  };
 }
diff --git a/pkgs/build-support/docker/nix-prefetch-docker b/pkgs/build-support/docker/nix-prefetch-docker
index 1b6785189c2..5798ab5984f 100755
--- a/pkgs/build-support/docker/nix-prefetch-docker
+++ b/pkgs/build-support/docker/nix-prefetch-docker
@@ -127,7 +127,7 @@ trap "rm -rf \"$tmpPath\"" EXIT
 tmpFile="$tmpPath/$(get_name $finalImageName $finalImageTag)"
 
 if test -z "$QUIET"; then
-    skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag"
+    skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" >&2
 else
     skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" > /dev/null
 fi
@@ -139,12 +139,12 @@ imageHash=$(nix-hash --flat --type $hashType --base32 "$tmpFile")
 finalPath=$(nix-store --add-fixed "$hashType" "$tmpFile")
 
 if test -z "$QUIET"; then
-    echo "-> ImageName: $imageName"
-    echo "-> ImageDigest: $imageDigest"
-    echo "-> FinalImageName: $finalImageName"
-    echo "-> FinalImageTag: $finalImageTag"
-    echo "-> ImagePath: $finalPath"
-    echo "-> ImageHash: $imageHash"
+    echo "-> ImageName: $imageName" >&2
+    echo "-> ImageDigest: $imageDigest" >&2
+    echo "-> FinalImageName: $finalImageName" >&2
+    echo "-> FinalImageTag: $finalImageTag" >&2
+    echo "-> ImagePath: $finalPath" >&2
+    echo "-> ImageHash: $imageHash" >&2
 fi
 
 if [ "$format" == "nix" ]; then
diff --git a/pkgs/build-support/docker/nix-prefetch-docker.nix b/pkgs/build-support/docker/nix-prefetch-docker.nix
index 6341eb0154b..61e917461ed 100644
--- a/pkgs/build-support/docker/nix-prefetch-docker.nix
+++ b/pkgs/build-support/docker/nix-prefetch-docker.nix
@@ -1,6 +1,4 @@
-{ stdenv, makeWrapper, nix, skopeo, jq }:
-
-with stdenv.lib;
+{ lib, stdenv, makeWrapper, nix, skopeo, jq }:
 
 stdenv.mkDerivation {
   name = "nix-prefetch-docker";
@@ -12,13 +10,13 @@ stdenv.mkDerivation {
   installPhase = ''
     install -vD ${./nix-prefetch-docker} $out/bin/$name;
     wrapProgram $out/bin/$name \
-      --prefix PATH : ${makeBinPath [ nix skopeo jq ]} \
+      --prefix PATH : ${lib.makeBinPath [ nix skopeo jq ]} \
       --set HOME /homeless-shelter
   '';
 
   preferLocalBuild = true;
 
-  meta = {
+  meta = with lib; {
     description = "Script used to obtain source hashes for dockerTools.pullImage";
     maintainers = with maintainers; [ offline ];
     platforms = platforms.unix;
diff --git a/pkgs/build-support/docker/stream_layered_image.py b/pkgs/build-support/docker/stream_layered_image.py
index ffb6ba0ade4..d7c63eb43a7 100644
--- a/pkgs/build-support/docker/stream_layered_image.py
+++ b/pkgs/build-support/docker/stream_layered_image.py
@@ -45,21 +45,14 @@ from datetime import datetime, timezone
 from collections import namedtuple
 
 
-def archive_paths_to(obj, paths, mtime, add_nix, filter=None):
+def archive_paths_to(obj, paths, mtime):
     """
     Writes the given store paths as a tar file to the given stream.
 
     obj: Stream to write to. Should have a 'write' method.
     paths: List of store paths.
-    add_nix: Whether /nix and /nix/store directories should be
-             prepended to the archive.
-    filter: An optional transformation to be applied to TarInfo
-            objects. Should take a single TarInfo object and return
-            another one. Defaults to identity.
     """
 
-    filter = filter if filter else lambda i: i
-
     # gettarinfo makes the paths relative; this makes them
     # absolute again
     def append_root(ti):
@@ -72,7 +65,7 @@ def archive_paths_to(obj, paths, mtime, add_nix, filter=None):
         ti.gid = 0
         ti.uname = "root"
         ti.gname = "root"
-        return filter(ti)
+        return ti
 
     def nix_root(ti):
         ti.mode = 0o0555  # r-xr-xr-x
@@ -85,15 +78,17 @@ def archive_paths_to(obj, paths, mtime, add_nix, filter=None):
 
     with tarfile.open(fileobj=obj, mode="w|") as tar:
         # To be consistent with the docker utilities, we need to have
-        # these directories first when building layer tarballs. But
-        # we don't need them on the customisation layer.
-        if add_nix:
-            tar.addfile(apply_filters(nix_root(dir("/nix"))))
-            tar.addfile(apply_filters(nix_root(dir("/nix/store"))))
+        # these directories first when building layer tarballs.
+        tar.addfile(apply_filters(nix_root(dir("/nix"))))
+        tar.addfile(apply_filters(nix_root(dir("/nix/store"))))
 
         for path in paths:
             path = pathlib.Path(path)
-            files = itertools.chain([path], path.rglob("*"))
+            if path.is_symlink():
+                files = [path]
+            else:
+                files = itertools.chain([path], path.rglob("*"))
+
             for filename in sorted(files):
                 ti = append_root(tar.gettarinfo(filename))
 
@@ -132,31 +127,104 @@ class ExtractChecksum:
         return (self._digest.hexdigest(), self._size)
 
 
+FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
 # Some metadata for a layer
 LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])
 
 
-def add_layer_dir(tar, paths, mtime, add_nix=True, filter=None):
+def load_from_image(from_image_str):
+    """
+    Loads the given base image, if any.
+
+    from_image_str: Path to the base image archive.
+
+    Returns: A 'FromImage' object with references to the loaded base image,
+             or 'None' if no base image was provided.
+    """
+    if from_image_str is None:
+        return None
+
+    base_tar = tarfile.open(from_image_str)
+
+    manifest_json_tarinfo = base_tar.getmember("manifest.json")
+    with base_tar.extractfile(manifest_json_tarinfo) as f:
+        manifest_json = json.load(f)
+
+    image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
+    with base_tar.extractfile(image_json_tarinfo) as f:
+        image_json = json.load(f)
+
+    return FromImage(base_tar, manifest_json, image_json)
+
+
+def add_base_layers(tar, from_image):
+    """
+    Adds the layers from the given base image to the final image.
+
+    tar: 'tarfile.TarFile' object for new layers to be added to.
+    from_image: 'FromImage' object with references to the loaded base image.
+    """
+    if from_image is None:
+        print("No 'fromImage' provided", file=sys.stderr)
+        return []
+
+    layers = from_image.manifest_json[0]["Layers"]
+    checksums = from_image.image_json["rootfs"]["diff_ids"]
+    layers_checksums = zip(layers, checksums)
+
+    for num, (layer, checksum) in enumerate(layers_checksums, start=1):
+        layer_tarinfo = from_image.tar.getmember(layer)
+        checksum = re.sub(r"^sha256:", "", checksum)
+
+        tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
+        path = layer_tarinfo.path
+        size = layer_tarinfo.size
+
+        print("Adding base layer", num, "from", path, file=sys.stderr)
+        yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
+
+    from_image.tar.close()
+
+
+def overlay_base_config(from_image, final_config):
+    """
+    Overlays the final image 'config' JSON on top of selected defaults from the
+    base image 'config' JSON.
+
+    from_image: 'FromImage' object with references to the loaded base image.
+    final_config: 'dict' object of the final image 'config' JSON.
+    """
+    if from_image is None:
+        return final_config
+
+    base_config = from_image.image_json["config"]
+
+    # Preserve environment from base image
+    final_env = base_config.get("Env", []) + final_config.get("Env", [])
+    if final_env:
+        # Resolve duplicates (last one wins) and format back as list
+        resolved_env = {entry.split("=", 1)[0]: entry for entry in final_env}
+        final_config["Env"] = list(resolved_env.values())
+    return final_config
+
+
+def add_layer_dir(tar, paths, store_dir, mtime):
     """
     Appends given store paths to a TarFile object as a new layer.
 
     tar: 'tarfile.TarFile' object for the new layer to be added to.
     paths: List of store paths.
+    store_dir: the root directory of the nix store
     mtime: 'mtime' of the added files and the layer tarball.
            Should be an integer representing a POSIX time.
-    add_nix: Whether /nix and /nix/store directories should be
-             added to a layer.
-    filter: An optional transformation to be applied to TarInfo
-            objects inside the layer. Should take a single TarInfo
-            object and return another one. Defaults to identity.
 
     Returns: A 'LayerInfo' object containing some metadata of
              the layer added.
     """
 
-    invalid_paths = [i for i in paths if not i.startswith("/nix/store/")]
+    invalid_paths = [i for i in paths if not i.startswith(store_dir)]
     assert len(invalid_paths) == 0, \
-        "Expecting absolute store paths, but got: {invalid_paths}"
+        f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
 
     # First, calculate the tarball checksum and the size.
     extract_checksum = ExtractChecksum()
@@ -164,8 +232,6 @@ def add_layer_dir(tar, paths, mtime, add_nix=True, filter=None):
         extract_checksum,
         paths,
         mtime=mtime,
-        add_nix=add_nix,
-        filter=filter
     )
     (checksum, size) = extract_checksum.extract()
 
@@ -182,8 +248,6 @@ def add_layer_dir(tar, paths, mtime, add_nix=True, filter=None):
                 write,
                 paths,
                 mtime=mtime,
-                add_nix=add_nix,
-                filter=filter
             )
             write.close()
 
@@ -199,29 +263,38 @@ def add_layer_dir(tar, paths, mtime, add_nix=True, filter=None):
     return LayerInfo(size=size, checksum=checksum, path=path, paths=paths)
 
 
-def add_customisation_layer(tar, path, mtime):
+def add_customisation_layer(target_tar, customisation_layer, mtime):
     """
-    Adds the contents of the store path as a new layer. This is different
-    than the 'add_layer_dir' function defaults in the sense that the contents
-    of a single store path will be added to the root of the layer. eg (without
-    the /nix/store prefix).
+    Adds the customisation layer as a new layer. This layer is structured
+    differently; the given store path already contains a 'layer.tar' and its
+    corresponding sha256sum.
 
     tar: 'tarfile.TarFile' object for the new layer to be added to.
-    path: A store path.
-    mtime: 'mtime' of the added files and the layer tarball. Should be an
-           integer representing a POSIX time.
+    customisation_layer: Path containing the layer archive.
+    mtime: 'mtime' of the added layer tarball.
     """
 
-    def filter(ti):
-        ti.name = re.sub("^/nix/store/[^/]*", "", ti.name)
-        return ti
-    return add_layer_dir(
-        tar,
-        [path],
-        mtime=mtime,
-        add_nix=False,
-        filter=filter
-      )
+    checksum_path = os.path.join(customisation_layer, "checksum")
+    with open(checksum_path) as f:
+        checksum = f.read().strip()
+    assert len(checksum) == 64, f"Invalid sha256 at {checksum_path}."
+
+    layer_path = os.path.join(customisation_layer, "layer.tar")
+
+    path = f"{checksum}/layer.tar"
+    tarinfo = target_tar.gettarinfo(layer_path)
+    tarinfo.name = path
+    tarinfo.mtime = mtime
+
+    with open(layer_path, "rb") as f:
+        target_tar.addfile(tarinfo, f)
+
+    return LayerInfo(
+      size=None,
+      checksum=checksum,
+      path=path,
+      paths=[customisation_layer]
+    )
 
 
 def add_bytes(tar, path, content, mtime):
@@ -251,18 +324,23 @@ def main():
       else datetime.fromisoformat(conf["created"])
     )
     mtime = int(created.timestamp())
+    store_dir = conf["store_dir"]
+
+    from_image = load_from_image(conf["from_image"])
 
     with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
         layers = []
-        for num, store_layer in enumerate(conf["store_layers"]):
-            print(
-              "Creating layer", num,
-              "from paths:", store_layer,
-              file=sys.stderr)
-            info = add_layer_dir(tar, store_layer, mtime=mtime)
+        layers.extend(add_base_layers(tar, from_image))
+
+        start = len(layers) + 1
+        for num, store_layer in enumerate(conf["store_layers"], start=start):
+            print("Creating layer", num, "from paths:", store_layer,
+                  file=sys.stderr)
+            info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
             layers.append(info)
 
-        print("Creating the customisation layer...", file=sys.stderr)
+        print("Creating layer", len(layers) + 1, "with customisation...",
+              file=sys.stderr)
         layers.append(
           add_customisation_layer(
             tar,
@@ -277,7 +355,7 @@ def main():
             "created": datetime.isoformat(created),
             "architecture": conf["architecture"],
             "os": "linux",
-            "config": conf["config"],
+            "config": overlay_base_config(from_image, conf["config"]),
             "rootfs": {
                 "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
                 "type": "layers",