Diffstat (limited to 'nixos/doc/manual/from_md/administration')
-rw-r--r--  nixos/doc/manual/from_md/administration/boot-problems.section.xml          | 144
-rw-r--r--  nixos/doc/manual/from_md/administration/cleaning-store.chapter.xml         |  72
-rw-r--r--  nixos/doc/manual/from_md/administration/container-networking.section.xml   |  54
-rw-r--r--  nixos/doc/manual/from_md/administration/containers.chapter.xml             |  31
-rw-r--r--  nixos/doc/manual/from_md/administration/control-groups.chapter.xml         |  67
-rw-r--r--  nixos/doc/manual/from_md/administration/declarative-containers.section.xml |  60
-rw-r--r--  nixos/doc/manual/from_md/administration/imperative-containers.section.xml  | 131
-rw-r--r--  nixos/doc/manual/from_md/administration/logging.chapter.xml                |  45
-rw-r--r--  nixos/doc/manual/from_md/administration/maintenance-mode.section.xml       |  14
-rw-r--r--  nixos/doc/manual/from_md/administration/network-problems.section.xml       |  25
-rw-r--r--  nixos/doc/manual/from_md/administration/rebooting.chapter.xml              |  38
-rw-r--r--  nixos/doc/manual/from_md/administration/rollback.section.xml               |  42
-rw-r--r--  nixos/doc/manual/from_md/administration/service-mgmt.chapter.xml           | 141
-rw-r--r--  nixos/doc/manual/from_md/administration/store-corruption.section.xml       |  34
-rw-r--r--  nixos/doc/manual/from_md/administration/troubleshooting.chapter.xml        |  12
-rw-r--r--  nixos/doc/manual/from_md/administration/user-sessions.chapter.xml          |  46
16 files changed, 956 insertions, 0 deletions
diff --git a/nixos/doc/manual/from_md/administration/boot-problems.section.xml b/nixos/doc/manual/from_md/administration/boot-problems.section.xml
new file mode 100644
index 00000000000..144661c86eb
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/boot-problems.section.xml
@@ -0,0 +1,144 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-boot-problems">
+  <title>Boot Problems</title>
+  <para>
+    If NixOS fails to boot, there are a number of kernel command line
+    parameters that may help you to identify or fix the issue. You can
+    add these parameters in the GRUB boot menu by pressing “e” to modify
+    the selected boot entry and editing the line starting with
+    <literal>linux</literal>. The following are some useful kernel
+    command line parameters that are recognised by the NixOS boot
+    scripts or by systemd:
+  </para>
+  <variablelist>
+    <varlistentry>
+      <term>
+        <literal>boot.shell_on_fail</literal>
+      </term>
+      <listitem>
+        <para>
+          Allows the user to start a root shell if something goes wrong
+          in stage 1 of the boot process (the initial ramdisk). This is
+          disabled by default because there is no authentication for the
+          root shell.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>boot.debug1</literal>
+      </term>
+      <listitem>
+        <para>
+          Start an interactive shell in stage 1 before anything useful
+          has been done. That is, no modules have been loaded and no
+          file systems have been mounted, except for
+          <literal>/proc</literal> and <literal>/sys</literal>.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>boot.debug1devices</literal>
+      </term>
+      <listitem>
+        <para>
+          Like <literal>boot.debug1</literal>, but runs stage 1 until
+          kernel modules are loaded and device nodes are created. This
+          may help with e.g. making the keyboard work.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>boot.debug1mounts</literal>
+      </term>
+      <listitem>
+        <para>
+          Like <literal>boot.debug1</literal> or
+          <literal>boot.debug1devices</literal>, but runs stage 1 until
+          all filesystems that are mounted during initrd are mounted
+          (see
+          <link linkend="opt-fileSystems._name_.neededForBoot">neededForBoot</link>).
+          As a motivating example, this could be useful if you’ve
+          forgotten to set
+          <link linkend="opt-fileSystems._name_.neededForBoot">neededForBoot</link>
+          on a file system.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>boot.trace</literal>
+      </term>
+      <listitem>
+        <para>
+          Print every shell command executed by the stage 1 and 2 boot
+          scripts.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>single</literal>
+      </term>
+      <listitem>
+        <para>
+          Boot into rescue mode (a.k.a. single user mode). This will
+          cause systemd to start nothing but the unit
+          <literal>rescue.target</literal>, which runs
+          <literal>sulogin</literal> to prompt for the root password and
+          start a root login shell. Exiting the shell causes the system
+          to continue with the normal boot process.
+        </para>
+      </listitem>
+    </varlistentry>
+    <varlistentry>
+      <term>
+        <literal>systemd.log_level=debug</literal>
+        <literal>systemd.log_target=console</literal>
+      </term>
+      <listitem>
+        <para>
+          Make systemd very verbose and send log messages to the console
+          instead of the journal. For more parameters recognised by
+          systemd, see systemd(1).
+        </para>
+      </listitem>
+    </varlistentry>
+  </variablelist>
+  <para>
+    In addition, these arguments are recognised by the live image only:
+  </para>
+  <variablelist>
+    <varlistentry>
+      <term>
+        <literal>live.nixos.passwd=password</literal>
+      </term>
+      <listitem>
+        <para>
+          Set the password for the <literal>nixos</literal> live user.
+          This can be used for SSH access if there are issues using the
+          terminal.
+        </para>
+      </listitem>
+    </varlistentry>
+  </variablelist>
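+  <para>
+    For example, to drop into a rescue shell when stage 1 fails, append
+    <literal>boot.shell_on_fail</literal> to the end of the
+    <literal>linux</literal> line in the GRUB editor (the kernel and
+    <literal>init=</literal> paths shown here are abridged and
+    illustrative):
+  </para>
+  <programlisting>
+linux ($drive1)//kernels/…-bzImage init=/nix/store/…-nixos-system-…/init … boot.shell_on_fail
+</programlisting>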
+  <para>
+    Notice that for <literal>boot.shell_on_fail</literal>,
+    <literal>boot.debug1</literal>,
+    <literal>boot.debug1devices</literal>, and
+    <literal>boot.debug1mounts</literal>, if you did
+    <emphasis role="strong">not</emphasis> select <quote>start the new
+    shell as pid 1</quote>, and you <literal>exit</literal> from the new
+    shell, boot will proceed normally from the point where it failed, as
+    if you’d chosen <quote>ignore the error and continue</quote>.
+  </para>
+  <para>
+    If no login prompts or X11 login screens appear (e.g. due to hanging
+    dependencies), you can press Alt+ArrowUp. If you’re lucky, this will
+    start rescue mode (described above). (Also note that since most
+    units have a 90-second timeout before systemd gives up on them, the
+    <literal>agetty</literal> login prompts should appear eventually
+    unless something is very wrong.)
+  </para>
+</section>
diff --git a/nixos/doc/manual/from_md/administration/cleaning-store.chapter.xml b/nixos/doc/manual/from_md/administration/cleaning-store.chapter.xml
new file mode 100644
index 00000000000..4243d2bf53f
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/cleaning-store.chapter.xml
@@ -0,0 +1,72 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-nix-gc">
+  <title>Cleaning the Nix Store</title>
+  <para>
+    Nix has a purely functional model, meaning that packages are never
+    upgraded in place. Instead, new versions of packages end up in a
+    different location in the Nix store (<literal>/nix/store</literal>).
+    You should periodically run Nix’s <emphasis>garbage
+    collector</emphasis> to remove old, unreferenced packages. This is
+    easy:
+  </para>
+  <programlisting>
+$ nix-collect-garbage
+</programlisting>
+  <para>
+    Alternatively, you can use a systemd unit that does the same in the
+    background:
+  </para>
+  <programlisting>
+# systemctl start nix-gc.service
+</programlisting>
+  <para>
+    You can tell NixOS in <literal>configuration.nix</literal> to run
+    this unit automatically at certain points in time, for instance,
+    every night at 03:15:
+  </para>
+  <programlisting language="bash">
+nix.gc.automatic = true;
+nix.gc.dates = &quot;03:15&quot;;
+</programlisting>
+  <para>
+    The commands above do not remove garbage collector roots, such as
+    old system configurations. Thus they do not remove the ability to
+    roll back to previous configurations. The following command deletes
+    old roots, removing the ability to roll back to them:
+  </para>
+  <programlisting>
+$ nix-collect-garbage -d
+</programlisting>
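+  <para>
+    If you would rather keep recent configurations around for rollback,
+    <literal>nix-collect-garbage</literal> also accepts a retention
+    window; for instance, to delete only generations older than 30
+    days:
+  </para>
+  <programlisting>
+$ nix-collect-garbage --delete-older-than 30d
+</programlisting>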
+  <para>
+    You can also do this for specific profiles, e.g.
+  </para>
+  <programlisting>
+$ nix-env -p /nix/var/nix/profiles/per-user/eelco/profile --delete-generations old
+</programlisting>
+  <para>
+    Note that NixOS system configurations are stored in the profile
+    <literal>/nix/var/nix/profiles/system</literal>.
+  </para>
+  <para>
+    Another way to reclaim disk space (often as much as 40% of the size
+    of the Nix store) is to run Nix’s store optimiser, which seeks out
+    identical files in the store and replaces them with hard links to a
+    single copy.
+  </para>
+  <programlisting>
+$ nix-store --optimise
+</programlisting>
+  <para>
+    Since this command needs to read the entire Nix store, it can take
+    quite a while to finish.
+  </para>
+  <section xml:id="sect-nixos-gc-boot-entries">
+    <title>NixOS Boot Entries</title>
+    <para>
+      If your <literal>/boot</literal> partition runs out of space,
+      then after clearing old profiles you must rebuild your system
+      with <literal>nixos-rebuild boot</literal> or
+      <literal>nixos-rebuild switch</literal> to update the
+      <literal>/boot</literal> partition and free up space.
+    </para>
+  </section>
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/container-networking.section.xml b/nixos/doc/manual/from_md/administration/container-networking.section.xml
new file mode 100644
index 00000000000..788a2b7b0ac
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/container-networking.section.xml
@@ -0,0 +1,54 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-container-networking">
+  <title>Container Networking</title>
+  <para>
+    When you create a container using
+    <literal>nixos-container create</literal>, it gets its own private
+    IPv4 address in the range <literal>10.233.0.0/16</literal>. You can
+    get the container’s IPv4 address as follows:
+  </para>
+  <programlisting>
+# nixos-container show-ip foo
+10.233.4.2
+
+$ ping -c1 10.233.4.2
+64 bytes from 10.233.4.2: icmp_seq=1 ttl=64 time=0.106 ms
+</programlisting>
+  <para>
+    Networking is implemented using a pair of virtual Ethernet devices.
+    The network interface in the container is called
+    <literal>eth0</literal>, while the matching interface in the host is
+    called <literal>ve-container-name</literal> (e.g.,
+    <literal>ve-foo</literal>). The container has its own network
+    namespace and the <literal>CAP_NET_ADMIN</literal> capability, so it
+    can perform arbitrary network configuration such as setting up
+    firewall rules, without affecting or having access to the host’s
+    network.
+  </para>
+  <para>
+    By default, containers cannot talk to the outside network. If you
+    want that, you should set up Network Address Translation (NAT) rules
+    on the host to rewrite container traffic to use your external IP
+    address. This can be accomplished using the following configuration
+    on the host:
+  </para>
+  <programlisting language="bash">
+networking.nat.enable = true;
+networking.nat.internalInterfaces = [&quot;ve-+&quot;];
+networking.nat.externalInterface = &quot;eth0&quot;;
+</programlisting>
+  <para>
+    where <literal>eth0</literal> should be replaced with the desired
+    external interface. Note that <literal>ve-+</literal> is a wildcard
+    that matches all container interfaces.
+  </para>
+  <para>
+    If you are using NetworkManager, you need to explicitly prevent it
+    from managing container interfaces:
+  </para>
+  <programlisting language="bash">
+networking.networkmanager.unmanaged = [ &quot;interface-name:ve-*&quot; ];
+</programlisting>
+  <para>
+    You may need to restart your system for the changes to take effect.
+  </para>
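+  <para>
+    For declaratively managed containers, individual ports can also be
+    forwarded from the host into the container. A minimal sketch using
+    the <literal>forwardPorts</literal> option (the port numbers are
+    illustrative):
+  </para>
+  <programlisting language="bash">
+containers.foo.forwardPorts = [
+  { protocol = &quot;tcp&quot;; hostPort = 8080; containerPort = 80; }
+];
+</programlisting>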
+</section>
diff --git a/nixos/doc/manual/from_md/administration/containers.chapter.xml b/nixos/doc/manual/from_md/administration/containers.chapter.xml
new file mode 100644
index 00000000000..afbd5b35aaa
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/containers.chapter.xml
@@ -0,0 +1,31 @@
+<chapter xmlns="http://docbook.org/ns/docbook"  xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude" xml:id="ch-containers">
+  <title>Container Management</title>
+  <para>
+    NixOS allows you to easily run other NixOS instances as
+    <emphasis>containers</emphasis>. Containers are a light-weight
+    approach to virtualisation that runs software in the container at
+    the same speed as in the host system. NixOS containers share the Nix
+    store of the host, making container creation very efficient.
+  </para>
+  <warning>
+    <para>
+      Currently, NixOS containers are not perfectly isolated from the
+      host system. This means that a user with root access to the
+      container can do things that affect the host. So you should not
+      give container root access to untrusted users.
+    </para>
+  </warning>
+  <para>
+    NixOS containers can be created in two ways: imperatively, using the
+    command <literal>nixos-container</literal>, and declaratively, by
+    specifying them in your <literal>configuration.nix</literal>. The
+    declarative approach implies that containers get upgraded along with
+    your host system when you run <literal>nixos-rebuild</literal>,
+    which is often not what you want. By contrast, in the imperative
+    approach, containers are configured and updated independently from
+    the host system.
+  </para>
+  <xi:include href="imperative-containers.section.xml" />
+  <xi:include href="declarative-containers.section.xml" />
+  <xi:include href="container-networking.section.xml" />
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/control-groups.chapter.xml b/nixos/doc/manual/from_md/administration/control-groups.chapter.xml
new file mode 100644
index 00000000000..8dab2c9d44b
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/control-groups.chapter.xml
@@ -0,0 +1,67 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-cgroups">
+  <title>Control Groups</title>
+  <para>
+    To keep track of the processes in a running system, systemd uses
+    <emphasis>control groups</emphasis> (cgroups). A control group is a
+    set of processes used to allocate resources such as CPU, memory or
+    I/O bandwidth. There can be multiple control group hierarchies,
+    allowing each kind of resource to be managed independently.
+  </para>
+  <para>
+    The command <literal>systemd-cgls</literal> lists all control groups
+    in the <literal>systemd</literal> hierarchy, which is what systemd
+    uses to keep track of the processes belonging to each service or
+    user session:
+  </para>
+  <programlisting>
+$ systemd-cgls
+├─user
+│ └─eelco
+│   └─c1
+│     ├─ 2567 -:0
+│     ├─ 2682 kdeinit4: kdeinit4 Running...
+│     ├─ ...
+│     └─10851 sh -c less -R
+└─system
+  ├─httpd.service
+  │ ├─2444 httpd -f /nix/store/3pyacby5cpr55a03qwbnndizpciwq161-httpd.conf -DNO_DETACH
+  │ └─...
+  ├─dhcpcd.service
+  │ └─2376 dhcpcd --config /nix/store/f8dif8dsi2yaa70n03xir8r653776ka6-dhcpcd.conf
+  └─ ...
+</programlisting>
+  <para>
+    Similarly, <literal>systemd-cgls cpu</literal> shows the cgroups in
+    the CPU hierarchy, which allows per-cgroup CPU scheduling
+    priorities. By default, every systemd service gets its own CPU
+    cgroup, while all user sessions are in the top-level CPU cgroup.
+    This ensures, for instance, that a thousand run-away processes in
+    the <literal>httpd.service</literal> cgroup cannot starve the CPU
+    for one process in the <literal>postgresql.service</literal> cgroup.
+    (By contrast, if they were in the same cgroup, then the PostgreSQL
+    process would get 1/1001 of the cgroup’s CPU time.) You can limit a
+    service’s CPU share in <literal>configuration.nix</literal>:
+  </para>
+  <programlisting language="bash">
+systemd.services.httpd.serviceConfig.CPUShares = 512;
+</programlisting>
+  <para>
+    By default, every cgroup has 1024 CPU shares, so this will halve the
+    CPU allocation of the <literal>httpd.service</literal> cgroup.
+  </para>
+  <para>
+    There also is a <literal>memory</literal> hierarchy that controls
+    memory allocation limits; by default, all processes are in the
+    top-level cgroup, so any service or session can exhaust all
+    available memory. Per-cgroup memory limits can be specified in
+    <literal>configuration.nix</literal>; for instance, to limit
+    <literal>httpd.service</literal> to 512 MiB of RAM (excluding swap):
+  </para>
+  <programlisting language="bash">
+systemd.services.httpd.serviceConfig.MemoryLimit = &quot;512M&quot;;
+</programlisting>
+  <para>
+    The command <literal>systemd-cgtop</literal> shows a continuously
+    updated list of all cgroups with their CPU and memory usage.
+  </para>
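+  <para>
+    A typical invocation looks like this (output abridged and
+    illustrative):
+  </para>
+  <programlisting>
+$ systemd-cgtop
+Control Group                    Tasks   %CPU   Memory  Input/s Output/s
+/                                  231   12.3     1.8G        -        -
+/system/httpd.service               12    5.1   256.0M        -        -
+</programlisting>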
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/declarative-containers.section.xml b/nixos/doc/manual/from_md/administration/declarative-containers.section.xml
new file mode 100644
index 00000000000..7b35520d567
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/declarative-containers.section.xml
@@ -0,0 +1,60 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-declarative-containers">
+  <title>Declarative Container Specification</title>
+  <para>
+    You can also specify containers and their configuration in the
+    host’s <literal>configuration.nix</literal>. For example, the
+    following specifies that there shall be a container named
+    <literal>database</literal> running PostgreSQL:
+  </para>
+  <programlisting language="bash">
+containers.database =
+  { config =
+      { config, pkgs, ... }:
+      { services.postgresql.enable = true;
+        services.postgresql.package = pkgs.postgresql_10;
+      };
+  };
+</programlisting>
+  <para>
+    If you run <literal>nixos-rebuild switch</literal>, the container
+    will be built. If the container was already running, it will be
+    updated in place, without rebooting. The container can be configured
+    to start automatically by setting
+    <literal>containers.database.autoStart = true</literal> in its
+    configuration.
+  </para>
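+  <para>
+    That is, alongside the container definition above:
+  </para>
+  <programlisting language="bash">
+containers.database.autoStart = true;
+</programlisting>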
+  <para>
+    By default, declarative containers share the network namespace of
+    the host, meaning that they can listen on (privileged) ports.
+    However, they cannot change the network configuration. You can give
+    a container its own network as follows:
+  </para>
+  <programlisting language="bash">
+containers.database = {
+  privateNetwork = true;
+  hostAddress = &quot;192.168.100.10&quot;;
+  localAddress = &quot;192.168.100.11&quot;;
+};
+</programlisting>
+  <para>
+    This gives the container a private virtual Ethernet interface with
+    IP address <literal>192.168.100.11</literal>, which is hooked up to
+    a virtual Ethernet interface on the host with IP address
+    <literal>192.168.100.10</literal>. (See the next section for details
+    on container networking.)
+  </para>
+  <para>
+    To disable the container, just remove it from
+    <literal>configuration.nix</literal> and run
+    <literal>nixos-rebuild switch</literal>. Note that this will not
+    delete the root directory of the container in
+    <literal>/var/lib/containers</literal>. Containers can be destroyed
+    using the imperative method:
+    <literal>nixos-container destroy foo</literal>.
+  </para>
+  <para>
+    Declarative containers can be started and stopped using the
+    corresponding systemd service, e.g.
+    <literal>systemctl start container@database</literal>.
+  </para>
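+  <para>
+    For example:
+  </para>
+  <programlisting>
+# systemctl stop container@database.service
+# systemctl start container@database.service
+</programlisting>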
+</section>
diff --git a/nixos/doc/manual/from_md/administration/imperative-containers.section.xml b/nixos/doc/manual/from_md/administration/imperative-containers.section.xml
new file mode 100644
index 00000000000..59ecfdee5af
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/imperative-containers.section.xml
@@ -0,0 +1,131 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-imperative-containers">
+  <title>Imperative Container Management</title>
+  <para>
+    We’ll cover imperative container management using
+    <literal>nixos-container</literal> first. Be aware that container
+    management is currently only possible as <literal>root</literal>.
+  </para>
+  <para>
+    You create a container with identifier <literal>foo</literal> as
+    follows:
+  </para>
+  <programlisting>
+# nixos-container create foo
+</programlisting>
+  <para>
+    This creates the container’s root directory in
+    <literal>/var/lib/containers/foo</literal> and a small configuration
+    file in <literal>/etc/containers/foo.conf</literal>. It also builds
+    the container’s initial system configuration and stores it in
+    <literal>/nix/var/nix/profiles/per-container/foo/system</literal>.
+    You can modify the initial configuration of the container on the
+    command line. For instance, to create a container that has
+    <literal>sshd</literal> running, with the given public key for
+    <literal>root</literal>:
+  </para>
+  <programlisting>
+# nixos-container create foo --config '
+  services.openssh.enable = true;
+  users.users.root.openssh.authorizedKeys.keys = [&quot;ssh-dss AAAAB3N…&quot;];
+'
+</programlisting>
+  <para>
+    By default the next free address in the
+    <literal>10.233.0.0/16</literal> subnet will be chosen as the
+    container's IP address. This behavior can be altered by setting
+    <literal>--host-address</literal> and
+    <literal>--local-address</literal>:
+  </para>
+  <programlisting>
+# nixos-container create test --config-file test-container.nix \
+    --local-address 10.235.1.2 --host-address 10.235.1.1
+</programlisting>
+  <para>
+    Creating a container does not start it. To start the container, run:
+  </para>
+  <programlisting>
+# nixos-container start foo
+</programlisting>
+  <para>
+    This command will return as soon as the container has booted and has
+    reached <literal>multi-user.target</literal>. On the host, the
+    container runs within a systemd unit called
+    <literal>container@container-name.service</literal>. Thus, if
+    something went wrong, you can get status info using
+    <literal>systemctl</literal>:
+  </para>
+  <programlisting>
+# systemctl status container@foo
+</programlisting>
+  <para>
+    If the container has started successfully, you can log in as root
+    using the <literal>root-login</literal> operation:
+  </para>
+  <programlisting>
+# nixos-container root-login foo
+[root@foo:~]#
+</programlisting>
+  <para>
+    Note that only root on the host can do this (since there is no
+    authentication). You can also get a regular login prompt using the
+    <literal>login</literal> operation, which is available to all users
+    on the host:
+  </para>
+  <programlisting>
+# nixos-container login foo
+foo login: alice
+Password: ***
+</programlisting>
+  <para>
+    With <literal>nixos-container run</literal>, you can execute
+    arbitrary commands in the container:
+  </para>
+  <programlisting>
+# nixos-container run foo -- uname -a
+Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
+</programlisting>
+  <para>
+    There are several ways to change the configuration of the container.
+    First, on the host, you can edit
+    <literal>/var/lib/containers/name/etc/nixos/configuration.nix</literal>,
+    and run
+  </para>
+  <programlisting>
+# nixos-container update foo
+</programlisting>
+  <para>
+    This will build and activate the new configuration. You can also
+    specify a new configuration on the command line:
+  </para>
+  <programlisting>
+# nixos-container update foo --config '
+  services.httpd.enable = true;
+  services.httpd.adminAddr = &quot;foo@example.org&quot;;
+  networking.firewall.allowedTCPPorts = [ 80 ];
+'
+
+# curl http://$(nixos-container show-ip foo)/
+&lt;!DOCTYPE HTML PUBLIC &quot;-//W3C//DTD HTML 3.2 Final//EN&quot;&gt;…
+</programlisting>
+  <para>
+    However, note that this will overwrite the container’s
+    <literal>/etc/nixos/configuration.nix</literal>.
+  </para>
+  <para>
+    Alternatively, you can change the configuration from within the
+    container itself by running <literal>nixos-rebuild switch</literal>
+    inside the container. Note that the container by default does not
+    have a copy of the NixOS channel, so you should run
+    <literal>nix-channel --update</literal> first.
+  </para>
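+  <para>
+    A typical sequence from a root login inside the container looks
+    like this (a sketch; channel download output omitted):
+  </para>
+  <programlisting>
+# nixos-container root-login foo
+[root@foo:~]# nix-channel --update
+[root@foo:~]# nixos-rebuild switch
+</programlisting>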
+  <para>
+    Containers can be stopped and started using
+    <literal>nixos-container stop</literal> and
+    <literal>nixos-container start</literal>, respectively, or by using
+    <literal>systemctl</literal> on the container’s service unit. To
+    destroy a container, including its file system, do
+  </para>
+  <programlisting>
+# nixos-container destroy foo
+</programlisting>
+</section>
diff --git a/nixos/doc/manual/from_md/administration/logging.chapter.xml b/nixos/doc/manual/from_md/administration/logging.chapter.xml
new file mode 100644
index 00000000000..4da38c065a2
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/logging.chapter.xml
@@ -0,0 +1,45 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-logging">
+  <title>Logging</title>
+  <para>
+    System-wide logging is provided by systemd’s
+    <emphasis>journal</emphasis>, which subsumes traditional logging
+    daemons such as syslogd and klogd. Log entries are kept in binary
+    files in <literal>/var/log/journal/</literal>. The command
+    <literal>journalctl</literal> allows you to see the contents of the
+    journal. For example,
+  </para>
+  <programlisting>
+$ journalctl -b
+</programlisting>
+  <para>
+    shows all journal entries since the last reboot. (The output of
+    <literal>journalctl</literal> is piped into <literal>less</literal>
+    by default.) You can use various options and match operators to
+    restrict output to messages of interest. For instance, to get all
+    messages from PostgreSQL:
+  </para>
+  <programlisting>
+$ journalctl -u postgresql.service
+-- Logs begin at Mon, 2013-01-07 13:28:01 CET, end at Tue, 2013-01-08 01:09:57 CET. --
+...
+Jan 07 15:44:14 hagbard postgres[2681]: [2-1] LOG:  database system is shut down
+-- Reboot --
+Jan 07 15:45:10 hagbard postgres[2532]: [1-1] LOG:  database system was shut down at 2013-01-07 15:44:14 CET
+Jan 07 15:45:13 hagbard postgres[2500]: [1-1] LOG:  database system is ready to accept connections
+</programlisting>
+  <para>
+    Or to get all messages since the last reboot that have at least a
+    <quote>critical</quote> severity level:
+  </para>
+  <programlisting>
+$ journalctl -b -p crit
+Dec 17 21:08:06 mandark sudo[3673]: pam_unix(sudo:auth): auth could not identify password for [alice]
+Dec 29 01:30:22 mandark kernel[6131]: [1053513.909444] CPU6: Core temperature above threshold, cpu clock throttled (total events = 1)
+</programlisting>
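+  <para>
+    A few other invocations that are often useful (all flags are
+    documented in journalctl(1)):
+  </para>
+  <programlisting>
+$ journalctl -f                             # follow new entries as they arrive
+$ journalctl -k -b                          # kernel messages from the current boot
+$ journalctl -u sshd.service --since today  # one unit, today only
+</programlisting>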
+  <para>
+    The system journal is readable by root and by users in the
+    <literal>wheel</literal> and <literal>systemd-journal</literal>
+    groups. All users have a private journal that can be read using
+    <literal>journalctl</literal>.
+  </para>
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/maintenance-mode.section.xml b/nixos/doc/manual/from_md/administration/maintenance-mode.section.xml
new file mode 100644
index 00000000000..c86b1911c11
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/maintenance-mode.section.xml
@@ -0,0 +1,14 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-maintenance-mode">
+  <title>Maintenance Mode</title>
+  <para>
+    You can enter rescue mode by running:
+  </para>
+  <programlisting>
+# systemctl rescue
+</programlisting>
+  <para>
+    This will eventually give you a single-user root shell. Systemd will
+    stop (almost) all system services. To get out of maintenance mode,
+    just exit from the rescue shell.
+  </para>
+</section>
diff --git a/nixos/doc/manual/from_md/administration/network-problems.section.xml b/nixos/doc/manual/from_md/administration/network-problems.section.xml
new file mode 100644
index 00000000000..4c0598ca94e
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/network-problems.section.xml
@@ -0,0 +1,25 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-nix-network-issues">
+  <title>Network Problems</title>
+  <para>
+    Nix uses a so-called <emphasis>binary cache</emphasis> to avoid
+    building a package from source when it can instead be downloaded
+    as a pre-built binary. That is, whenever a command like
+    <literal>nixos-rebuild</literal> needs a path in the Nix store, Nix
+    will try to download that path from the Internet rather than build
+    it from source. The default binary cache is
+    <literal>https://cache.nixos.org/</literal>. If this cache is
+    unreachable, Nix operations may take a long time due to HTTP
+    connection timeouts. You can disable the use of the binary cache by
+    adding <literal>--option use-binary-caches false</literal>, e.g.
+  </para>
+  <programlisting>
+# nixos-rebuild switch --option use-binary-caches false
+</programlisting>
+  <para>
+    If you have an alternative binary cache at your disposal, you can
+    use it instead:
+  </para>
+  <programlisting>
+# nixos-rebuild switch --option binary-caches http://my-cache.example.org/
+</programlisting>
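+  <para>
+    To make an alternative cache permanent, the corresponding NixOS
+    option can be set in <literal>configuration.nix</literal> (a
+    sketch; the URL is illustrative):
+  </para>
+  <programlisting language="bash">
+nix.binaryCaches = [ &quot;http://my-cache.example.org/&quot; ];
+</programlisting>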
+</section>
diff --git a/nixos/doc/manual/from_md/administration/rebooting.chapter.xml b/nixos/doc/manual/from_md/administration/rebooting.chapter.xml
new file mode 100644
index 00000000000..78ee75afb64
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/rebooting.chapter.xml
@@ -0,0 +1,38 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-rebooting">
+  <title>Rebooting and Shutting Down</title>
+  <para>
+    The system can be shut down (and automatically powered off) by
+    doing:
+  </para>
+  <programlisting>
+# shutdown
+</programlisting>
+  <para>
+    This is equivalent to running <literal>systemctl poweroff</literal>.
+  </para>
+  <para>
+    To reboot the system, run
+  </para>
+  <programlisting>
+# reboot
+</programlisting>
+  <para>
+    which is equivalent to <literal>systemctl reboot</literal>.
+    Alternatively, you can quickly reboot the system using
+    <literal>kexec</literal>, which bypasses the BIOS by directly
+    loading the new kernel into memory:
+  </para>
+  <programlisting>
+# systemctl kexec
+</programlisting>
+  <para>
+    The machine can be suspended to RAM (if supported) using
+    <literal>systemctl suspend</literal>, and suspended to disk using
+    <literal>systemctl hibernate</literal>.
+  </para>
+  <para>
+    These commands can be run by any user who is logged in locally, i.e.
+    on a virtual console or in X11; otherwise, the user is asked for
+    authentication.
+  </para>
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/rollback.section.xml b/nixos/doc/manual/from_md/administration/rollback.section.xml
new file mode 100644
index 00000000000..a8df053011c
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/rollback.section.xml
@@ -0,0 +1,42 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-rollback">
+  <title>Rolling Back Configuration Changes</title>
+  <para>
+    After running <literal>nixos-rebuild</literal> to switch to a new
+    configuration, you may find that the new configuration doesn’t work
+    very well. In that case, there are several ways to return to a
+    previous configuration.
+  </para>
+  <para>
+    First, the GRUB boot manager allows you to boot into any previous
+    configuration that hasn’t been garbage-collected. These
+    configurations can be found under the GRUB submenu <quote>NixOS -
+    All configurations</quote>. This is especially useful if the new
+    configuration fails to boot. After the system has booted, you can
+    make the selected configuration the default for subsequent boots:
+  </para>
+  <programlisting>
+# /run/current-system/bin/switch-to-configuration boot
+</programlisting>
+  <para>
+    Second, you can switch to the previous configuration in a running
+    system:
+  </para>
+  <programlisting>
+# nixos-rebuild switch --rollback
+</programlisting>
+  <para>
+    This is equivalent to running:
+  </para>
+  <programlisting>
+# /nix/var/nix/profiles/system-N-link/bin/switch-to-configuration switch
+</programlisting>
+  <para>
+    where <literal>N</literal> is the number of the NixOS system
+    configuration. To get a list of the available configurations, do:
+  </para>
+  <programlisting>
+$ ls -l /nix/var/nix/profiles/system-*-link
+...
+lrwxrwxrwx 1 root root 78 Aug 12 13:54 /nix/var/nix/profiles/system-268-link -&gt; /nix/store/202b...-nixos-13.07pre4932_5a676e4-4be1055
+</programlisting>
+</section>
diff --git a/nixos/doc/manual/from_md/administration/service-mgmt.chapter.xml b/nixos/doc/manual/from_md/administration/service-mgmt.chapter.xml
new file mode 100644
index 00000000000..8b01b8f896a
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/service-mgmt.chapter.xml
@@ -0,0 +1,141 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-systemctl">
+  <title>Service Management</title>
+  <para>
+    In NixOS, all system services are started and monitored using the
+    systemd program. systemd is the <quote>init</quote> process of the
+    system (i.e. PID 1), the parent of all other processes. It manages a
+    set of so-called <quote>units</quote>, which can be things like
+    system services (programs), but also mount points, swap files,
+    devices, targets (groups of units) and more. Units can have complex
+    dependencies; for instance, one unit can require that another unit
+    must be successfully started before the first unit can be started.
+    When the system boots, it starts a unit named
+    <literal>default.target</literal>; the dependencies of this unit
+    cause all system services to be started, file systems to be mounted,
+    swap files to be activated, and so on.
+  </para>
+  <section xml:id="sect-nixos-systemd-general">
+    <title>Interacting with a running systemd</title>
+    <para>
+      The command <literal>systemctl</literal> is the main way to
+      interact with <literal>systemd</literal>. The following paragraphs
+      demonstrate ways to interact with any OS running systemd as its
+      init system; NixOS is no exception. The
+      <link linkend="sect-nixos-systemd-nixos">next section</link>
+      explains NixOS-specific things worth knowing.
+    </para>
+    <para>
+      Without any arguments, <literal>systemctl</literal> lists the
+      status of all active units:
+    </para>
+    <programlisting>
+$ systemctl
+-.mount          loaded active mounted   /
+swapfile.swap    loaded active active    /swapfile
+sshd.service     loaded active running   SSH Daemon
+graphical.target loaded active active    Graphical Interface
+...
+</programlisting>
+    <para>
+      You can ask for detailed status information about a unit, for
+      instance, the PostgreSQL database service:
+    </para>
+    <programlisting>
+$ systemctl status postgresql.service
+postgresql.service - PostgreSQL Server
+          Loaded: loaded (/nix/store/pn3q73mvh75gsrl8w7fdlfk3fq5qm5mw-unit/postgresql.service)
+          Active: active (running) since Mon, 2013-01-07 15:55:57 CET; 9h ago
+        Main PID: 2390 (postgres)
+          CGroup: name=systemd:/system/postgresql.service
+                  ├─2390 postgres
+                  ├─2418 postgres: writer process
+                  ├─2419 postgres: wal writer process
+                  ├─2420 postgres: autovacuum launcher process
+                  ├─2421 postgres: stats collector process
+                  └─2498 postgres: zabbix zabbix [local] idle
+
+Jan 07 15:55:55 hagbard postgres[2394]: [1-1] LOG:  database system was shut down at 2013-01-07 15:55:05 CET
+Jan 07 15:55:57 hagbard postgres[2390]: [1-1] LOG:  database system is ready to accept connections
+Jan 07 15:55:57 hagbard postgres[2420]: [1-1] LOG:  autovacuum launcher started
+Jan 07 15:55:57 hagbard systemd[1]: Started PostgreSQL Server.
+</programlisting>
+    <para>
+      Note that this shows the status of the unit (active and running),
+      all the processes belonging to the service, as well as the most
+      recent log messages from the service.
+    </para>
+    <para>
+      Units can be stopped, started or restarted:
+    </para>
+    <programlisting>
+# systemctl stop postgresql.service
+# systemctl start postgresql.service
+# systemctl restart postgresql.service
+</programlisting>
+    <para>
+      These operations are synchronous: they wait until the service has
+      finished starting or stopping (or has failed). Starting a unit
+      will cause the dependencies of that unit to be started as well (if
+      necessary).
+    </para>
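+    <para>
+      If you do not want to wait, <literal>systemctl</literal> can
+      enqueue the job and return immediately (a standard flag
+      documented in systemctl(1)):
+    </para>
+    <programlisting>
+# systemctl start --no-block postgresql.service
+</programlisting>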
+  </section>
+  <section xml:id="sect-nixos-systemd-nixos">
+    <title>systemd in NixOS</title>
+    <para>
+      Packages in Nixpkgs sometimes ship with systemd units, usually
+      under <literal>#pkg-out#/lib/systemd/</literal>. Merely putting
+      such a package in <literal>environment.systemPackages</literal>
+      doesn't make the service available to users or the system.
+    </para>
+    <para>
+      To enable a systemd <emphasis>system</emphasis> service using the
+      unit provided by the upstream package, use (e.g.):
+    </para>
+    <programlisting language="bash">
+systemd.packages = [ pkgs.packagekit ];
+</programlisting>
+    <para>
+      NixOS modules written by the community usually do the above, plus
+      take care of other details. If a module exists for the service
+      you are interested in, you probably only need to set
+      <literal>services.#name#.enable = true;</literal>, as shown below.
+      These services are defined in Nixpkgs'
+      <link xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/modules">
+      <literal>nixos/modules/</literal> directory</link>. If the
+      service is simple enough, this should suffice to start it on
+      boot.
+    </para>
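+    <para>
+      For example, to enable the Syncthing service via its community
+      module:
+    </para>
+    <programlisting language="bash">
+services.syncthing.enable = true;
+</programlisting>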
+    <para>
+      <emphasis>User</emphasis> systemd services, on the other hand,
+      should be treated differently. Given a package that has a systemd
+      unit file at <literal>#pkg-out#/lib/systemd/user/</literal>, using
+      <xref linkend="opt-systemd.packages" /> will let you start the
+      service via <literal>systemctl --user start</literal>, but it
+      won't start automatically on login. However, you can imperatively
+      enable it by adding the package's attribute to
+      <xref linkend="opt-systemd.packages" /> and then doing this (e.g.):
+    </para>
+    <programlisting>
+$ mkdir -p ~/.config/systemd/user/default.target.wants
+$ ln -s /run/current-system/sw/lib/systemd/user/syncthing.service ~/.config/systemd/user/default.target.wants/
+$ systemctl --user daemon-reload
+$ systemctl --user enable syncthing.service
+</programlisting>
+    <para>
+      If you are interested in a timer file, use
+      <literal>timers.target.wants</literal> instead of
+      <literal>default.target.wants</literal> in the first and second
+      commands, as sketched below.
+    </para>
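+    <para>
+      Concretely, assuming (hypothetically) that the package also ships
+      a <literal>syncthing.timer</literal> unit:
+    </para>
+    <programlisting>
+$ mkdir -p ~/.config/systemd/user/timers.target.wants
+$ ln -s /run/current-system/sw/lib/systemd/user/syncthing.timer ~/.config/systemd/user/timers.target.wants/
+</programlisting>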
+    <para>
+      Using <literal>systemctl --user enable syncthing.service</literal>
+      instead of the above will work, but it will use the absolute path
+      of <literal>syncthing.service</literal> for the symlink, and that
+      path lives in <literal>/nix/store/.../lib/systemd/user/</literal>.
+      Hence <link linkend="sec-nix-gc">garbage collection</link> will
+      remove that file, leaving a broken symlink in your systemd
+      configuration, and the service or timer will no longer start on
+      login.
+    </para>
+  </section>
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/store-corruption.section.xml b/nixos/doc/manual/from_md/administration/store-corruption.section.xml
new file mode 100644
index 00000000000..9ed572d484d
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/store-corruption.section.xml
@@ -0,0 +1,34 @@
+<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-nix-store-corruption">
+  <title>Nix Store Corruption</title>
+  <para>
+    After a system crash, it’s possible for files in the Nix store to
+    become corrupted. (For instance, the Ext4 file system has the
+    tendency to replace un-synced files with zero bytes.) NixOS tries
+    hard to prevent this from happening: it performs a
+    <literal>sync</literal> before switching to a new configuration, and
+    Nix’s database is fully transactional. If corruption still occurs,
+    you may be able to fix it automatically.
+  </para>
+  <para>
+    If the corruption is in a path in the closure of the NixOS system
+    configuration, you can fix it by doing
+  </para>
+  <programlisting>
+# nixos-rebuild switch --repair
+</programlisting>
+  <para>
+    This will cause Nix to check every path in the closure, and if its
+    cryptographic hash differs from the hash recorded in Nix’s database,
+    the path is rebuilt or redownloaded.
+  </para>
+  <para>
+    You can also scan the entire Nix store for corrupt paths:
+  </para>
+  <programlisting>
+# nix-store --verify --check-contents --repair
+</programlisting>
+  <para>
+    Any corrupt paths will be redownloaded if they’re available in a
+    binary cache; otherwise, they cannot be repaired.
+  </para>
+</section>
diff --git a/nixos/doc/manual/from_md/administration/troubleshooting.chapter.xml b/nixos/doc/manual/from_md/administration/troubleshooting.chapter.xml
new file mode 100644
index 00000000000..8bbb8a1fe72
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/troubleshooting.chapter.xml
@@ -0,0 +1,12 @@
+<chapter xmlns="http://docbook.org/ns/docbook"  xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude" xml:id="ch-troubleshooting">
+  <title>Troubleshooting</title>
+  <para>
+    This chapter describes solutions to common problems you might
+    encounter when you manage your NixOS system.
+  </para>
+  <xi:include href="boot-problems.section.xml" />
+  <xi:include href="maintenance-mode.section.xml" />
+  <xi:include href="rollback.section.xml" />
+  <xi:include href="store-corruption.section.xml" />
+  <xi:include href="network-problems.section.xml" />
+</chapter>
diff --git a/nixos/doc/manual/from_md/administration/user-sessions.chapter.xml b/nixos/doc/manual/from_md/administration/user-sessions.chapter.xml
new file mode 100644
index 00000000000..e8c64f153fc
--- /dev/null
+++ b/nixos/doc/manual/from_md/administration/user-sessions.chapter.xml
@@ -0,0 +1,46 @@
+<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-user-sessions">
+  <title>User Sessions</title>
+  <para>
+    Systemd keeps track of all users who are logged into the system
+    (e.g. on a virtual console or remotely via SSH). The command
+    <literal>loginctl</literal> allows querying and manipulating user
+    sessions. For instance, to list all user sessions:
+  </para>
+  <programlisting>
+$ loginctl
+   SESSION        UID USER             SEAT
+        c1        500 eelco            seat0
+        c3          0 root             seat0
+        c4        500 alice
+</programlisting>
+  <para>
+    This shows that two users are logged in locally, while another is
+    logged in remotely. (<quote>Seats</quote> are essentially the
+    combinations of displays and input devices attached to the system;
+    usually, there is only one seat.) To get information about a
+    session:
+  </para>
+  <programlisting>
+$ loginctl session-status c3
+c3 - root (0)
+           Since: Tue, 2013-01-08 01:17:56 CET; 4min 42s ago
+          Leader: 2536 (login)
+            Seat: seat0; vc3
+             TTY: /dev/tty3
+         Service: login; type tty; class user
+           State: online
+          CGroup: name=systemd:/user/root/c3
+                  ├─ 2536 /nix/store/10mn4xip9n7y9bxqwnsx7xwx2v2g34xn-shadow-4.1.5.1/bin/login --
+                  ├─10339 -bash
+                  └─10355 w3m nixos.org
+</programlisting>
+  <para>
+    This shows that the user is logged in on virtual console 3. It also
+    lists the processes belonging to this session. Since systemd keeps
+    track of this, you can terminate a session in a way that ensures
+    that all the session’s processes are gone:
+  </para>
+  <programlisting>
+# loginctl terminate-session c3
+</programlisting>
+</chapter>