From 3aab503321d05acad34086fc4c7b2d1b73a2d491 Mon Sep 17 00:00:00 2001
From: "Andrew J. Hesford"
Date: Sun, 16 May 2021 08:32:50 -0400
Subject: [PATCH] mkinitcpio: update to 30, add mkinitcpio-zfs subpackage

ZFS hooks taken from [1] and slightly modified:

1. If present, /etc/hostid is included in the image
2. Pool imports are read/write by default
3. Dracut-style root=zfs: arguments are recognized

[1] https://github.com/archzfs/archzfs/tree/master/src/zfs-utils
---
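Patch notes (below the fold, not part of the commit message): the run_hook
case statement accepts several equivalent spellings for the root dataset.
A few illustrative kernel command lines, using a hypothetical dataset
tank/ROOT/void:

    root=zfs:tank/ROOT/void    # dracut-style, recognized as of this patch
    root=ZFS=tank/ROOT/void    # grub-style
    zfs=tank/ROOT/void         # plain zfs= selection
    zfs=bootfs                 # use the pool's bootfs property

Because pool imports now default to read/write, appending 'ro' (rather
than omitting 'rw') should be what triggers a read-only import; rwopt is
expected to be set by mkinitcpio's init from the ro/rw flags.
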
 srcpkgs/{mkinitcpio-udev => mkinitcpio-zfs}   |   0
 srcpkgs/mkinitcpio/files/kernel-hook-postinst |   1 +
 srcpkgs/mkinitcpio/files/zfs_hook             | 218 ++++++++++++++++++
 srcpkgs/mkinitcpio/files/zfs_install          | 102 ++++++++
 srcpkgs/mkinitcpio/patches/gzip-default.patch |  39 ++++
 srcpkgs/mkinitcpio/template                   |  41 ++--
 6 files changed, 387 insertions(+), 14 deletions(-)
 rename srcpkgs/{mkinitcpio-udev => mkinitcpio-zfs} (100%)
 create mode 100644 srcpkgs/mkinitcpio/files/zfs_hook
 create mode 100644 srcpkgs/mkinitcpio/files/zfs_install
 create mode 100644 srcpkgs/mkinitcpio/patches/gzip-default.patch

diff --git a/srcpkgs/mkinitcpio-udev b/srcpkgs/mkinitcpio-zfs
similarity index 100%
rename from srcpkgs/mkinitcpio-udev
rename to srcpkgs/mkinitcpio-zfs
diff --git a/srcpkgs/mkinitcpio/files/kernel-hook-postinst b/srcpkgs/mkinitcpio/files/kernel-hook-postinst
index c6f2ab81264f..08bf4c0c9514 100644
--- a/srcpkgs/mkinitcpio/files/kernel-hook-postinst
+++ b/srcpkgs/mkinitcpio/files/kernel-hook-postinst
@@ -11,5 +11,6 @@ if [ ! -x usr/bin/mkinitcpio ]; then
 	exit 0
 fi
 
+umask 0077
 usr/bin/mkinitcpio -g boot/initramfs-${VERSION}.img -k ${VERSION}
 exit $?
diff --git a/srcpkgs/mkinitcpio/files/zfs_hook b/srcpkgs/mkinitcpio/files/zfs_hook
new file mode 100644
index 000000000000..5d296293c163
--- /dev/null
+++ b/srcpkgs/mkinitcpio/files/zfs_hook
@@ -0,0 +1,218 @@
+#
+# WARNING: This script is parsed by ash in busybox at boot time, not bash!
+# http://linux.die.net/man/1/ash
+# https://wiki.ubuntu.com/DashAsBinSh
+# http://www.jpsdomain.org/public/2008-JP_bash_vs_dash.pdf
+#
+ZPOOL_FORCE=""
+ZPOOL_IMPORT_FLAGS=""
+ZFS_BOOT_ONLY=""
+
+zfs_get_bootfs () {
+    for zfs_dataset in $(zpool list -H -o bootfs); do
+        case ${zfs_dataset} in
+            "" | "-")
+                # skip this line/dataset
+                ;;
+            "no pools available")
+                return 1
+                ;;
+            *)
+                ZFS_DATASET=${zfs_dataset}
+                return 0
+                ;;
+        esac
+    done
+    return 1
+}
+
+zfs_decrypt_fs() {
+    dataset=$1
+
+    # Make sure the dataset is encrypted; get fails if ZFS does not support encryption
+    encryption="$(zfs get -H -o value encryption "${dataset}" 2>/dev/null)" || return 0
+    [ "${encryption}" != "off" ] || return 0
+
+    # Make sure the dataset is locked
+    keystatus="$(zfs get -H -o value keystatus "${dataset}")" || return 0
+    [ "${keystatus}" != "available" ] || return 0
+
+    # Make sure the encryptionroot is sensible
+    encryptionroot="$(zfs get -H -o value encryptionroot "${dataset}")" || return 0
+    [ "${encryptionroot}" != "-" ] || return 0
+
+    # Export the encryption root to be used by other hooks (SSH)
+    echo "${encryptionroot}" > /.encryptionroot
+
+    # If the key location is a file, determine whether it can be overridden by a prompt
+    prompt_override=""
+    if keylocation="$(zfs get -H -o value keylocation "${dataset}")"; then
+        if [ "${keylocation}" != "prompt" ]; then
+            if keyformat="$(zfs get -H -o value keyformat "${dataset}")"; then
+                [ "${keyformat}" = "passphrase" ] && prompt_override="yes"
+            fi
+        fi
+    fi
+
+    # Loop until the key is loaded here or by another vector (SSH, for instance)
+    while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ]; do
+        # Try the default loading mechanism
+        zfs load-key "${encryptionroot}" && break
+
+        # Loading failed; try a prompt if the failing location was not itself a prompt
+        if [ -n "${prompt_override}" ]; then
+            echo "Unable to load key ${keylocation}; please type the passphrase"
+            echo "To retry the file, interrupt now or repeatedly input a wrong passphrase"
+            zfs load-key -L prompt "${encryptionroot}" && break
+        fi
+
+        # Throttle retry attempts
+        sleep 2
+    done
+
+    if [ -f /.encryptionroot ]; then
+        rm /.encryptionroot
+    fi
+}
+
+zfs_mount_handler () {
+    if [ "${ZFS_DATASET}" = "bootfs" ] ; then
+        if ! zfs_get_bootfs ; then
+            # Let's import everything and try again
+            zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+            if ! zfs_get_bootfs ; then
+                err "ZFS: Cannot find bootfs."
+                exit 1
+            fi
+        fi
+    fi
+
+    local pool="${ZFS_DATASET%%/*}"
+    local rwopt_exp="${rwopt:-rw}"
+
+    if ! zpool list -H "${pool}" > /dev/null 2>&1; then
+        if [ ! "${rwopt_exp}" = "rw" ]; then
+            msg "ZFS: Importing pool ${pool} readonly."
+            ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
+        else
+            msg "ZFS: Importing pool ${pool}."
+        fi
+
+        if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
+            err "ZFS: Unable to import pool ${pool}."
+            exit 1
+        fi
+    fi
+
+    local node="$1"
+    local rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
+    local tab_file="${node}/etc/fstab"
+    local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
+
+    # Mount the root and any child datasets
+    for dataset in ${zfs_datasets}; do
+        mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
+        canmount=$(zfs get -H -o value canmount "${dataset}")
+        # skip datasets that should not be mounted here
+        [ ${dataset} != "${ZFS_DATASET}" -a \( ${canmount} = "off" -o ${canmount} = "noauto" -o ${mountpoint} = "none" \) ] && continue
+        if [ ${mountpoint} = "legacy" ]; then
+            if [ -f "${tab_file}" ]; then
+                if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+                    opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+                    mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+                    zfs_decrypt_fs "${dataset}"
+                    mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
+                fi
+            fi
+        else
+            zfs_decrypt_fs "${dataset}"
+            mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+        fi
+    done
+}
+
+set_flags() {
+    # Force-import the pools; useful if a pool was not cleanly exported with 'zpool export <pool>'
+    [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+
+    # Disable the late hook; useful if zfs-import-cache.service should be used instead
+    [ ! "${zfs_boot_only}" = "" ] && ZFS_BOOT_ONLY="1"
+
+    # Add the import directory to the import command flags
+    [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+    [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
+run_hook() {
+    set_flags
+
+    # Wait up to 15 seconds, by default, for ZFS devices to show up
+    [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
+
+    case ${root} in
+        # root=zfs
+        "zfs")
+            mount_handler="zfs_mount_handler"
+            ;;
+        # root=ZFS=... syntax (grub)
+        "ZFS="*)
+            mount_handler="zfs_mount_handler"
+            ZFS_DATASET="${root#*[=]}"
+            ;;
+        # root=zfs:... syntax (dracut)
+        "zfs:"*)
+            mount_handler="zfs_mount_handler"
+            ZFS_DATASET="${root#*[:]}"
+            ;;
+    esac
+
+    case ${zfs} in
+        "")
+            # skip this line/dataset
+            ;;
+        auto|bootfs)
+            ZFS_DATASET="bootfs"
+            mount_handler="zfs_mount_handler"
+            local pool="[a-zA-Z][^ ]*"
+            ;;
+        *)
+            ZFS_DATASET="${zfs}"
+            mount_handler="zfs_mount_handler"
+            local pool="${ZFS_DATASET%%/*}"
+            ;;
+    esac
+
+    # Allow at least n seconds for the zfs device to show up. Especially
+    # when using zfs_import_dir instead of zpool.cache, the listing of
+    # available pools can be slow, so this loop must be top-tested to
+    # ensure we do one 'zpool import' pass after the timer has expired.
+    sleep ${ZFS_WAIT} & pid=$!
+    local break_after=0
+    while :; do
+        kill -0 $pid > /dev/null 2>&1 || break_after=1
+        if [ -c "/dev/zfs" ]; then
+            zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+                BEGIN     { pool_found=0; online=0; unavail=0 }
+                /^ ${pool} .*/ { pool_found=1 }
+                /^\$/     { pool_found=0 }
+                /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+                /ONLINE/  { if (pool_found == 1) { online=1 } }
+                END       { if (online == 1 && unavail != 1)
+                              { exit 0 }
+                            else
+                              { exit 1 }
+                          }" && break
+        fi
+        [ $break_after = 1 ] && break
+        sleep 1
+    done
+    kill $pid > /dev/null 2>&1
+}
+
+run_latehook () {
+    set_flags
+    # Only run 'zpool import' if flags were set (cache file found or
+    # zfs_import_dir specified) and zfs_boot_only is not set
+    [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && [ "${ZFS_BOOT_ONLY}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
+}
+
+# vim:set ts=4 sw=4 ft=sh et:
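
An aside on the decryption path (not part of the patch): zfs_decrypt_fs is
expected to cover datasets created along these lines; pool and dataset
names, and the key file path, are hypothetical:

    # passphrase keys prompt directly at boot
    zfs create -o encryption=aes-256-gcm -o keyformat=passphrase \
        -o keylocation=prompt tank/ROOT/void

    # file-based keys are tried first; because keyformat is passphrase,
    # the hook falls back to an interactive prompt if loading the file fails
    zfs create -o encryption=aes-256-gcm -o keyformat=passphrase \
        -o keylocation=file:///etc/zfs/void.key tank/ROOT/void
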
"${ZPOOL_IMPORT_FLAGS}" = "" ] && [ "${ZFS_BOOT_ONLY}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE} +} + +# vim:set ts=4 sw=4 ft=sh et: diff --git a/srcpkgs/mkinitcpio/files/zfs_install b/srcpkgs/mkinitcpio/files/zfs_install new file mode 100644 index 000000000000..41f2ad279b00 --- /dev/null +++ b/srcpkgs/mkinitcpio/files/zfs_install @@ -0,0 +1,102 @@ +#!/bin/bash + +build() { + map add_module \ + zavl \ + znvpair \ + zunicode \ + zcommon \ + zfs \ + spl + + map add_binary \ + fsck.zfs \ + mount.zfs \ + seq \ + zdb \ + zed \ + zfs \ + zhack \ + zinject \ + zpool \ + zstreamdump \ + /usr/lib/udev/vdev_id \ + /usr/lib/udev/zvol_id \ + findmnt + + map add_file \ + /usr/lib/udev/rules.d/60-zvol.rules \ + /usr/lib/udev/rules.d/69-vdev.rules \ + /usr/lib/udev/rules.d/90-zfs.rules \ + /usr/lib/libgcc_s.so.1 + + map add_dir \ + /etc/zfs/zed.d + + add_runscript + + # allow mount(8) to "autodetect" ZFS + echo 'zfs' >>"${BUILDROOT}/etc/filesystems" + + [[ -f /etc/hostid ]] && add_file "/etc/hostid" + [[ -f /etc/zfs/zpool.cache ]] && cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org" + [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf" +} + +help() { + cat< + + To force importing of a ZFS pool: + + zfs_force=1 + + If set to 1, this will use "zpool import -f" when attempting to import + pools. + + To change the seconds of time to wait for ZFS devices to show up at boot: + + zfs_wait=30 + + To search for devices in a directory other than "/dev": + + zfs_import_dir=/dev/disk/by-uuid + or + zfs_import_dir=/dev/disk/by-partuuid + or + zfs_import_dir=/dev/disk/by-path + etc. + + Following initcpio convention, the 'rw' option must be specified to load the + pool as read/write. Pools are loaded as read only by default. + +Examples: + + To use bootfs on your pool, use + + zfs=bootfs rw + + This will setup your root using tank/root zfs pool. + + zfs=tank/root rw + +If you want to set properties for zfs-on-linux module, you should add them to +/etc/modprobe.d/zfs.conf and then rebuild initcpio. + +HELPEOF +} + +# vim: set ts=4 sw=4 ft=sh et: diff --git a/srcpkgs/mkinitcpio/patches/gzip-default.patch b/srcpkgs/mkinitcpio/patches/gzip-default.patch new file mode 100644 index 000000000000..7d6365a38f81 --- /dev/null +++ b/srcpkgs/mkinitcpio/patches/gzip-default.patch @@ -0,0 +1,39 @@ +Because not all Void kernels may support zstd, change the default initramfs +compression to gzip. + +diff -ur a/man/mkinitcpio.conf.5.txt b/man/mkinitcpio.conf.5.txt +--- a/man/mkinitcpio.conf.5.txt 2021-02-16 21:37:31.000000000 -0500 ++++ b/man/mkinitcpio.conf.5.txt 2021-05-17 09:33:32.418504652 -0400 +@@ -55,7 +55,7 @@ + Defines a program to filter the generated image through. The kernel + understands the compression formats yielded by the *zstd*, *gzip*, *bzip2*, + *lz4*, *lzop*, *lzma*, and *xz* compressors. If unspecified, this setting +- defaults to *zstd* compression. In order to create an uncompressed image, ++ defaults to *gzip* compression. In order to create an uncompressed image, + define this variable as *cat*. + + + It's not hard to realize that a filter such as a *tac* or *rev* will cause +diff -ur a/mkinitcpio b/mkinitcpio +--- a/mkinitcpio 2021-02-16 21:37:31.000000000 -0500 ++++ b/mkinitcpio 2021-05-17 09:34:15.970588222 -0400 +@@ -508,7 +508,7 @@ + die 'Unable to write to %s' "$_optgenimg" + fi + +- _optcompress=${_optcompress:-${COMPRESSION:-zstd}} ++ _optcompress=${_optcompress:-${COMPRESSION:-gzip}} + if ! 
+    if ! type -P "$_optcompress" >/dev/null; then
+        warning "Unable to locate compression method: %s" "$_optcompress"
+        _optcompress=cat
+diff -ur a/mkinitcpio.conf b/mkinitcpio.conf
+--- a/mkinitcpio.conf	2021-02-16 21:37:31.000000000 -0500
++++ b/mkinitcpio.conf	2021-05-17 09:34:24.752605714 -0400
+@@ -52,7 +52,7 @@
+ HOOKS=(base udev autodetect modconf block filesystems keyboard fsck)
+ 
+ # COMPRESSION
+-# Use this to compress the initramfs image. By default, zstd compression
++# Use this to compress the initramfs image. By default, gzip compression
+ # is used. Use 'cat' to create an uncompressed image.
+ #COMPRESSION="zstd"
+ #COMPRESSION="gzip"
diff --git a/srcpkgs/mkinitcpio/template b/srcpkgs/mkinitcpio/template
index 67b3cff04f10..04277348348d 100644
--- a/srcpkgs/mkinitcpio/template
+++ b/srcpkgs/mkinitcpio/template
@@ -1,41 +1,45 @@
 # Template file for 'mkinitcpio'
 pkgname=mkinitcpio
-version=29
+version=30
 revision=1
 build_style=gnu-makefile
 hostmakedepends="asciidoc"
 depends="busybox-static bsdtar bash"
+checkdepends="busybox-static"
 short_desc="Next generation of initramfs creation"
 maintainer="Andrea Brancaleoni "
 license="GPL-2.0-only"
-homepage="https://wiki.archlinux.org/index.php/Mkinitcpio"
+homepage="https://git.archlinux.org/mkinitcpio.git"
 distfiles="https://sources.archlinux.org/other/${pkgname}/${pkgname}-${version}.tar.gz"
-checksum=0239ba7ae91d652472819457a5dd812c574ba37c3c3d9161e7742a63b85076c2
-
+checksum=c7725035a06d2ab6ef6e97601b69859d6061aec95c4551e2a1ad2e27d307258f
 conf_files="/etc/mkinitcpio.conf"
-
 patch_args="-Np1"
+replaces="mkinitcpio-udev>=0"
+
+pre_check() {
+	sed -i -e '1s,/lib/initcpio/busybox,/usr/bin/busybox.static,' test/*
+}
 
 post_install() {
+	# Install udev hooks
+	vinstall ${FILESDIR}/udev_hook 644 usr/lib/initcpio/hooks udev
+	vinstall ${FILESDIR}/udev_install 644 usr/lib/initcpio/install udev
+
+	# Remove unneeded systemd bits
 	rm -rf ${DESTDIR}/usr/lib/kernel
 	rm -rf ${DESTDIR}/usr/lib/systemd
 	rm -rf ${DESTDIR}/usr/lib/initcpio/install/sd*
 	rm -rf ${DESTDIR}/usr/lib/tmpfiles.d
+
+	# Remove unneeded pacman hooks
+	rm -rf ${DESTDIR}/usr/share/libalpm
+
 	ln -s /usr/bin/busybox.static $DESTDIR/usr/lib/initcpio/busybox
 	vinstall ${FILESDIR}/kernel-hook-postinst 755 etc/kernel.d/post-install 20-mkinitcpio
 	vinstall ${FILESDIR}/kernel-hook-postrm 755 etc/kernel.d/post-remove 20-mkinitcpio
 }
 
-mkinitcpio-udev_package() {
-	depends="${sourcepkg}>=${version}_${revision} eudev"
-	short_desc+=" - udev support"
-	pkg_install() {
-		vinstall ${FILESDIR}/udev_hook 644 usr/lib/initcpio/hooks udev
-		vinstall ${FILESDIR}/udev_install 644 usr/lib/initcpio/install udev
-	}
-}
-
 mkinitcpio-lvm2_package() {
 	depends="${sourcepkg}>=${version}_${revision} lvm2 thin-provisioning-tools"
 	short_desc+=" - lvm2 support"
@@ -75,3 +79,12 @@ mkinitcpio-xbps_package() {
 		vinstall ${FILESDIR}/xbps_install 644 usr/lib/initcpio/install xbps
 	}
 }
+
+mkinitcpio-zfs_package() {
+	depends="${sourcepkg}>=${version}_${revision} zfs"
+	short_desc+=" - ZFS support"
+	pkg_install() {
+		vinstall ${FILESDIR}/zfs_hook 644 usr/lib/initcpio/hooks zfs
+		vinstall ${FILESDIR}/zfs_install 644 usr/lib/initcpio/install zfs
+	}
+}
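
Testing note (not part of the patch): with mkinitcpio-zfs installed, the
hook is enabled by listing zfs in the HOOKS array of /etc/mkinitcpio.conf
and regenerating the image. Placing it before 'filesystems' is a
suggestion, starting from the shipped default HOOKS line:

    HOOKS=(base udev autodetect modconf block zfs filesystems keyboard fsck)

    # regenerate for an installed kernel, mirroring the kernel hook above
    mkinitcpio -g /boot/initramfs-<version>.img -k <version>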