gparted/gparted.in

#!/bin/sh
# Name: gparted
# Purpose: Perform appropriate startup of GParted executable gpartedbin.
#
# The purpose of these startup methods is to prevent
# devices from being automounted, and to ensure only one
# instance of GParted is running. File system problems can
# occur if devices are mounted prior to the completion of
# GParted's operations, or if multiple partition editing
# tools are in use concurrently.
#
# Copyright (C) 2008, 2009, 2010, 2013, 2015 Curtis Gedak
#
# This file is part of GParted.
#
# GParted is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GParted is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GParted. If not, see <http://www.gnu.org/licenses/>.
#
#
# Only permit one instance of GParted to execute at a time
#
if pidof gpartedbin 1> /dev/null; then
echo "The process gpartedbin is already running."
echo "Only one gpartedbin process is permitted."
exit 1
fi
#
# Define base command for executing GParted
#
BASE_CMD="@libexecdir@/gpartedbin $*"
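#
# Note: @libexecdir@, @bindir@, @gksuprog@ and @enable_xhost_root@ are
# placeholders in this gparted.in template, replaced with the configured
# values when the installed gparted script is generated at build time.
#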
#
# For non-root users try to get authorisation to run GParted as root.
#
if test "x`id -u`" != "x0"; then
    #
    # If there is no configured SU program, run gpartedbin as
    # non-root to display the graphical error about needing root
    # privileges.
    #
    if test "x@gksuprog@" = "x"; then
        echo "Root privileges are required for running gparted."
        $BASE_CMD
        exit 1
    fi
    #
    # Interim workaround to allow GParted, running as root, to access
    # the X11 display server under Wayland.  If configured with
    # './configure --enable-xhost-root', the xhost command is
    # available and root has not already been granted access to the
    # X11 display via xhost, then grant access.
    #
    ENABLE_XHOST_ROOT=@enable_xhost_root@
    GRANTED_XHOST_ROOT=no
    if test "x$ENABLE_XHOST_ROOT" = 'xyes' && xhost 1> /dev/null 2>&1; then
        if ! xhost | grep -qi 'SI:localuser:root$'; then
            xhost +SI:localuser:root
            GRANTED_XHOST_ROOT=yes
        fi
    fi
    #
    # Run gparted as root.
    #
    @gksuprog@ '@bindir@/gparted' "$@"
    status=$?
    #
    # Revoke root access to the X11 display, only if we granted it.
    #
    if test "x$GRANTED_XHOST_ROOT" = 'xyes'; then
        xhost -SI:localuser:root
    fi
    exit $status
fi
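#
# Beyond this point the script is running as root (the non-root branch
# above always exits).  Detect which tools are available to prevent
# devices being automounted while GParted is running.
#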
#
# Search PATH to determine if systemctl program can be found
# and if appropriate daemon is running.
#
HAVE_SYSTEMCTL=no
for k in '' `echo "$PATH" | sed 's,:, ,g'`; do
if test -x "$k/systemctl"; then
if pidof systemd 1> /dev/null; then
HAVE_SYSTEMCTL=yes
break
fi
fi
done
#
# Check if udisks2-inhibit exists in a known location
# and if appropriate daemon is running.
#
HAVE_UDISKS2_INHIBIT=no
for k in /usr/libexec/udisks2/udisks2-inhibit \
         /usr/lib/udisks2/udisks2-inhibit; do
    if test -x $k; then
        if pidof udisksd 1> /dev/null; then
            HAVE_UDISKS2_INHIBIT=yes
            UDISKS2_INHIBIT_BIN=$k
            break
        fi
    fi
done
#
# Search PATH to determine if udisks program can be found
# and if appropriate daemon is running.
#
HAVE_UDISKS=no
for k in '' `echo "$PATH" | sed 's,:, ,g'`; do
if test -x "$k/udisks"; then
if pidof udisks-daemon 1> /dev/null; then
HAVE_UDISKS=yes
break
fi
fi
done
#
# Search PATH to determine if hal-lock program can be found
# and if appropriate daemon is running.
#
HAVE_HAL_LOCK=no
for k in '' `echo "$PATH" | sed 's,:, ,g'`; do
if test -x "$k/hal-lock"; then
if pidof hald 1> /dev/null; then
HAVE_HAL_LOCK=yes
break
fi
fi
done
#
# Use systemctl to prevent automount by masking currently unmasked mount points
#
MOUNTLIST=''
if test "x$HAVE_SYSTEMCTL" = "xyes"; then
    MOUNTLIST=`systemctl show --all --property=Where,What,Id,LoadState '*.mount' | \
    awk '
    function clear_properties() {
        where = ""; what = ""; id = ""; loadstate = ""
    }
    function process_unit() {
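        # Report only mount units backed by a block device ("What" starts
        # with "/dev/") which are not already masked, skipping /dev/fuse
        # (a character device used by snap), the root file system mount,
        # and snap squashfs images on /dev/loop* mounted under /snap/.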
        if (substr(what,1,5) == "/dev/" &&
            loadstate != "masked" &&
            what != "/dev/fuse" &&
            where != "/" &&
            ! (substr(what,1,9) == "/dev/loop" && substr(where,1,6) == "/snap/"))
        {
            print id
        }
        clear_properties()
    }
    /^Where=/     { where = substr($0,7) }
    /^What=/      { what = substr($0,6) }
    /^Id=/        { id = substr($0,4) }
    /^LoadState=/ { loadstate = substr($0,11) }
    /^$/          { process_unit() }
    END           { process_unit() }
    '`
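    #
    # Mask the selected mount units for this runtime only.  Masking a
    # unit creates a symlink /run/systemd/system/<unit> -> /dev/null,
    # which stops systemd from automounting that file system.
    #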
if test "x$MOUNTLIST" != "x"; then
systemctl --runtime mask --quiet -- $MOUNTLIST
fi
fi
#
# Create temporary blank overrides for all udev rules which automatically
# start Linux Software RAID array members and Bcache devices.
#
# Udev stores volatile / temporary runtime rules in directory /run/udev/rules.d.
# Volatile / temporary rules are used to override system default rules from
# /lib/udev/rules.d and/or /usr/lib/udev/rules.d (depending on udev
# configuration). (Permanent local administrative rules in directory
# /etc/udev/rules.d override all others). See udev(7) manual page for details.
#
# Default udev rules containing mdadm to incrementally start array members are
# found in 64-md-raid.rules and/or 65-md-incremental.rules, depending on the
# distribution and age. The rules may be commented out or not exist at all.
#
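#
# For example, the mdadm incremental assembly rule looks like:
#   SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
#       RUN+="/sbin/mdadm -I $tempnode"
# and bcache devices are started by a similar rule in 69-bcache.rules:
#   RUN+="bcache-register $tempnode"
#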
UDEV_TEMP_RULES='' # List of temporary override rules files.
if test -d /run/udev; then
test ! -d /run/udev/rules.d && mkdir /run/udev/rules.d
UDEV_TEMP_RULES=`for udev_default_rules_dir in /lib/udev/rules.d /usr/lib/udev/rules.d
do
test -d $udev_default_rules_dir || continue
egrep -l '^[^#].*mdadm (-I|--incremental)' $udev_default_rules_dir/*.rules 2> /dev/null
ls $udev_default_rules_dir/*bcache*.rules 2> /dev/null
done | sed 's,.*/lib/udev,/run/udev,g' | sort -u`
fi
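#
# Illustrative example of the mapping done by the sed above (actual filenames
# depend on the distribution): a matching packaged rules file such as
# /usr/lib/udev/rules.d/69-bcache.rules becomes the override path
# /run/udev/rules.d/69-bcache.rules.  Creating an empty file at that path
# takes precedence over the packaged file, so none of its rules are applied.
#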
for rule in $UDEV_TEMP_RULES; do
touch "$rule"
done
#
# Use udisks2-inhibit if udisks2-inhibit exists and daemon is running.
# Else use both udisks and hal-lock for invocation if both binaries exist and both
# daemons are running.
# Else use udisks if binary exists and daemon is running.
# Otherwise use hal-lock for invocation if binary exists and daemon is running.
# If the above checks fail then simply run gpartedbin.
#
if test "x$HAVE_UDISKS2_INHIBIT" = "xyes"; then
$UDISKS2_INHIBIT_BIN $BASE_CMD
elif test "x$HAVE_UDISKS" = "xyes" && test "x$HAVE_HAL_LOCK" = "xyes"; then
udisks --inhibit -- \
hal-lock --interface org.freedesktop.Hal.Device.Storage --exclusive \
--run "$BASE_CMD"
elif test "x$HAVE_UDISKS" = "xyes"; then
udisks --inhibit -- $BASE_CMD
elif test "x$HAVE_HAL_LOCK" = "xyes"; then
hal-lock --interface org.freedesktop.Hal.Device.Storage --exclusive \
--run "$BASE_CMD"
else
$BASE_CMD
fi
status=$?
#
# Clear any temporary override udev rules used to stop udev automatically
# starting Linux Software RAID array members and Bcache devices.
#
for rule in $UDEV_TEMP_RULES; do
rm -f "$rule"
done
#
# Use systemctl to unmask those mount points masked above.  If unmask fails
# (systemd 239 rejects 'unmask' combined with '--runtime'), fall back to
# removing the runtime mask files directly.
#
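# As an illustration (the actual unit names depend on what was masked above,
# e.g. boot.mount), a runtime mask created by 'systemctl --runtime mask' is a
# symlink under /run/systemd/system pointing to /dev/null.  Removing that
# symlink and running 'systemctl daemon-reload', as the fallback branch below
# does, achieves the same result as a successful 'systemctl --runtime unmask'.
#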
if test "x$HAVE_SYSTEMCTL" = "xyes" && test "x$MOUNTLIST" != "x"; then
systemctl --runtime unmask --quiet -- $MOUNTLIST 2> /dev/null || \
{
cd /run/systemd/system &&
rm -f -- $MOUNTLIST &&
systemctl daemon-reload
}
fi
exit $status