You are not logged in.
Pages: 1
I have /, /var and /home on md arrays. It seems that only root is mounting: it gets to the lightdm login prompt but there are no users available. I suspect /var and /home are not mounting, but it's hard to tell since I can't log in. It doesn't work from a tty either; neither a regular user nor root can log in. It's a multiboot system that used to work. I restored from an rsync backup but can't get it to boot. I've rebuilt the initramfs and run grub-mkconfig. If I mount /var and /home in the livecd I can see the user directories.
Some things after booting from livecd:
/etc/fstab, I've tried uuids and /dev/md/<name>:
# Arch
# Static information about the filesystems.
# See fstab(5) for details.
# <file system> <dir> <type> <options> <dump> <pass>
# efi /dev/sda1
UUID=21CE-1BD6 /efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 2
# swap /dev/sda2
UUID=7e831633-1ca0-441e-8300-7da283e71acb none swap sw 0 0
# arch-var
#UUID=c5f8d1b4-c537-4efe-9bd3-f213af997eef /var btrfs rw,relatime 0 2
/dev/md/arch-var /var btrfs rw,relatime 0 2
# arch-home
#UUID=1d1458fa-cc5a-4ba2-8459-c87c944e0121 /home ext4 rw,relatime 0 2
/dev/md/arch-home /home ext4 rw,relatime 0 2
# arch-root
#UUID=ece1ec74-5911-4828-81ed-74376d49f13b / ext4 rw,relatime 0 1
/dev/md/arch-root / ext4 rw,relatime 0 1
# common-data /dev/sda12
UUID=9718141264977247632 /common zfs defaults 0 2

cat /proc/mdstat:
Personalities : [raid0] [raid6] [raid5] [raid4] [raid10]
md122 : active raid10 sdb9[1] sdc9[2] sda11[0] sdd9[3]
234289152 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
bitmap: 0/2 pages [0KB], 65536KB chunk
md123 : active raid6 sdc6[2] sdb6[1] sda8[0] sdd6[3]
234289152 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/4] [UUUU]
bitmap: 0/1 pages [0KB], 65536KB chunk
md124 : active (auto-read-only) raid6 sdb5[1] sdc5[2] sda7[0] sdd5[3]
234289152 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/4] [UUUU]
bitmap: 0/1 pages [0KB], 65536KB chunk
md125 : active (auto-read-only) raid10 sdb8[1] sdc8[2] sda10[0] sdd8[3]
234289152 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
bitmap: 0/2 pages [0KB], 65536KB chunk
md126 : active raid0 sdb3[1] sdc3[2] sda5[0] sdd3[3]
39030784 blocks super 1.2 512k chunks
md127 : active raid0 sdb2[1] sdc2[2] sda4[0] sdd2[3]
39030784 blocks super 1.2 512k chunks

lsblk -f:
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /run/archiso/airootfs
sda
├─sda1 vfat FAT32 21CE-1BD6
├─sda2 swap 1 Linux-Swap 7e831633-1ca0-441e-8300-7da283e71acb
├─sda3 zfs_member 5000 var 9469824929888068688
├─sda4 linux_raid_member 1.2 livecd:gentoo-var ead990ca-8c9d-59c6-d281-e4a67451a5f1
│ └─md126
├─sda5 linux_raid_member 1.2 archiso:arch-var a0c52d0b-fe12-774b-923f-362982ace963
│ └─md127 btrfs Arch-Var c5f8d1b4-c537-4efe-9bd3-f213af997eef
├─sda6 zfs_member 5000 home 9633320021458855508
├─sda7 linux_raid_member 1.2 livecd:gentoo-home 47a5e33f-c2b2-5445-dc4e-3d1e138b1207
│ └─md124
├─sda8 linux_raid_member 1.2 archiso:arch-home cc96e752-0fe0-52e7-91fa-2208f34738ca
│ └─md122 ext4 1.0 Arch-Home 1d1458fa-cc5a-4ba2-8459-c87c944e0121
├─sda9 zfs_member 5000 zroot 15526487576425198649
├─sda10 linux_raid_member 1.2 livecd:gentoo-root 94886ff3-565c-6eb1-579c-adc82e3596e8
│ └─md125
├─sda11 linux_raid_member 1.2 archiso:arch-root 498fe28d-21f4-4ff9-5f6c-1cf71602919e
│ └─md123 ext4 1.0 Arch-Root ece1ec74-5911-4828-81ed-74376d49f13b
└─sda12 zfs_member 5000 common 11542737325637951711
sdb
├─sdb1 zfs_member 5000 var 9469824929888068688
├─sdb2 linux_raid_member 1.2 livecd:gentoo-var ead990ca-8c9d-59c6-d281-e4a67451a5f1
│ └─md126
├─sdb3 linux_raid_member 1.2 archiso:arch-var a0c52d0b-fe12-774b-923f-362982ace963
│ └─md127 btrfs Arch-Var c5f8d1b4-c537-4efe-9bd3-f213af997eef
├─sdb4 zfs_member 5000 home 9633320021458855508
├─sdb5 linux_raid_member 1.2 livecd:gentoo-home 47a5e33f-c2b2-5445-dc4e-3d1e138b1207
│ └─md124
├─sdb6 linux_raid_member 1.2 archiso:arch-home cc96e752-0fe0-52e7-91fa-2208f34738ca
│ └─md122 ext4 1.0 Arch-Home 1d1458fa-cc5a-4ba2-8459-c87c944e0121
├─sdb7 zfs_member 5000 zroot 15526487576425198649
├─sdb8 linux_raid_member 1.2 livecd:gentoo-root 94886ff3-565c-6eb1-579c-adc82e3596e8
│ └─md125
└─sdb9 linux_raid_member 1.2 archiso:arch-root 498fe28d-21f4-4ff9-5f6c-1cf71602919e
└─md123 ext4 1.0 Arch-Root ece1ec74-5911-4828-81ed-74376d49f13b
sdc
├─sdc1 zfs_member 5000 var 9469824929888068688
├─sdc2 linux_raid_member 1.2 livecd:gentoo-var ead990ca-8c9d-59c6-d281-e4a67451a5f1
│ └─md126
├─sdc3 linux_raid_member 1.2 archiso:arch-var a0c52d0b-fe12-774b-923f-362982ace963
│ └─md127 btrfs Arch-Var c5f8d1b4-c537-4efe-9bd3-f213af997eef
├─sdc4 zfs_member 5000 home 9633320021458855508
├─sdc5 linux_raid_member 1.2 livecd:gentoo-home 47a5e33f-c2b2-5445-dc4e-3d1e138b1207
│ └─md124
├─sdc6 linux_raid_member 1.2 archiso:arch-home cc96e752-0fe0-52e7-91fa-2208f34738ca
│ └─md122 ext4 1.0 Arch-Home 1d1458fa-cc5a-4ba2-8459-c87c944e0121
├─sdc7 zfs_member 5000 zroot 15526487576425198649
├─sdc8 linux_raid_member 1.2 livecd:gentoo-root 94886ff3-565c-6eb1-579c-adc82e3596e8
│ └─md125
└─sdc9 linux_raid_member 1.2 archiso:arch-root 498fe28d-21f4-4ff9-5f6c-1cf71602919e
└─md123 ext4 1.0 Arch-Root ece1ec74-5911-4828-81ed-74376d49f13b
sdd
├─sdd1 zfs_member 5000 var 9469824929888068688
├─sdd2 linux_raid_member 1.2 livecd:gentoo-var ead990ca-8c9d-59c6-d281-e4a67451a5f1
│ └─md126
├─sdd3 linux_raid_member 1.2 archiso:arch-var a0c52d0b-fe12-774b-923f-362982ace963
│ └─md127 btrfs Arch-Var c5f8d1b4-c537-4efe-9bd3-f213af997eef
├─sdd4 zfs_member 5000 home 9633320021458855508
├─sdd5 linux_raid_member 1.2 livecd:gentoo-home 47a5e33f-c2b2-5445-dc4e-3d1e138b1207
│ └─md124
├─sdd6 linux_raid_member 1.2 archiso:arch-home cc96e752-0fe0-52e7-91fa-2208f34738ca
│ └─md122 ext4 1.0 Arch-Home 1d1458fa-cc5a-4ba2-8459-c87c944e0121
├─sdd7 zfs_member 5000 zroot 15526487576425198649
├─sdd8 linux_raid_member 1.2 livecd:gentoo-root 94886ff3-565c-6eb1-579c-adc82e3596e8
│ └─md125
└─sdd9 linux_raid_member 1.2 archiso:arch-root 498fe28d-21f4-4ff9-5f6c-1cf71602919e
└─md123 ext4 1.0 Arch-Root ece1ec74-5911-4828-81ed-74376d49f13b
sde
└─sde1 vfat FAT32 ARCH_202406 B876-81A2

mdadm --detail --scan:
ARRAY /dev/md/arch-var metadata=1.2 UUID=a0c52d0b:fe12774b:923f3629:82ace963
ARRAY /dev/md/arch-home metadata=1.2 UUID=cc96e752:0fe052e7:91fa2208:f34738ca
ARRAY /dev/md/arch-root metadata=1.2 UUID=498fe28d:21f44ff9:5f6c1cf7:1602919e

/etc/mdadm.conf:
# mdadm configuration file
#
# mdadm will function properly without the use of a configuration file,
# but this file is useful for keeping track of arrays and member disks.
# In general, a mdadm.conf file is created, and updated, after arrays
# are created. This is the opposite behavior of /etc/raidtab which is
# created prior to array construction.
#
#
# the config file takes two types of lines:
#
# DEVICE lines specify a list of devices of where to look for
# potential member disks
#
# ARRAY lines specify information about how to identify arrays so
# that they can be activated
#
# You can have more than one device line and use wild cards. The first
# example includes SCSI the first partition of SCSI disks /dev/sdb,
# /dev/sdc, /dev/sdd, /dev/sdj, /dev/sdk, and /dev/sdl. The second
# line looks for array slices on IDE disks.
#
#DEVICE /dev/sd[bcdjkl]1
#DEVICE /dev/hda1 /dev/hdb1
#
# The designation "partitions" will scan all partitions found in
# /proc/partitions
DEVICE partitions
# ARRAY lines specify an array to assemble and a method of identification.
# Arrays can currently be identified by using a UUID, superblock minor number,
# or a listing of devices.
#
# super-minor is usually the minor number of the metadevice
# UUID is the Universally Unique Identifier for the array
# Each can be obtained using
#
# mdadm -D <md>
#
# To capture the UUIDs for all your RAID arrays to this file, run these:
# to get a list of running arrays:
# # mdadm -D --scan >>/etc/mdadm.conf
# to get a list from superblocks:
# # mdadm -E --scan >>/etc/mdadm.conf
#
#ARRAY /dev/md0 UUID=3aaa0122:29827cfa:5331ad66:ca767371
#ARRAY /dev/md1 super-minor=1
#ARRAY /dev/md2 devices=/dev/hda1,/dev/hdb1
#
# ARRAY lines can also specify a "spare-group" for each array. mdadm --monitor
# will then move a spare between arrays in a spare-group if one array has a
# failed drive but no spare
#ARRAY /dev/md4 uuid=b23f3c6d:aec43a9f:fd65db85:369432df spare-group=group1
#ARRAY /dev/md5 uuid=19464854:03f71b1b:e0df2edd:246cc977 spare-group=group1
#
# When used in --follow (aka --monitor) mode, mdadm needs a
# mail address and/or a program. To start mdadm's monitor mode, enable
# mdadm.service in systemd.
#
# If the lines are not found, mdadm will exit quietly
#MAILADDR root@mydomain.tld
#PROGRAM /usr/sbin/handle-mdadm-events
ARRAY /dev/md/arch-var metadata=1.2 UUID=a0c52d0b:fe12774b:923f3629:82ace963
ARRAY /dev/md/arch-root metadata=1.2 UUID=498fe28d:21f44ff9:5f6c1cf7:1602919e
ARRAY /dev/md/arch-home metadata=1.2 UUID=cc96e752:0fe052e7:91fa2208:f34738ca

Last edited by grumpy (2025-06-18 22:06:53)
Offline
/dev/md/names are unreliable. Stick to UUIDs (or LABEL) in your fstab.
If you must keep the name, try adding `HOMEHOST <ignore>` to your mdadm.conf.
Also you can use `mdadm --assemble --update=homehost` to change the hostname stored in the array's superblock (yours is `archiso` — which is perhaps also why it works perfectly fine in the livecd...), but it will only help if you manage to set the host name before the arrays are assembled. You can also pass `--homehost=<name>` directly. If you pick a string longer than 32 bytes, the hostname is removed from the superblock entirely. Not sure whether that really helps though.
Last edited by frostschutz (2025-06-18 22:15:40)
Offline
Pages: 1