单纯提供一个相对的解答,并不是标准答案!
单纯就是个解答的参考,写完之后再来这边查查看答案跟你想的一样不一样!?
[root@station200 ~]# type mdadm mdadm 是 /usr/sbin/mdadm [root@station200 ~]# rpm -qf /usr/sbin/mdadm mdadm-4.1-9.el8.x86_64 [root@station200 ~]# yum install mdadm
[root@station200 ~]# gdisk /dev/vda Command (? for help): n Partition number (4-128, default 4): First sector (34-62914526, default = 46151680) or {+-}size{KMGTP}: Last sector (46151680-62914526, default = 62914526) or {+-}size{KMGTP}: +300M Current type is 'Linux filesystem' Hex code or GUID (L to show codes, Enter = 8300): fd00 Changed type of partition to 'Linux RAID' ..... Command (? for help): p Number Start (sector) End (sector) Size Code Name 1 2048 6143 2.0 MiB EF02 2 6144 4200447 2.0 GiB 8300 3 4200448 46151679 20.0 GiB 8E00 4 46151680 46766079 300.0 MiB FD00 Linux RAID 5 46766080 47380479 300.0 MiB FD00 Linux RAID 6 47380480 47994879 300.0 MiB FD00 Linux RAID 7 47994880 48609279 300.0 MiB FD00 Linux RAID 8 48609280 49223679 300.0 MiB FD00 Linux RAID Command (? for help): w Do you want to proceed? (Y/N): y [root@station200 ~]# partprobe 所以,最终我们要用来创建软件磁盘数组的,就是 /dev/vda4 ~ /dev/vda8 这 5 个喔!
[root@station200 ~]# mdadm --create /dev/md0 --level=5 --chunk=256K --raid-devices=4 --spare-devices=1 /dev/vda{4,5,6,7,8} [root@station200 ~]# mdadm --detail /dev/md0 /dev/md0: Version : 1.2 Creation Time : Mon Jun 8 16:28:28 2020 Raid Level : raid5 Array Size : 918528 (897.00 MiB 940.57 MB) Used Dev Size : 306176 (299.00 MiB 313.52 MB) Raid Devices : 4 Total Devices : 5 Persistence : Superblock is persistent Update Time : Mon Jun 8 16:28:34 2020 State : clean Active Devices : 4 Working Devices : 5 Failed Devices : 0 Spare Devices : 1 Layout : left-symmetric Chunk Size : 256K Consistency Policy : resync Name : station200.centos:0 (local to host station200.centos) UUID : 320dde5d:e5b53aa7:1e4d690b:e1c215c9 Events : 18 Number Major Minor RaidDevice State 0 252 4 0 active sync /dev/vda4 1 252 5 1 active sync /dev/vda5 2 252 6 2 active sync /dev/vda6 5 252 7 3 active sync /dev/vda7 4 252 8 - spare /dev/vda8
[root@station200 ~]# mkfs.xfs -d su=256K,sw=3 /dev/md0
[root@station200 ~]# mkdir /srv/raid [root@station200 ~]# mount /dev/md0 /srv/raid [root@station200 ~]# df -T /srv/raid 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/md0 xfs 912896 39672 873224 5% /srv/raid
[root@station200 ~]# cat /proc/mdstat Personalities : [raid6] [raid5] [raid4] md0 : active raid5 vda7[5] vda8[4](S) vda6[2] vda5[1] vda4[0] 918528 blocks super 1.2 level 5, 256k chunk, algorithm 2 [4/4] [UUUU] [root@station200 ~]# cp -a /etc /home /srv/raid [root@station200 ~]# ll /srv/raid/ 总计 16 drwxr-xr-x. 149 root root 8192 6月 8 16:13 etc drwxr-xr-x. 29 root root 4096 5月 11 20:51 home [root@station200 ~]# df -T /srv/raid 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/md0 xfs 912896 385776 527120 43% /srv/raid所以磁盘数组与文件系统都是正常运作的!
[root@station200 ~]# mdadm --fail /dev/md0 /dev/vda7 mdadm: set /dev/vda7 faulty in /dev/md0 [root@station200 ~]# mdadm --detail /dev/md0 /dev/md0: Version : 1.2 Creation Time : Mon Jun 8 16:28:28 2020 Raid Level : raid5 Array Size : 918528 (897.00 MiB 940.57 MB) Used Dev Size : 306176 (299.00 MiB 313.52 MB) Raid Devices : 4 Total Devices : 5 Persistence : Superblock is persistent Update Time : Mon Jun 8 16:43:30 2020 State : clean Active Devices : 4 Working Devices : 4 Failed Devices : 1 Spare Devices : 0 Layout : left-symmetric Chunk Size : 256K Consistency Policy : resync Name : station200.centos:0 (local to host station200.centos) UUID : 320dde5d:e5b53aa7:1e4d690b:e1c215c9 Events : 37 Number Major Minor RaidDevice State 0 252 4 0 active sync /dev/vda4 1 252 5 1 active sync /dev/vda5 2 252 6 2 active sync /dev/vda6 4 252 8 3 active sync /dev/vda8 5 252 7 - faulty /dev/vda7
[root@station200 ~]# mdadm --remove /dev/md0 /dev/vda7 mdadm: hot removed /dev/vda7 from /dev/md0 [root@station200 ~]# mdadm --add /dev/md0 /dev/vda7 mdadm: added /dev/vda7 [root@station200 ~]# mdadm --detail /dev/md0 /dev/md0: Version : 1.2 Creation Time : Mon Jun 8 16:28:28 2020 Raid Level : raid5 Array Size : 918528 (897.00 MiB 940.57 MB) Used Dev Size : 306176 (299.00 MiB 313.52 MB) Raid Devices : 4 Total Devices : 5 Persistence : Superblock is persistent Update Time : Mon Jun 8 16:45:14 2020 State : clean Active Devices : 4 Working Devices : 5 Failed Devices : 0 Spare Devices : 1 Layout : left-symmetric Chunk Size : 256K Consistency Policy : resync Name : station200.centos:0 (local to host station200.centos) UUID : 320dde5d:e5b53aa7:1e4d690b:e1c215c9 Events : 39 Number Major Minor RaidDevice State 0 252 4 0 active sync /dev/vda4 1 252 5 1 active sync /dev/vda5 2 252 6 2 active sync /dev/vda6 4 252 8 3 active sync /dev/vda8 5 252 7 - spare /dev/vda7
[root@station200 ~]# gdisk /dev/vda ...... Command (? for help): l ...... 8300 Linux filesystem 8301 Linux reserved 8302 Linux /home 8303 Linux x86 root (/) 8304 Linux x86-64 root (/ 8305 Linux ARM64 root (/) 8306 Linux /srv 8307 Linux ARM32 root (/) 8400 Intel Rapid Start 8e00 Linux LVM a000 Android bootloader a001 Android bootloader 2 a002 Android boot a003 Android recovery a004 Android misc ......
[root@station200 ~]# gdisk /dev/vda ...... Command (? for help): n Partition number (9-128, default 9): First sector (34-62914526, default = 49223680) or {+-}size{KMGTP}: Last sector (49223680-62914526, default = 62914526) or {+-}size{KMGTP}: +300M Current type is 'Linux filesystem' Hex code or GUID (L to show codes, Enter = 8300): 8e00 Changed type of partition to 'Linux LVM' ...... Command (? for help): p ...... Number Start (sector) End (sector) Size Code Name 9 49223680 49838079 300.0 MiB 8E00 Linux LVM 10 49838080 50452479 300.0 MiB 8E00 Linux LVM 11 50452480 51066879 300.0 MiB 8E00 Linux LVM 12 51066880 51681279 300.0 MiB 8E00 Linux LVM Command (? for help): w Do you want to proceed? (Y/N): y [root@station200 ~]# partprobe
[root@station200 ~]# mkfs.xfs /dev/myvg/mylv [root@station200 ~]# mkdir /srv/lvm [root@station200 ~]# mount /dev/myvg/mylv /srv/lvm [root@station200 ~]# df -T /srv/lvm 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/myvg-mylv xfs 518816 30132 488684 6% /srv/lvm
[root@station200 ~]# lvcreate -L 300M -n mylvm2 myvg [root@station200 ~]# lvscan ACTIVE '/dev/myvg/mylv' [512.00 MiB] inherit ACTIVE '/dev/myvg/mylvm2' [304.00 MiB] inherit ACTIVE '/dev/centos/root' [10.00 GiB] inherit ACTIVE '/dev/centos/home' [3.00 GiB] inherit ACTIVE '/dev/centos/swap' [2.00 GiB] inherit [root@station200 ~]# mkfs.ext4 /dev/myvg/mylvm2 [root@station200 ~]# mkdir /srv/lvm2 [root@station200 ~]# mount /dev/myvg/mylvm2 /srv/lvm2 [root@station200 ~]# df -T /srv/lvm2 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/myvg-mylvm2 ext4 293267 2062 271545 1% /srv/lvm2
[root@station200 ~]# vgdisplay myvg .... Total PE 103 Alloc PE / Size 72 / 1.12 GiB Free PE / Size 31 / 496.00 MiB VG UUID R8lCNk-H71V-g0XW-OtZe-pCFk-3H0d-OHZQ5p [root@station200 ~]# lvresize -l +31 /dev/myvg/mylv Size of logical volume myvg/mylv changed from 512.00 MiB (32 extents) to 1008.00 MiB (63 extents). Logical volume myvg/mylv successfully resized. [root@station200 ~]# vgdisplay myvg Total PE 103 Alloc PE / Size 103 / <1.61 GiB Free PE / Size 0 / 0 ...... [root@station200 ~]# lvscan ACTIVE '/dev/myvg/mylv' [1008.00 MiB] inherit ACTIVE '/dev/myvg/mylvm2' [640.00 MiB] inherit ACTIVE '/dev/centos/root' [10.00 GiB] inherit ACTIVE '/dev/centos/home' [3.00 GiB] inherit ACTIVE '/dev/centos/swap' [2.00 GiB] inherit
[root@station200 ~]# xfs_growfs --help xfs_growfs: 不适用的选项 -- - Usage: xfs_growfs [options] mountpoint Options: -d grow data/metadata section -l grow log section -r grow realtime section -n don't change anything, just show geometry -i convert log from external to internal format -t alternate location for mount table (/etc/mtab) -x convert log from internal to external format -D size grow data/metadata section to size blks -L size grow/shrink log section to size blks -R size grow realtime section to size blks -e size set realtime extent size to size blks -m imaxpct set inode max percent to imaxpct -V print version information [root@station200 ~]# df -T /dev/myvg/mylv 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/myvg-mylv xfs 518816 30132 488684 6% /srv/lvm [root@station200 ~]# xfs_growfs /srv/lvm [root@station200 ~]# df -T /dev/myvg/mylv 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/myvg-mylv xfs 1026720 33904 992816 4% /srv/lvm
[root@station200 ~]# df -T / 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/centos-root xfs 10475520 6381636 4093884 61% / [root@station200 ~]# vgdisplay centos ...... Total PE 5120 Alloc PE / Size 3840 / 15.00 GiB Free PE / Size 1280 / 5.00 GiB ...... # 所以,确实可以提供 2G 给根目录的!没问题! [root@station200 ~]# lvresize -L +2G /dev/centos/root Size of logical volume centos/root changed from 10.00 GiB (2560 extents) to 12.00 GiB (3072 extents). Logical volume centos/root successfully resized. [root@station200 ~]# xfs_growfs / [root@station200 ~]# df -T / 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/centos-root xfs 12572672 6396328 6176344 51% / # 容量从 10G 放大到 12G 了!
# 将 /dev/md0 卸载,并且停止使用 [root@station200 ~]# df -T /dev/md0 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/md0 xfs 912896 383496 529400 43% /srv/raid [root@station200 ~]# umount /dev/md0 [root@station200 ~]# cat /proc/mdstat Personalities : [raid6] [raid5] [raid4] md0 : active raid5 vda7[5](S) vda8[4] vda6[2] vda5[1] vda4[0] 918528 blocks super 1.2 level 5, 256k chunk, algorithm 2 [4/4] [UUUU] [root@station200 ~]# mdadm --stop /dev/md0 mdadm: stopped /dev/md0 [root@station200 ~]# cat /proc/mdstat Personalities : [raid6] [raid5] [raid4] # 将 /dev/vda{4,5,6,7,8} 这几个设备的表头数据 (有点类似 superblock) 删除 [root@station200 ~]# dd if=/dev/zero of=/dev/vda4 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda5 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda6 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda7 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda8 bs=10M count=1 [root@station200 ~]# lsblk /dev/vda{4..8} NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT vda4 252:4 0 300M 0 part vda5 252:5 0 300M 0 part vda6 252:6 0 300M 0 part vda7 252:7 0 300M 0 part vda8 252:8 0 300M 0 part # 将这 5 个 partition 删除 [root@station200 ~]# gdisk /dev/vda Command (? for help): d Partition number (1-13): 4 .... Command (? for help): w Do you want to proceed? (Y/N): y [root@station200 ~]# partprobe
# 卸载所有与 /dev/myvg/mylv, /dev/myvg/mylvm2 的设备,并将 myvg 设置为禁用 [root@station200 ~]# df -T /dev/myvg/{mylv,mylvm2} 文件系统 类型 1K-区段 已用 可用 已用% 挂载点 /dev/mapper/myvg-mylv xfs 1026720 33904 992816 4% /srv/lvm /dev/mapper/myvg-mylvm2 ext4 626473 2300 590753 1% /srv/lvm2 [root@station200 ~]# umount /srv/lvm /srv/lvm2 [root@station200 ~]# vgchange -a n myvg 0 logical volume(s) in volume group "myvg" now active [root@station200 ~]# lvscan inactive '/dev/myvg/mylv' [1008.00 MiB] inherit inactive '/dev/myvg/mylvm2' [640.00 MiB] inherit ACTIVE '/dev/centos/root' [12.00 GiB] inherit ACTIVE '/dev/centos/home' [3.00 GiB] inherit ACTIVE '/dev/centos/swap' [2.00 GiB] inherit # 移除 myvg [root@station200 ~]# vgscan Found volume group "myvg" using metadata type lvm2 Found volume group "centos" using metadata type lvm2 [root@station200 ~]# vgremove myvg Do you really want to remove volume group "myvg" containing 2 logical volumes? [y/n]: y Logical volume "mylv" successfully removed Logical volume "mylvm2" successfully removed Volume group "myvg" successfully removed [root@station200 ~]# vgscan Found volume group "centos" using metadata type lvm2 # 移除 /dev/vda{9,10,11,13} 这几个 PV [root@station200 ~]# pvscan PV /dev/vda3 VG centos lvm2 [20.00 GiB / 3.00 GiB free] PV /dev/vda9 lvm2 [300.00 MiB] PV /dev/vda10 lvm2 [300.00 MiB] PV /dev/vda11 lvm2 [300.00 MiB] PV /dev/vda12 lvm2 [300.00 MiB] PV /dev/vda13 lvm2 [500.00 MiB] Total: 6 [21.66 GiB] / in use: 1 [20.00 GiB] / in no VG: 5 [1.66 GiB] [root@station200 ~]# pvremove /dev/vda{9..13} # 将上述的 partition 删除 [root@station200 ~]# gdisk /dev/vda ..... [root@station200 ~]# lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT sr0 11:0 1 1024M 0 rom vda 252:0 0 30G 0 disk ├─vda1 252:1 0 2M 0 part ├─vda2 252:2 0 2G 0 part /boot └─vda3 252:3 0 20G 0 part ├─centos-root 253:0 0 12G 0 lvm / ├─centos-swap 253:1 0 2G 0 lvm [SWAP] └─centos-home 253:2 0 3G 0 lvm /home这样系统回复到最原始的状态了!
[root@station200 ~]# gdisk /dev/vda ...... Command (? for help): n Partition number (4-128, default 4): First sector (34-62914526, default = 46151680) or {+-}size{KMGTP}: Last sector (46151680-62914526, default = 62914526) or {+-}size{KMGTP}: +1500M Current type is 'Linux filesystem' Hex code or GUID (L to show codes, Enter = 8300): fd00 Changed type of partition to 'Linux RAID' ...... Command (? for help): p Number Start (sector) End (sector) Size Code Name 1 2048 6143 2.0 MiB EF02 2 6144 4200447 2.0 GiB 8300 3 4200448 46151679 20.0 GiB 8E00 4 46151680 49223679 1.5 GiB FD00 Linux RAID 5 49223680 52295679 1.5 GiB FD00 Linux RAID 6 52295680 55367679 1.5 GiB FD00 Linux RAID 7 55367680 58439679 1.5 GiB FD00 Linux RAID 8 58439680 61511679 1.5 GiB FD00 Linux RAID 9 61511680 62914526 685.0 MiB 8300 Linux filesystem Command (? for help): w Do you want to proceed? (Y/N): y [root@station200 ~]# partprobe
[root@station200 ~]# vim /etc/fstab /dev/mapper/centos-home /home xfs defaults,usrquota,grpquota 0 0
[root@station200 ~]# umount /home umount: /home: target is busy. [root@station200 ~]# lsof /home COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME pulseaudi 8423 student mem REG 253,2 16384 2097284 /home/student/.config/pulse/... pulseaudi 8423 student mem REG 253,2 12288 2097283 /home/student/.config/pulse/...看起来似乎是 student 在系统内,所以 /home 无法卸载。请注销所有的一般用户帐号 (避免使用到 /home), 然后可能还需要额外等待 1, 2 分钟,让系统清理所有的暂存信息后,才有办法继续卸载。
[root@station200 ~]# w 13:54:42 up 23:34, 1 user, load average: 0.00, 0.02, 0.00 USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT root pts/0 172.16.200.254 13:42 1.00s 0.05s 0.01s w # 确定只剩下 root 一个人比较好! [root@station200 ~]# umount /home [root@station200 ~]# df -Th /home 文件系统 类型 容量 已用 可用 已用% 挂载点 /dev/mapper/centos-root xfs 12G 6.7G 5.3G 56% / [root@station200 ~]# mount -a [root@station200 ~]# df -Th /home 文件系统 类型 容量 已用 可用 已用% 挂载点 /dev/mapper/centos-home xfs 3.0G 360M 2.7G 12% /home
[root@station200 ~]# mount | grep /home /dev/mapper/centos-home on /home type xfs (rw,relatime,seclabel,attr2,inode64,usrquota,grpquota)
# A. 创建一个名为 "quotaman" 的用户,该用户的密码设置为 "myPassWord" [root@station200 ~]# useradd quotaman [root@station200 ~]# echo "myPassWord" |passwd --stdin quotaman 更改用户 quotaman 的密码。 passwd:所有核对代符都已成功更新。 # B. 观察 quotaman 刚刚创建好帐号后的 quota 数值 [root@station200 ~]# xfs_quota -x -c "report -ubh" /home User quota on /home (/dev/mapper/centos-home) Blocks User ID Used Soft Hard Warn/Grace ---------- --------------------------------- root 8K 0 0 00 [------] ..... quotaman 20K 0 0 00 [------] # C. 创建 150M, 200M 的 quota 限制 [root@station200 ~]# xfs_quota -x -c "limit bsoft=150M bhard=200M -u quotaman" /home [root@station200 ~]# xfs_quota -x -c "report -ubh" /home User quota on /home (/dev/mapper/centos-home) Blocks User ID Used Soft Hard Warn/Grace ---------- --------------------------------- quotaman 20K 150M 200M 00 [------] # D. 在 tty5 登录 quotaman 并且用 dd 创建大文件 [quotaman@station200 ~]$ dd if=/dev/zero of=test.img bs=1M count=160 160+0 records in 160+0 records out 167772160 bytes (168 MB, 160 MiB) copied, 0.0752308 s, 2.2 GB/s [quotaman@station200 ~]$ ll -h -rw-rw-r--. 1 quotaman quotaman 160M 6月 11 08:38 test.img # E. 回归 root 的身份,再次观察 quotaman 的 quota 报告,观察 grace time [root@station200 ~]# xfs_quota -x -c "report -ubh" /home User quota on /home (/dev/mapper/centos-home) Blocks User ID Used Soft Hard Warn/Grace ---------- --------------------------------- quotaman 192.0M 150M 200M 00 [6 days] # 因为使用容量超过 soft limit (150M) 了,所以出现宽限时间! # F. 再以 quotaman 测试超过 200M 的情境 [quotaman@station200 ~]$ dd if=/dev/zero of=test.img bs=1M count=260 dd: 写入 'test.img' 时发生错误: 磁盘配额已满 200+0 records in 199+0 records out 208666624 bytes (209 MB, 199 MiB) copied, 0.0914792 s, 2.3 GB/s # 容量超过 200M 了!当然会失败
quotaman 需要如何处理数据后,才能够正常的继续操作系统呢?很简单,在 7 天的时间内,将使用容量降低到 150M 以下, 就可以恢复正常,也不会有 grace time 的限制问题了!
[root@station200 ~]# vim /etc/fstab #/dev/mapper/myvdo /srv/vdo xfs defaults,x-systemd.requires=vdo.service 0 0 [root@station200 ~]# umount /srv/vdo [root@station200 ~]# vdostats Device 1K-blocks Used Available Use% Space saving% /dev/mapper/myvdo 6131712 4341660 1790052 70% 78% [root@station200 ~]# vdo list myvdo [root@station200 ~]# vdo deactivate --name myvdo Deactivating VDO myvdo [root@station200 ~]# vdo remove --name myvdo Removing VDO myvdo Stopping VDO myvdo
# 1. 删除 LVM [root@station200 ~]# lvscan ...... ACTIVE '/dev/raidvg/raidlv' [<5.85 GiB] inherit [root@station200 ~]# vgchange -a n raidvg 0 logical volume(s) in volume group "raidvg" now active [root@station200 ~]# vgremove raidvg Do you really want to remove volume group "raidvg" containing 1 logical volumes? [y/n]: y Logical volume "raidlv" successfully removed Volume group "raidvg" successfully removed [root@station200 ~]# pvremove /dev/md0 Labels on physical volume "/dev/md0" successfully wiped. # 2. 删除 /dev/md0 这个 software RAID [root@station200 ~]# mdadm --stop /dev/md0 mdadm: stopped /dev/md0 [root@station200 ~]# dd if=/dev/zero of=/dev/vda4 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda5 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda6 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda7 bs=10M count=1 [root@station200 ~]# dd if=/dev/zero of=/dev/vda8 bs=10M count=1 # 3. 删除 partition 了! [root@station200 ~]# gdisk /dev/vda ...... Command (? for help): d Partition number (1-9): 9 ...... Command (? for help): p Number Start (sector) End (sector) Size Code Name 1 2048 6143 2.0 MiB EF02 2 6144 4200447 2.0 GiB 8300 3 4200448 46151679 20.0 GiB 8E00 Command (? for help): w Do you want to proceed? (Y/N): y [root@station200 ~]# partprobe
# a. 分割出 /dev/vda4, /dev/vda5 各 1G 与 1.5G 的容量 [root@station200 ~]# gdisk /dev/vda ...... [root@station200 ~]# lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT sr0 11:0 1 1024M 0 rom vda 252:0 0 30G 0 disk ├─vda1 252:1 0 2M 0 part ├─vda2 252:2 0 2G 0 part /boot ├─vda3 252:3 0 20G 0 part │ ├─centos-root 253:0 0 12G 0 lvm / │ ├─centos-swap 253:1 0 2G 0 lvm [SWAP] │ ├─centos-home 253:2 0 3G 0 lvm /home │ └─centos-lvm 253:3 0 1.5G 0 lvm ├─vda4 252:4 0 1G 0 part └─vda5 252:5 0 1.5G 0 part # b. 让 /dev/vda5 成为名为 mydata 的 stratis 保存池 [root@station200 ~]# systemctl restart stratisd.service [root@station200 ~]# systemctl enable stratisd.service [root@station200 ~]# stratis pool create mydata /dev/vda5 [root@station200 ~]# stratis pool add-cache mydata /dev/vda4 [root@station200 ~]# stratis pool list Name Total Physical Size Total Physical Used mydata 1.46 GiB 52 MiB # c. 让 /dev/vda4 成为 mydata 的缓存 (仿真的,不具备加速功能) [root@station200 ~]# stratis blockdev list Pool Name Device Node Physical Size State Tier mydata /dev/vda4 1 GiB InUse Cache <==这就是缓存! mydata /dev/vda5 1.46 GiB InUse Data # d. 最终创建 myfs1 文件系统 [root@station200 ~]# stratis filesystem create mydata myfs1 [root@station200 ~]# stratis filesystem list Pool Name Name Used Created Device UUID mydata myfs1 546 MiB Jun 11 2020 11:14 /stratis/mydata/myfs1 971d60a0593b43e98c652483d49f066c # e. 启动 quota 而挂载到 /srv/myfilesystem 目录内。 [root@station200 ~]# blkid /stratis/mydata/* /stratis/mydata/myfs1: UUID="971d60a0-593b-43e9-8c65-2483d49f066c" TYPE="xfs" [root@station200 ~]# vim /etc/fstab UUID="971d60a0-593b-43e9-8c65-2483d49f066c" /srv/myfilesystem xfs defaults,usrquota,grpquota,x-systemd.requires=stratisd.service 0 0 [root@station200 ~]# mkdir /srv/myfilesystem [root@station200 ~]# mount -a [root@station200 ~]# mount | grep myfilesystem /dev/mapper/stratis-1-.... on /srv/myfilesystem type xfs (rw,relatime,seclabel,attr2,inode64, sunit=2048,swidth=2048,usrquota,grpquota,x-systemd.requires=stratisd.service)
# a. 将所有其他容量分割给 /dev/vda6 [root@station200 ~]# gdisk /dev/vda ...... [root@station200 ~]# lsblk /dev/vda6 NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT vda6 252:6 0 5.5G 0 part # b. 让 /dev/vda6 加入成为名为 myvdo 且逻辑容量具有 10G。 [root@station200 ~]# vdo create --name myvdo --vdoLogicalSize 10G --device /dev/vda6 Creating VDO myvdo Starting VDO myvdo Starting compression on VDO myvdo VDO instance 1 volume is ready at /dev/mapper/myvdo [root@station200 ~]# vdostats --human-readable Device Size Used Available Use% Space saving% /dev/mapper/myvdo 5.5G 3.5G 2.0G 63% N/A # c. 让 myvdo 格式化为 ext4 文件系统,并且启动 quota 及挂载于 /srv/myvdo 目录中 [root@station200 ~]# mkfs.ext4 /dev/mapper/myvdo [root@station200 ~]# vim /etc/fstab /dev/mapper/myvdo /srv/myvdo ext4 defaults,usrquota,grpquota,x-systemd.requires=vdo.service 0 0 [root@station200 ~]# mkdir /srv/myvdo [root@station200 ~]# mount -a [root@station200 ~]# mount | grep myvdo /dev/mapper/myvdo on /srv/myvdo type ext4 (rw,relatime,seclabel,quota,usrquota,grpquota,x-systemd.requires=vdo.service)
# 针对 /srv/myfilesystem 的 XFS 文件系统 quota 设置 [root@station200 ~]# xfs_quota -x -c "limit bsoft=800M bhard=1000M -u student" /srv/myfilesystem [root@station200 ~]# xfs_quota -x -c "report -buh" /srv/myfilesystem User quota on /srv/myfilesystem (/dev/mapper/stratis-1-.....) Blocks User ID Used Soft Hard Warn/Grace ---------- --------------------------------- root 0 0 0 00 [------] student 0 800M 1000M 00 [------] # 针对 student 在 EXT4 文件系统的 quota 设置 [root@station200 ~]# quotacheck -avug [root@station200 ~]# quotacheck -vug /srv/myvdo quotacheck: Scanning /dev/mapper/myvdo [/srv/myvdo] done quotacheck: Checked 3 directories and 2 files # 可分别针对全系统或特定文件系统进行 EXT 家族的 quota 搜索 [root@station200 ~]# ll /srv/myvdo/ -rw-------. 1 root root 6144 6月 11 11:50 aquota.group -rw-------. 1 root root 6144 6月 11 11:50 aquota.user # 搜索完毕后,会创建出两个重要的纪录档 [root@station200 ~]# setquota -u student 800M 1000M 0 0 /srv/myvdo # 这在设置用户的 quota,参考 setquota --help [root@station200 ~]# repquota -uv /srv/myvdo/ *** Report for user quotas on device /dev/mapper/myvdo Block grace time: 7days; Inode grace time: 7days Block limits File limits User used soft hard grace used soft hard grace ---------------------------------------------------------------------- root -- 20 0 0 2 0 0 student -- 0 819200 1024000 0 0 0 Statistics: Total blocks: 7 Data blocks: 1 Entries: 2 Used average: 2.000000 # 最终显示用户的 quota 设计!