Configuration and use of lvm and raid

  • lvm
  • raid

lvm: Logical Volume Manager. LVM creates a virtual block device on top of physical devices, organizing one or more underlying block devices into a single logical device. By abstracting the underlying devices it makes disks easier to manage and allows storage to be resized dynamically.

Basic concepts:
PV: physical volume, the bottom layer; a disk or partition initialized for use by LVM.
VG: volume group, built on top of one or more physical volumes; its space is allocated in units of PEs (physical extents).
LV: logical volume, built on top of a volume group; unallocated space in the volume group can be used to create new logical volumes, and an existing logical volume can later be extended or reduced dynamically.
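
To make the layering concrete, here is a minimal sketch (using the same spare partitions /dev/sdb1 and /dev/sdb2 as the practice section below) that stacks a PV, a VG and an LV and puts a filesystem on top:

# pvcreate /dev/sdb1 /dev/sdb2               #initialize the partitions as physical volumes (PV layer)
# vgcreate -s 4M myvg /dev/sdb1 /dev/sdb2    #group them into a volume group; -s sets the PE size
# lvcreate -L 200M -n mylv myvg              #carve a 200M logical volume out of the VG's free PEs
# mkfs.ext4 /dev/myvg/mylv                   #the LV behaves like any block device: format it...
# mount /dev/myvg/mylv /mnt                  #...and mount it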

Management use:

PV management tools:
  pvs: brief PV information
  pvdisplay: detailed PV information
  pvcreate /dev/device...: create a PV
  pvremove /dev/device...: delete a PV
  pvmove /dev/device [/dev/device]: migrate data off a PV (optionally onto a given target PV)

VG management tools:
  vgs: brief VG information
  vgdisplay: detailed VG information
  vgcreate [-s #[kKmMgGtTpPeE]] vg_name /dev/device...: create a VG; -s sets the PE size (default 4M)
  vgextend vg_name /dev/device...: extend a VG with additional PVs
  vgreduce vg_name /dev/device: remove a PV from a VG (migrate its data off with pvmove first)
  vgremove vg_name: delete a VG

LV management tools:
  lvs: brief LV information
  lvdisplay: detailed LV information
  lvcreate -l #|-L #[mMgGtT] -n name volume_group: create an LV; -l gives the size in PEs, -L gives the size directly
  lvextend -L [+]#[mMgGtT] /dev/VG_NAME/LV_NAME: extend an LV; afterwards the filesystem must also be grown, e.g. resize2fs /dev/VG_NAME/LV_NAME (see the sketch after this list)
  lvreduce: shrink an LV; see the reduction steps below
  lvremove /dev/VG_NAME/LV_NAME: delete an LV
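
Extending a logical volume is not demonstrated in the practice below, so here is a hedged sketch, assuming the myvg/mylv volume created later in this article, an ext4 filesystem, and free PEs left in the VG:

# lvextend -L +100M /dev/myvg/mylv    #grow the LV by 100M (the VG must have enough free PEs)
# resize2fs /dev/myvg/mylv            #grow the ext4 filesystem to fill the enlarged LV; this works while mounted
# df -h /mnt                          #verify the new size

On recent LVM versions the two steps can be combined with lvextend -r (--resizefs), which resizes the filesystem in the same call.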

lv reduction steps:

# umount /dev/VG_NAME/LV_NAME #Unmount first; note that only ext filesystems can be shrunk, xfs does not support shrinking
# e2fsck -f /dev/VG_NAME/LV_NAME #Force a filesystem check
# resize2fs /dev/VG_NAME/LV_NAME #[mMgGtT] #Shrink the filesystem to the target size first
# lvreduce -L [-]#[mMgGtT] /dev/VG_NAME/LV_NAME #Then reduce the LV to match
# mount /dev/VG_NAME/LV_NAME /mountpoint #Remount when done
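
As a concrete illustration, a hypothetical run of these steps that shrinks the 200M myvg/mylv volume from the practice below to 100M would look like this:

# umount /mnt
# e2fsck -f /dev/myvg/mylv
# resize2fs /dev/myvg/mylv 100M       #shrink the filesystem first
# lvreduce -L 100M /dev/myvg/mylv     #then the LV; never reduce the LV below the filesystem size, or data is destroyed
# mount /dev/myvg/mylv /mnt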

Simple practice:

[root@xt ~]# pvcreate /dev/sdb{1,2}
  Physical volume "/dev/sdb1" successfully created.
  Physical volume "/dev/sdb2" successfully created.
[root@xt ~]# pvs
  PV         VG Fmt  Attr PSize PFree
  /dev/sdb1     lvm2 ---  1.00g 1.00g
  /dev/sdb2     lvm2 ---  1.00g 1.00g
[root@xt ~]# pvdisplay 
  "/dev/sdb2" is a new physical volume of "1.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sdb2
  VG Name               
  PV Size               1.00 GiB
  Allocatable           NO
  PE Size               0   
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               3XTOcT-uIc3-73Hc-alkn-EKWJ-yyqv-CZYDsR

  "/dev/sdb1" is a new physical volume of "1.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sdb1
  VG Name               
  PV Size               1.00 GiB
  Allocatable           NO
  PE Size               0   
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               KZ6Mwf-GPQc-xGQf-xWwS-FCnS-yLtn-XqJofh
===================================================
[root@xt ~]# vgcreate myvg /dev/sdb1
  Volume group "myvg" successfully created
[root@xt ~]# vgs
  VG   #PV #LV #SN Attr   VSize    VFree   
  myvg   1   0   0 wz--n- 1020.00m 1020.00m
[root@xt ~]# vgextend myvg /dev/sdb2
  Volume group "myvg" successfully extended
[root@xt ~]# vgs
  VG   #PV #LV #SN Attr   VSize VFree
  myvg   2   0   0 wz--n- 1.99g 1.99g
[root@xt ~]# vgdisplay 
  --- Volume group ---
  VG Name               myvg
  System ID             
  Format                lvm2
  Metadata Areas        2
  Metadata Sequence No  2
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                2
  Act PV                2
  VG Size               1.99 GiB
  PE Size               4.00 MiB
  Total PE              510
  Alloc PE / Size       0 / 0   
  Free  PE / Size       510 / 1.99 GiB
  VG UUID               b5t7N7-xMBg-5O8w-xS5m-2OTV-IpBd-98mR5w
===================================================
[root@xt ~]# lvcreate -L 200m --name mylv myvg 
  Logical volume "mylv" created.
[root@xt ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  mylv myvg -wi-a----- 200.00m                                                    
[root@xt ~]# lvdisplay 
  --- Logical volume ---
  LV Path                /dev/myvg/mylv
  LV Name                mylv
  VG Name                myvg
  LV UUID                PelWrs-D13Q-8btE-q0RV-tFsA-6miC-FTqwns
  LV Write Access        read/write
  LV Creation host, time xt.com, 2019-01-11 22:24:56 +0800
  LV Status              available
  # open                 0
  LV Size                200.00 MiB
  Current LE             50
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:0
=================================================
[root@xt ~]# mkfs.ext4 /dev/myvg/mylv 
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
51200 inodes, 204800 blocks
10240 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=33816576
25 block groups
8192 blocks per group, 8192 fragments per group
2048 inodes per group
Superblock backups stored on blocks: 
    8193, 24577, 40961, 57345, 73729

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done 

[root@xt ~]# mount /dev/myvg/mylv /mnt
[root@xt ~]# df -h
Filesystem             Size  Used Avail Use% Mounted on
/dev/sda1               10G  4.0G  6.1G  40% /
devtmpfs               984M     0  984M   0% /dev
tmpfs                  993M     0  993M   0% /dev/shm
tmpfs                  993M  8.7M  985M   1% /run
tmpfs                  993M     0  993M   0% /sys/fs/cgroup
/dev/sda3              256M   83M  173M  33% /boot
tmpfs                  199M     0  199M   0% /run/user/0
/dev/mapper/myvg-mylv  190M  1.6M  175M   1% /mnt
==========================================
//Simulated data migration: the source and target PVs must belong to the same VG
[root@xt mnt]# mount /dev/myvg/mylv /mnt
[root@xt mnt]# cp -r /tmp/ /mnt/
[root@xt tmp]# pvs
  PV         VG   Fmt  Attr PSize    PFree    
  /dev/sdb1  myvg lvm2 a--  1020.00m  820.00m #the data currently lives on sdb1; the migration target must have at least this much free space
  /dev/sdb2  myvg lvm2 a--  1020.00m 1020.00m
[root@xt tmp]# pvmove /dev/sdb1 /dev/sdb2
  /dev/sdb1: Moved: 4.00%
  /dev/sdb1: Moved: 100.00%
[root@xt tmp]# pvs
  PV         VG   Fmt  Attr PSize    PFree   
  /dev/sdb1  myvg lvm2 a--  1020.00m 1020.00m
  /dev/sdb2  myvg lvm2 a--  1020.00m  820.00m
[root@xt tmp]# vgreduce myvg  /dev/sdb1
  Removed "/dev/sdb1" from volume group "myvg"
[root@xt tmp]# pvs
  PV         VG   Fmt  Attr PSize    PFree  
  /dev/sdb1       lvm2 ---     1.00g   1.00g
  /dev/sdb2  myvg lvm2 a--  1020.00m 820.00m
[root@xt tmp]# vgs
  VG   #PV #LV #SN Attr   VSize    VFree  
  myvg   1   1   0 wz--n- 1020.00m 820.00m
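
To finish the exercise, the emptied PV and the test volumes can be cleaned up with the removal commands from the table above; a hedged sketch, assuming the data under /mnt is no longer needed:

# umount /mnt                         #the LV is still mounted from the earlier steps
# lvremove /dev/myvg/mylv             #delete the logical volume (asks for confirmation)
# vgremove myvg                       #delete the now-empty volume group
# pvremove /dev/sdb1 /dev/sdb2        #wipe the LVM labels from both partitions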
