Set the Ceph noout flag

ceph osd set noout
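
The flag keeps OSDs from being marked out (and data from rebalancing) while they are down during the journal swap. Confirm it took effect (a quick check with the stock ceph CLI):

# the flag should appear in the osdmap flags
ceph osd dump | grep flags
# it also shows up as a health warning
ceph -s | grep noout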

Check the partition start positions

[root@nfdw2-yn-tstack-osd03 ceph-80]# parted -l
Model: TOSHIBA AL15SEB18EQ (scsi)

Model: MSCC LOGICAL VOLUME (scsi)
Disk /dev/sdx: 960GB
Sector size (logical/physical): 512B/4096B
Partition Table: gpt
Disk Flags: 

Number  Start   End     Size   File system  Name                  Flags
 1      1049kB  525MB   524MB  fat16        EFI System Partition  boot
 2      525MB   1050MB  524MB  ext4
 3      1050MB  799GB   798GB  ext4
 4      799GB   960GB   161GB  ext4


Model: NVMe Device (nvme)
Disk /dev/nvme0n1: 2000GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start   End     Size    File system  Name          Flags
 1      1049kB  21.5GB  21.5GB               ceph journal
 2      21.5GB  43.0GB  21.5GB               ceph journal
 3      43.0GB  64.4GB  21.5GB               ceph journal
 4      64.4GB  85.9GB  21.5GB               ceph journal
 5      85.9GB  107GB   21.5GB               ceph journal
 6      107GB   129GB   21.5GB               ceph journal
 7      129GB   150GB   21.5GB               ceph journal
 8      150GB   172GB   21.5GB               ceph journal
 9      172GB   193GB   21.5GB               ceph journal
10      193GB   215GB   21.5GB               ceph journal
11      215GB   236GB   21.5GB               ceph journal
12      236GB   258GB   21.5GB               ceph journal
13      258GB   279GB   21.5GB               ceph journal
14      279GB   301GB   21.5GB               ceph journal
15      301GB   322GB   21.5GB               ceph journal
16      322GB   344GB   21.5GB               ceph journal


[root@nfdw2-yn-tstack-osd03 ceph-80]# 
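
The existing journal partitions are 21.5GB each and laid out back to back, so a new journal partition must start where the last one ends. To see the free gaps explicitly (same device as above):

# print the partition table including unallocated space, in GB
parted /dev/nvme0n1 unit GB print free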

Create a partition table

# Create a GPT partition table on the new journal disk, e.g. device /dev/sdf
parted -s /dev/sdf mklabel gpt
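
mklabel destroys any existing partition table, so double-check the device first (a minimal sanity check; /dev/sdf is the example device from the comment above):

# make sure this is really the new, empty journal disk
lsblk /dev/sdf
# afterwards the disk should show an empty GPT label
parted -s /dev/sdf print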

Partition the journal disk

[root@nfdw2-yn-tstack-mon01 ~]# parted -s /dev/nvme0n1 mkpart xfs 85.9GB  107GB
[root@nfdw2-yn-tstack-mon01 ~]# 
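
mkpart does not create a filesystem; on GPT the "xfs" token is only taken as a partition name / filesystem-type hint, so the partition stays raw, which is what a Ceph journal needs. The start/end values come from the parted -l output above. Verify the result:

# the new partition should start exactly where the previous one ends
parted /dev/nvme0n1 unit GB print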

Some OSDs have no journal mapped on the Ceph journal disk

[root@nfdw2-yn-tstack-mon01 ceph-3]# ceph-disk list
/dev/loop0 other, unknown
/dev/nvme0n1 :
 /dev/nvme0n1p1 other, ebd0a0a2-b9e5-4433-87c0-68b6b72699c7
 /dev/nvme0n1p2 other, ebd0a0a2-b9e5-4433-87c0-68b6b72699c7
 /dev/nvme0n1p3 other, ebd0a0a2-b9e5-4433-87c0-68b6b72699c7
 /dev/nvme0n1p4 other, ebd0a0a2-b9e5-4433-87c0-68b6b72699c7
/dev/sda :
 /dev/sda1 ceph data, active, cluster ceph, osd.3
/dev/sdb :
 /dev/sdb1 ceph data, active, cluster ceph, osd.11
/dev/sdc :
 /dev/sdc1 ceph data, active, cluster ceph, osd.20
/dev/sdd :
 /dev/sdd1 ceph data, active, cluster ceph, osd.25
/dev/sde :
 /dev/sde1 ceph data, active, cluster ceph, osd.32, journal /dev/sdr1
/dev/sdf :
 /dev/sdf1 ceph data, active, cluster ceph, osd.38, journal /dev/sdr2
/dev/sdg :
 /dev/sdg1 ceph data, active, cluster ceph, osd.46, journal /dev/sdr3
/dev/sdh :
 /dev/sdh1 ceph data, active, cluster ceph, osd.50, journal /dev/sdr4
/dev/sdi :
 /dev/sdi1 ceph data, active, cluster ceph, osd.55, journal /dev/sds1
/dev/sdj :
 /dev/sdj1 ceph data, active, cluster ceph, osd.60, journal /dev/sds2
/dev/sdk :
 /dev/sdk1 ceph data, active, cluster ceph, osd.64, journal /dev/sds3
/dev/sdl :
 /dev/sdl1 ceph data, active, cluster ceph, osd.66, journal /dev/sds4
/dev/sdm :
 /dev/sdm1 ceph data, active, cluster ceph, osd.67, journal /dev/sdt1
/dev/sdn :
 /dev/sdn1 ceph data, active, cluster ceph, osd.72, journal /dev/sdt2
/dev/sdo :
 /dev/sdo1 ceph data, active, cluster ceph, osd.75, journal /dev/sdt3
/dev/sdp :
 /dev/sdp1 ceph data, active, cluster ceph, osd.76, journal /dev/sdt4
/dev/sdq :
 /dev/sdq1 other
 /dev/sdq2 other
 /dev/sdq3 other
 /dev/sdq4 other
/dev/sdr :
 /dev/sdr1 ceph journal, for /dev/sde1
 /dev/sdr2 ceph journal, for /dev/sdf1
 /dev/sdr3 ceph journal, for /dev/sdg1
 /dev/sdr4 ceph journal, for /dev/sdh1
/dev/sds :
 /dev/sds1 ceph journal, for /dev/sdi1
 /dev/sds2 ceph journal, for /dev/sdj1
 /dev/sds3 ceph journal, for /dev/sdk1
 /dev/sds4 ceph journal, for /dev/sdl1
/dev/sdt :
 /dev/sdt1 ceph journal, for /dev/sdm1
 /dev/sdt2 ceph journal, for /dev/sdn1
 /dev/sdt3 ceph journal, for /dev/sdo1
 /dev/sdt4 ceph journal, for /dev/sdp1
/dev/sdu other, unknown
/dev/sdv other, unknown
/dev/sdw other, unknown
/dev/sdx :
 /dev/sdx1 other, vfat, mounted on /boot/efi
 /dev/sdx2 other, ext4, mounted on /boot
 /dev/sdx3 other, ext4, mounted on /
 /dev/sdx4 other, ext4, mounted on /var/log
[root@nfdw2-yn-tstack-mon01 ceph-3]# 
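
The four NVMe partitions show as "other, ebd0a0a2-..." because this parted version tags new GPT partitions with the generic basic-data type GUID by default, while ceph-disk classifies journals by the Ceph journal type GUID 45b0969e-9b03-4f30-b4c6-b4b80ceff106; osd.3, osd.11, osd.20 and osd.25 are also missing journal symlinks. Retagging is optional when the symlink is recreated by hand as in the next step, but it lets ceph-disk list report the partitions as "ceph journal" (a sketch with sgdisk; partition 4 is the one reused for osd.25 below):

# retag partition 4 with the Ceph journal type GUID, then reread the table
sgdisk --typecode=4:45b0969e-9b03-4f30-b4c6-b4b80ceff106 /dev/nvme0n1
partprobe /dev/nvme0n1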

Replace the journal disk

[root@nfdw2-yn-tstack-mon01 ~]# ceph-osd -i 25 --flush-journal
2020-12-19 14:07:28.416005 7fb1f2225d80 -1 journal do_read_entry(1294585856): bad header magic
2020-12-19 14:07:28.416024 7fb1f2225d80 -1 journal do_read_entry(1294585856): bad header magic
2020-12-19 14:07:28.441435 7fb1f2225d80 -1 flushed journal /var/lib/ceph/osd/ceph-25/journal for object store /var/lib/ceph/osd/ceph-25
[root@nfdw2-yn-tstack-mon01 ~]# rm -f /var/lib/ceph/osd/ceph-25/journal
[root@nfdw2-yn-tstack-mon01 ~]# ls -l /dev/disk/by-partuuid/|grep nvme0n1p4
lrwxrwxrwx 1 root root 15 Dec 19 14:07 e91c938d-ba2a-4c8c-8c4c-4abe5371a39b -> ../../nvme0n1p4
[root@nfdw2-yn-tstack-mon01 ~]# ln -s /dev/disk/by-partuuid/e91c938d-ba2a-4c8c-8c4c-4abe5371a39b /var/lib/ceph/osd/ceph-25/journal
[root@nfdw2-yn-tstack-mon01 ~]# echo e91c938d-ba2a-4c8c-8c4c-4abe5371a39b > /var/lib/ceph/osd/ceph-25/journal_uuid 
[root@nfdw2-yn-tstack-mon01 ~]# chown ceph:ceph /dev/nvme0n1p4
[root@nfdw2-yn-tstack-mon01 ~]# ceph-osd --mkjournal --osd-journal /dev/nvme0n1p4 -i 25
2020-12-19 14:08:30.136318 7f336be46d80 -1 created new journal /dev/nvme0n1p4 for object store /var/lib/ceph/osd/ceph-25
[root@nfdw2-yn-tstack-mon01 ~]#
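
The "bad header magic" lines from --flush-journal are typically harmless here; they indicate the reader ran past the last valid journal entry. ceph-osd --flush-journal requires the daemon to be stopped, so osd.25 is down at this point; once the new journal is created, start it again and watch it rejoin (a sketch, assuming systemd-managed daemons):

# bring osd.25 back up and confirm it rejoins the cluster
systemctl start ceph-osd@25
ceph osd stat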

Note: wait until ceph -s reports the whole cluster as active+clean, and only then run ceph osd unset noout.
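
For reference, the wrap-up is just (same commands as in the note):

# once every PG is active+clean, lift the flag
ceph -s
ceph osd unset noout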