1. Ceph cluster status
[root@cephnode01 my-cluster]# ceph -s
  cluster:
    id:     09cca6f9-9942-43c8-aa0d-67684be6751b
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03
    mgr: cephnode01(active), standbys: cephnode02, cephnode03
    osd: 3 osds: 3 up, 3 in
    rgw: 1 daemon active

  data:
    pools:   4 pools, 32 pgs
    objects: 187 objects, 1.1 KiB
    usage:   3.0 GiB used, 27 GiB / 30 GiB avail
    pgs:     32 active+clean

  io:
    client: 36 KiB/s rd, 0 B/s wr, 35 op/s rd, 23 op/s wr
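The same status is available in machine-readable form; the --format option works with most ceph subcommands:
ceph -s --format json-pretty //cluster status as pretty-printed JSON, convenient for scripting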
2. Check the Ceph version
[root@cephnode01 my-cluster]# ceph -v
ceph version 13.2.10 (564bdc4ae87418a232fc901524470e1a0f76d641) mimic (stable)
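ceph -v only reports the local binary; to see what every running daemon actually reports:
ceph versions //versions of all mon/mgr/osd/rgw daemons, grouped by release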
3. ceph -w //watch cluster status changes in real time
4. Check whether the cluster is healthy
[root@cephnode01 my-cluster]# ceph health
HEALTH_OK
For more detail:
ceph health detail
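A minimal cron-style check can be built on ceph health; this is a sketch that assumes a working local mailer, and admin@example.com is a placeholder:
#!/bin/sh
# Alert when the cluster leaves HEALTH_OK.
status=$(ceph health)
if [ "$status" != "HEALTH_OK" ]; then
    ceph health detail | mail -s "ceph: $status" admin@example.com
fi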
5. Check time synchronization across the mon nodes
[root@cephnode01 my-cluster]# ceph time-sync-status
{
    "time_skew_status": {
        "cephnode01": {
            "skew": 0.000000,
            "latency": 0.000000,
            "health": "HEALTH_OK"
        },
        "cephnode02": {
            "skew": 0.000000,
            "latency": 0.001137,
            "health": "HEALTH_OK"
        },
        "cephnode03": {
            "skew": 0.002950,
            "latency": 0.001264,
            "health": "HEALTH_OK"
        }
    },
    "timechecks": {
        "epoch": 6,
        "round": 28,
        "round_status": "finished"
    }
}
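If skew shows up here, check the time source on each mon node directly; assuming chrony is the NTP client (the usual choice on CentOS 7):
chronyc sources -v //list NTP sources and their sync state on the local node
chronyc tracking //current offset and drift of the local clock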
6. Check per-OSD usage
[root@cephnode01 my-cluster]# ceph osd df
ID CLASS WEIGHT REWEIGHT SIZE USE DATA OMAP META AVAIL %USE VAR PGS
0 hdd 0.00980 1.00000 10 GiB 1.0 GiB 3.8 MiB 0 B 1 GiB 9.0 GiB 10.04 1.00 32
1 hdd 0.00980 1.00000 10 GiB 1.0 GiB 3.8 MiB 0 B 1 GiB 9.0 GiB 10.04 1.00 32
2 hdd 0.00980 1.00000 10 GiB 1.0 GiB 3.8 MiB 0 B 1 GiB 9.0 GiB 10.04 1.00 32
TOTAL 30 GiB 3.0 GiB 11 MiB 0 B 3 GiB 27 GiB 10.04
MIN/MAX VAR: 1.00/1.00 STDDEV: 0
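For per-pool rather than per-OSD usage:
ceph df //cluster-wide RAW usage plus per-pool USED and MAX AVAIL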
7. ceph osd dump //dump the OSD map
8. Find the IP and hostname of the node hosting osd.0
[root@cephnode01 my-cluster]# ceph osd find osd.0
{
    "osd": 0,
    "ip": "10.1.234.131:6802/16720",
    "osd_fsid": "9864afee-8fcb-47ea-8849-79f2e2fc33aa",
    "crush_location": {
        "host": "cephnode01",
        "root": "default"
    }
}
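To pull a single field out of that JSON in a script (assuming jq is installed):
ceph osd find osd.0 -f json | jq -r .ip //just the IP:port of osd.0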
9. Check OSD status, IDs, and their distribution across hosts
[root@cephnode01 my-cluster]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME           STATUS REWEIGHT PRI-AFF
-1       0.02939 root default
-3       0.00980     host cephnode01
 0   hdd 0.00980         osd.0           up  1.00000 1.00000
-5       0.00980     host cephnode02
 1   hdd 0.00980         osd.1           up  1.00000 1.00000
-7       0.00980     host cephnode03
 2   hdd 0.00980         osd.2           up  1.00000 1.00000
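On Luminous and later the tree can be filtered by state, which helps on large clusters:
ceph osd tree down //show only subtrees containing down OSDs
ceph osd ls //just the numeric OSD ids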
10. View detailed OSD metadata
[root@cephnode01 my-cluster]# ceph osd metadata 0
{
    "id": 0,
    "arch": "x86_64",
    "back_addr": "10.1.234.131:6803/16720",
    "back_iface": "eth0",
    "bluefs": "1",
    "bluefs_single_shared_device": "1",
    "bluestore_bdev_access_mode": "blk",
    "bluestore_bdev_block_size": "4096",
    "bluestore_bdev_dev": "253:2",
    "bluestore_bdev_dev_node": "dm-2",
    "bluestore_bdev_driver": "KernelDevice",
    "bluestore_bdev_model": "",
    "bluestore_bdev_partition_path": "/dev/dm-2",
    "bluestore_bdev_rotational": "1",
    "bluestore_bdev_size": "10733223936",
    "bluestore_bdev_type": "hdd",
    "ceph_release": "mimic",
    "ceph_version": "ceph version 13.2.10 (564bdc4ae87418a232fc901524470e1a0f76d641) mimic (stable)",
    "ceph_version_short": "13.2.10",
    "cpu": "Intel Core Processor (Broadwell, IBRS)",
    "default_device_class": "hdd",
    "devices": "dm-2,vdc",
    "distro": "centos",
    "distro_description": "CentOS Linux 7 (Core)",
    "distro_version": "7",
    "front_addr": "10.1.234.131:6802/16720",
    "front_iface": "eth0",
    "hb_back_addr": "10.1.234.131:6804/16720",
    "hb_front_addr": "10.1.234.131:6805/16720",
    "hostname": "cephnode01",
    "journal_rotational": "1",
    "kernel_description": "#1 SMP Thu Nov 8 23:39:32 UTC 2018",
    "kernel_version": "3.10.0-957.el7.x86_64",
    "mem_swap_kb": "2097148",
    "mem_total_kb": "3880180",
    "os": "Linux",
    "osd_data": "/var/lib/ceph/osd/ceph-0",
    "osd_objectstore": "bluestore",
    "rotational": "1"
}
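Run without an id, ceph osd metadata dumps every OSD, which is handy for spot-checking one field across the cluster:
ceph osd metadata | grep osd_objectstore //confirm all OSDs run bluestore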
11. All the admin sockets (.asok) are stored here:
[root@cephnode01 my-cluster]# ls /var/run/ceph/
ceph-client.rgw.cephnode01.16905.93930511736832.asok ceph-mds.cephnode01.asok ceph-mgr.cephnode01.asok ceph-mon.cephnode01.asok ceph-osd.0.asok
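These sockets accept commands directly, which works even when the mons are unreachable; the two forms below are equivalent ways to reach osd.0:
ceph daemon osd.0 config show //via the daemon name, resolved under /var/run/ceph
ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok help //list everything the socket supports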
12. Export the cluster monmap from a running cluster (confirm the mon IPs and daemon names while the cluster is healthy)
[root@cephnode01 my-cluster]# ceph mon getmap -o /mnt/monmap
got monmap epoch 1
[root@cephnode01 my-cluster]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 09cca6f9-9942-43c8-aa0d-67684be6751b
last_changed 2020-09-04 09:34:55.084532
created 2020-09-04 09:34:55.084532
0: 10.1.234.131:6789/0 mon.cephnode01
1: 10.1.234.132:6789/0 mon.cephnode02
2: 10.1.234.133:6789/0 mon.cephnode03
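The exported monmap can be inspected (or edited) offline with monmaptool, which matters during mon disaster recovery:
monmaptool --print /mnt/monmap //decode the binary monmap exported above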
13. Find the current quorum leader (primary mon) of the cluster
[root@cephnode01 my-cluster]# ceph quorum_status -f json-pretty|grep 'leader'
"quorum_leader_name": "cephnode01",
14. Check mon status
[root@cephnode01 my-cluster]# ceph mon stat
e1: 3 mons at {cephnode01=10.1.234.131:6789/0,cephnode02=10.1.234.132:6789/0,cephnode03=10.1.234.133:6789/0}, election epoch 6, leader 0 cephnode01, quorum 0,1,2 cephnode01,cephnode02,cephnode03
15. Check OSD status
[root@cephnode01 my-cluster]# ceph osd stat
3 osds: 3 up, 3 in; epoch: e28
16. List the storage pools in the cluster
[root@cephnode01 my-cluster]# ceph osd pool ls
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
17. ceph osd pool set mytest size 3 //set the replica count of pool mytest to 3
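mytest is not among the pools listed above; a hypothetical sequence to create it and verify the change (the PG counts are illustrative only):
ceph osd pool create mytest 32 32 //create pool mytest with 32 PGs and 32 PGPs
ceph osd pool get mytest size //confirm the replica count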
18. ceph osd pool ls detail //show detailed pool information
19. Check per-pool I/O
[root@cephnode01 my-cluster]# ceph osd pool stats
pool .rgw.root id 1
nothing is going on
pool default.rgw.control id 2
nothing is going on
pool default.rgw.meta id 3
nothing is going on
pool default.rgw.log id 4
client io 27 KiB/s rd, 0 B/s wr, 26 op/s rd, 17 op/s wr
20. ceph pg dump //dump detailed PG information
ceph pg map 1.6c //show the mapping between PG 1.6c and its OSDs
ceph pg {pg-id} query //query detailed information for a single PG
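Two more PG queries that help when PGs are not active+clean:
ceph pg stat //one-line PG summary
ceph pg dump_stuck unclean //list PGs stuck in an unclean state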