Ceph add new nodes

  • Category: Computer-related
  • Last Updated: Thursday, 13 July 2017 11:59
  • Published: Wednesday, 12 July 2017 10:36
  • Written by sam

Adding a new OSD, via ceph-deploy

[root@ceph1 ~]# vi /etc/hosts
10.0.252.160    px160
10.0.252.117    ceph1
10.0.252.118    ceph2
10.0.252.119    ceph3
10.0.252.161    px161
10.0.252.162    px162
10.0.252.163    px163
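
The same host entries need to exist on every node, not just on ceph1. A minimal sketch for pushing the file out, assuming root SSH to the new nodes works; merge by hand if the files already differ:

[root@ceph1 ~]# for h in px160 px161 px162 px163; do scp /etc/hosts root@$h:/etc/hosts; done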

adduser

root@px160:~# adduser sam
root@px160:/etc# apt-get install sudo
root@px160:/etc# cat << EOF >/etc/sudoers.d/sam
sam ALL = (root) NOPASSWD:ALL
Defaults:sam !requiretty
EOF
root@px160:/etc# chmod 440 /etc/sudoers.d/sam
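
ceph-deploy will also want passwordless SSH from the admin node to this user. A sketch of the usual companion step from Ceph's preflight docs, run as the deploy user on ceph1, with the hostname and user from this article:

[sam@ceph1 ~]$ ssh-keygen -t rsa ##defaults, empty passphrase##
[sam@ceph1 ~]$ ssh-copy-id sam@px160
[sam@ceph1 ~]$ printf 'Host px160\n    User sam\n' >> ~/.ssh/config
[sam@ceph1 ~]$ chmod 600 ~/.ssh/config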

As before, the machine's second disk is used as the OSD.

root@px160:/# fdisk -l /dev/sdb
Disk /dev/sdb: 558.4 GiB, 599550590976 bytes, 1170997248 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: AFE7A5CD-72F8-D440-AB54-14D475499B4E

Device          Start        End    Sectors   Size Type
/dev/sdb1        2048 1170978815 1170976768 558.4G Solaris /usr & Apple ZFS
/dev/sdb9  1170978816 1170995199      16384     8M Solaris reserved 1

root@px160:/# apt-get install parted
root@px160:/# parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%
root@px160:/# mkfs.xfs /dev/sdb -f
meta-data=/dev/sdb               isize=512    agcount=4, agsize=36593664 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=146374656, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=71472, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
root@px160:/# blkid -o value -s TYPE /dev/sdb
xfs
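
One thing worth noticing: mkfs.xfs is pointed at the whole disk, so it clobbers the GPT label parted just wrote, and blkid accordingly reports the filesystem on /dev/sdb itself. If the partition table should survive, format the partition instead; a sketch:

root@px160:/# mkfs.xfs -f /dev/sdb1 ##partition, not the whole disk##
root@px160:/# blkid -o value -s TYPE /dev/sdb1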

Update ceph.conf

[global]
fsid = 587284b0-c1fd-4c66-bdf2-cb561ec511be
mon_initial_members = ceph1, ceph2, ceph3, ceph4, ceph5, ceph6, ceph7
mon_host = 10.0.252.117,10.0.252.118,10.0.252.119,10.0.252.160,10.0.252.161,10.0.252.162,10.0.252.163
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 10.0.252.0/24
[client.rgw.ceph1]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph2]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph3]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph4]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph5]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph6]
rgw_frontends = "civetweb port=80"
[client.rgw.ceph7]
rgw_frontends = "civetweb port=80"
osd_journal_size = 10000
osd_pool_default_size = 3
osd_pool_default_min_size = 3
osd_crush_chooseleaf_type = 1
osd_crush_update_on_start = true
max_open_files = 131072
osd pool default pg num = 128
osd pool default pgp num = 128
mon_pg_warn_max_per_osd = 0
[mgr]
mgr modules = dashboard
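
Two notes on this config. osd_pool_default_min_size equal to the pool size (3) means a PG blocks I/O as soon as a single replica is down; 2 is the usual choice for size-3 pools. And the pg numbers can be sanity-checked against the rule of thumb from the Ceph docs, OSDs × 100 / replica size, rounded up to a power of two. Assuming one OSD per node (7):

[sam@ceph1 ceph]$ echo $(( 7 * 100 / 3 )) ##233, round up to 256; the 128 above is only a per-pool default##
233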

Because the newly added nodes run Debian 9.0, which this ceph-deploy doesn't recognize yet

[sam@ceph1 ceph]$ ceph --version
ceph version 10.2.8 (f5b1f1fd7c0be0506ba73502a675de9d048b744e)
root@px160:/etc/ceph# ceph --version
ceph version 10.2.5 (c461ee19ecbc0c5c330aca20f7392c9a00730367)
Close enough… trick it a little:
root@px163:/home# echo "8.0" > /etc/debian_version
[sam@ceph1 ceph]$ ceph-deploy --overwrite-conf config push ceph4 ceph5 ceph6 ceph7

Honesty is the best policy…
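
The borrowed version string should go back once ceph-deploy is done; a one-liner, assuming stock Debian 9.0:

root@px163:/home# echo "9.0" > /etc/debian_version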

Let's just add the OSD manually instead

root@px161:/etc/ceph# mkdir -p /var/lib/ceph/osd/ceph-4
root@px161:/etc/ceph# mount /dev/sdb /var/lib/ceph/osd/ceph-4
root@px161:/etc/ceph# mount -o remount,noatime /var/lib/ceph/osd/ceph-4
root@px161:/etc/ceph# ceph osd create 4
4
root@px161:/etc/ceph# ceph-osd -i 4 --mkfs --mkkey
2017-07-13 10:32:39.518410 7f8049f20900 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2017-07-13 10:32:39.644246 7f8049f20900 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2017-07-13 10:32:39.644880 7f8049f20900 -1 filestore(/var/lib/ceph/osd/ceph-4) could not find #-1:7b3f43c4:::osd_superblock:0# in index: (2) No such file or directory
2017-07-13 10:32:39.958956 7f8049f20900 -1 created object store /var/lib/ceph/osd/ceph-4 for osd.4 fsid 587284b0-c1fd-4c66-bdf2-cb561ec511be
2017-07-13 10:32:39.959033 7f8049f20900 -1 auth: error reading file: /var/lib/ceph/osd/ceph-4/keyring: can't open /var/lib/ceph/osd/ceph-4/keyring: (2) No such file or directory
2017-07-13 10:32:39.959272 7f8049f20900 -1 created new key in keyring /var/lib/ceph/osd/ceph-4/keyring
root@px161:/etc/ceph# ceph auth add osd.4 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-4/keyring
added key for osd.4
root@px161:/etc/ceph# ceph osd crush add-bucket px161 host
added bucket px161 type host to crush map
root@px161:/etc/ceph# ceph osd crush move px161 root=default
moved item id -7 name 'px161' to location {root=default} in crush map
root@px161:/etc/ceph# ceph osd crush add osd.4 2.9 host=px161
add item id 4 name 'osd.4' weight 2.9 at location {host=px161} to crush map
root@px161:/# systemctl restart ceph-osd@4
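
One thing this transcript skips: the mount was done by hand, so it won't survive a reboot. A sketch of a matching /etc/fstab line; the UUID is a placeholder, read the real one with blkid first:

root@px161:/# blkid -o value -s UUID /dev/sdb
root@px161:/# echo 'UUID=<uuid-from-blkid> /var/lib/ceph/osd/ceph-4 xfs noatime 0 0' >> /etc/fstab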
[root@ceph3 /]# ceph osd tree ##check, done##
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 8.79999 root default
-2 1.00000     host ceph1
 0 1.00000         osd.0       up  1.00000          1.00000
-3 1.00000     host ceph2
 1 1.00000         osd.1       up  1.00000          1.00000
-4 1.00000     host ceph3
 2 1.00000         osd.2       up  1.00000          1.00000
-5 2.89999     host px160
 3 2.89999         osd.3       up  1.00000          1.00000
-6       0     host ceph4
-7 2.89999     host px161
 4 2.89999         osd.4       up  1.00000          1.00000
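
ceph osd tree only shows up/down; while the cluster rebalances onto the new OSD, ceph -s or ceph health detail shows the backfill progress:

[root@ceph3 /]# ceph -s
[root@ceph3 /]# ceph health detail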

Move the journal

[root@ceph3 ~]# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 5.89999 root default
-2 1.00000     host ceph1
 0 1.00000         osd.0       up  1.00000          1.00000
-3 1.00000     host ceph2
 1 1.00000         osd.1       up  1.00000          1.00000
-4 1.00000     host ceph3
 2 1.00000         osd.2       up  1.00000          1.00000
-5 2.89999     host px160
 3 2.89999         osd.3       up  1.00000          1.00000
-6       0     host ceph4

Stop the OSD and set noout

root@px160:/var/lib/ceph/osd/ceph-3# ceph osd set noout
set noout
root@px160:/var/lib/ceph/osd/ceph-3# systemctl stop ceph-osd@3
root@px160:/var/lib/ceph/osd/ceph-3# ceph osd tree
-5 2.89999     host px160
 3 2.89999         osd.3     down  1.00000          1.00000
root@px160:/var/lib/ceph/osd/ceph-3# ceph-osd -i 3 --flush-journal
2017-07-13 09:58:50.140030 7f4fa5677900 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2017-07-13 09:58:50.239504 7f4fa5677900 -1 flushed journal /var/lib/ceph/osd/ceph-3/journal for object store /var/lib/ceph/osd/ceph-3
Properly this should get its own partition, but I don't have one; I'm just moving the journal onto the other disk so data and journal are separated
root@px160:/var/lib/ceph/osd/ceph-3# lsblk ##current state##
NAME                         MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda                            8:0    0 136.1G  0 disk
├─sda1                         8:1    0     1M  0 part
├─sda2                         8:2    0   256M  0 part
└─sda3                         8:3    0 135.9G  0 part
  ├─pve-swap                 253:0    0     8G  0 lvm  [SWAP]
  ├─pve-root                 253:1    0  33.8G  0 lvm  /
  ├─pve-data_tmeta           253:2    0    80M  0 lvm
  │ └─pve-data-tpool         253:4    0  78.1G  0 lvm
  │   ├─pve-data             253:5    0  78.1G  0 lvm
  │   └─pve-vm--100--disk--1 253:6    0     8G  0 lvm
  └─pve-data_tdata           253:3    0  78.1G  0 lvm
    └─pve-data-tpool         253:4    0  78.1G  0 lvm
      ├─pve-data             253:5    0  78.1G  0 lvm
      └─pve-vm--100--disk--1 253:6    0     8G  0 lvm
sdb                            8:16   0 558.4G  0 disk /var/lib/ceph/osd/ceph-3
sr0                           11:0    1  1024M  0 rom
root@px160:/# touch journal ##cwd is /, so this creates /journal on the root disk (sda)##
root@px160:/var/lib/ceph/osd/ceph-3# rm journal
root@px160:/var/lib/ceph/osd/ceph-3# ln -s /journal /var/lib/ceph/osd/ceph-3/journal
root@px160:/var/lib/ceph/osd/ceph-3# ceph-osd -i 3 --mkjournal
2017-07-13 10:02:25.581757 7ff33d010900 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2017-07-13 10:02:25.589825 7ff33d010900 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected fce5ce23-26d7-44e0-837f-df9a0a757008, invalid (someone else's?) journal
2017-07-13 10:02:25.661888 7ff33d010900 -1 created new journal /var/lib/ceph/osd/ceph-3/journal for object store /var/lib/ceph/osd/ceph-3
root@px160:/var/lib/ceph/osd/ceph-3# systemctl restart ceph-osd@3
root@px160:/var/lib/ceph/osd/ceph-3# ceph osd unset noout
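
A quick sanity check before trusting the move: the symlink should point at the new file and the OSD should come back up:

root@px160:/var/lib/ceph/osd/ceph-3# ls -l journal ##should point at /journal##
root@px160:/var/lib/ceph/osd/ceph-3# ceph osd tree | grep osd.3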
[root@ceph3 ~]# ceph osd tree ##finish##
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 5.89999 root default
-2 1.00000     host ceph1
 0 1.00000         osd.0       up  1.00000          1.00000
-3 1.00000     host ceph2
 1 1.00000         osd.1       up  1.00000          1.00000
-4 1.00000     host ceph3
 2 1.00000         osd.2       up  1.00000          1.00000
-5 2.89999     host px160
 3 2.89999         osd.3       up  1.00000          1.00000

The standard approach: link the journal to a partition

root@px160:/var/lib/ceph/osd/ceph-3# ls -l /dev/disk/by-partuuid/
total 0
lrwxrwxrwx 1 root root 10 Jul 12 11:18 30785667-6605-4414-a771-f877dd74863c -> ../../sda2
lrwxrwxrwx 1 root root 10 Jul 12 11:18 dcdc78b5-8dbc-4861-86ff-3e218e28d3db -> ../../sda3
lrwxrwxrwx 1 root root 10 Jul 12 11:18 f64dc1ba-8f67-4e06-8bd9-ab4d673aca3a -> ../../sda1
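
No spare partition here (sda1 through sda3 are all taken by Proxmox), which is why the journal went to a plain file above. With a spare partition, the same flush/relink/mkjournal dance would look like this; the partuuid is hypothetical:

root@px160:/# ceph osd set noout
root@px160:/# systemctl stop ceph-osd@3
root@px160:/# ceph-osd -i 3 --flush-journal
root@px160:/# ln -sf /dev/disk/by-partuuid/<journal-partuuid> /var/lib/ceph/osd/ceph-3/journal
root@px160:/# ceph-osd -i 3 --mkjournal
root@px160:/# systemctl start ceph-osd@3
root@px160:/# ceph osd unset noout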

Manually remove an OSD

[root@ceph3 /]# ceph osd out 3
[root@ceph3 /]# systemctl stop ceph-osd@3
[root@ceph3 /]# ceph osd crush remove osd.3
[root@ceph3 /]# ceph auth del osd.3
[root@ceph3 /]# ceph osd rm 3
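
After the rm, the data directory is still mounted, and if that was the host's last OSD its empty bucket stays in the crush map; a sketch of the leftover cleanup:

root@px160:/# umount /var/lib/ceph/osd/ceph-3
[root@ceph3 /]# ceph osd crush remove px160 ##only once the host bucket is empty##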