Deploying OpenStack + Ceph on Debian 9

阿凡达 2018-08-14 10:37

Setting up OpenStack on Debian 9

  • Upgrade Debian 8 to Debian 9

      aptitude update && sudo aptitude upgrade
      sed -i 's/jessie/stretch/g' /etc/apt/sources.list
      aptitude update && aptitude dist-upgrade
      reboot
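      # After the reboot, a quick sanity check that the upgrade landed (should report 9.x):
      cat /etc/debian_version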
    
  • Versions of the underlying packages shipped with Debian 9

      root@debian9:~/ceph# virsh version
      Compiled against library: libvirt 3.0.0
      Using library: libvirt 3.0.0
      Using API: QEMU 3.0.0
      Running hypervisor: QEMU 2.8.0
    
      root@debian9:~/ceph# ceph -v
      ceph version 10.2.5
    
  • Joining the existing devstack environment

      sudo aptitude install default-libmysqlclient-dev libxml2-dev libxslt1-dev libxslt1.1 libsqlite3-dev
    
      pip install 'pbr==0.9.0'
    
      sudo aptitude install libvirt-clients libvirt-daemon libvirt-daemon-system libvirt0 python-libvirt
    
      root@debian9:/opt/stack/cinder/bin# cp * /usr/local/bin/
    
      From the previous devstack Ubuntu node:
      ubuntu@devstack-ntse2:~$ scp -r /usr/local/bin/nova* root@debian9:/usr/local/bin/
      ubuntu@devstack-ntse2:~$ scp -r /etc/rsync/ root@debian9:/etc/
    
      Install nova, cinder, novaclient, and cinderclient from source with pip install, as sketched below.
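      A minimal sketch of those source installs, assuming the /opt/stack checkout layout used above (the client directory names are assumptions):

      # editable installs from the devstack source trees
      cd /opt/stack/nova && pip install -e .
      cd /opt/stack/cinder && pip install -e .
      cd /opt/stack/python-novaclient && pip install -e .
      cd /opt/stack/python-cinderclient && pip install -e .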
    

Deploy ceph


Add mon


aptitude install ceph ceph-common librados2 librbd1 librbd-dev librados-dev python-ceph python-rbd python-rados

root@debian9:/opt/stack/ceph# cat /etc/ceph/ceph.conf
[global]
fsid = 38a3cd02-0d3b-11e7-9332-778da17f28b5
mon_initial_members = debian9
public_network = 10.166.224.58/22
mon_host = 10.166.224.58
osd journal size = 1024
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
osd max object name len = 256
osd max object namespace len = 64


ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
monmaptool --create --add debian9 10.166.224.58 --fsid 38a3cd02-0d3b-11e7-9332-778da17f28b5 /tmp/monmap
mkdir /var/lib/ceph/mon/ceph-debian9
ceph-mon --mkfs -i debian9 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
cp ceph.conf /etc/ceph/
touch /var/lib/ceph/mon/ceph-debian9/done



Handling "osd init failed (36) File name too long" (the ext4 object-name length limit; the osd max object name len / osd max object namespace len settings in the ceph.conf above are the workaround):
http://www.linuxidc.com/Linux/2017-03/141578.htm


Handling ceph CLI errors (bug http://tracker.ceph.com/issues/11388):
wget https://raw.githubusercontent.com/ceph/ceph/jewel/src/pybind/ceph_argparse.py
cp ceph_argparse.py /usr/bin/

ceph -s

Start mon

ceph-mon -i debian9 --pid-file /var/run/ceph/mon.debian9.pid -c /etc/ceph/ceph.conf --cluster ceph

Add osd

  ceph osd create 0
  mkdir /var/lib/ceph/osd/ceph-0
  ceph osd create 1
  mkdir /var/lib/ceph/osd/ceph-1
  fdisk -l
  mkfs.ext4 /dev/vdc
  mkfs.ext4 /dev/vdd
  mount -o user_xattr /dev/vdc /var/lib/ceph/osd/ceph-0
  mount -o user_xattr /dev/vdd /var/lib/ceph/osd/ceph-1
  ceph-osd -i 0 --mkfs --mkkey 
  ceph-osd -i 1 --mkfs --mkkey 
  ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
  ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring

Start osd

ceph --cluster ceph osd crush add-bucket debian9 host
ceph osd crush move debian9 root=default
ceph osd crush add osd.0 1.0 host=debian9
ceph osd crush add osd.1 1.0 host=debian9

sudo touch /var/lib/ceph/osd/ceph-0/sysvinit
sudo touch /var/lib/ceph/osd/ceph-1/sysvinit
service ceph start osd.0
service ceph start osd.1

ceph-osd -i 0 --pid-file /var/run/ceph/osd.0.pid -c /etc/ceph/ceph.conf --cluster ceph
ceph-osd -i 1 --pid-file /var/run/ceph/osd.1.pid -c /etc/ceph/ceph.conf --cluster ceph

Handling PGs stuck in active+undersized+degraded

With only one host, the default CRUSH rule (osd crush chooseleaf type = 1 in the ceph.conf above, i.e. replicas on different hosts) cannot satisfy a pool size of 2, so PGs stay active+undersized+degraded. The workaround on a single node is to split the two OSDs into separate host buckets:

ceph --cluster ceph osd crush add-bucket debian9-0 host
ceph --cluster ceph osd crush add-bucket debian9-1 host
ceph osd tree
ceph osd crush move debian9-0 root=default
ceph osd crush move debian9-1 root=default
ceph osd tree
ceph osd crush set 0 1.0 host=debian9-0
ceph osd crush set 1 1.0 host=debian9-1


ceph osd pool create vms 64 64
ceph osd pool create images 64 64
ceph osd pool create volumes 64 64
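
The pool names above match the usual OpenStack integration (nova ephemeral disks in vms, glance images in images, cinder volumes in volumes). If cephx authentication is enabled, the services also need client keys; a sketch following the upstream Ceph/OpenStack guide (the client names glance and cinder are assumptions, nothing above creates them):

ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'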

Debian 9: problem booting instances from Ceph (RBD system disk)

root@debian9:/etc/libvirt# qemu-system-x86_64 -drive format=?
Supported formats: blkdebug blkreplay blkverify bochs cloop dmg file ftp ftps gluster host_cdrom host_device http https iscsi iser luks nbd nfs null-aio null-co parallels qcow qcow2 qed quorum raw rbd replication sheepdog ssh vdi vhdx vmdk vpc vvfat
root@debian9:/etc/libvirt# qemu-system-x86_64 -name instance-00000121 -S -machine pc-i440fx-2.5,accel=tcg,usb=off -cpu SandyBridge,+erms,+smep,+fsgsbase,+pdpe1gb,+hypervisor,+rdrand,+f16c,+osxsave,+pcid,+ss,+vme -m 512 -realtime mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid cc09639f-3c9f-446e-8f98-45dff92d9b85  -nographic -no-user-config -nodefaults  -drive file=rbd:vms/e7dd6c1e-b682-4e14-abe7-2f8e067fdbda_disk:auth_supported=none:mon_host=10.166.224.58\:6789,format=raw,if=none,id=drive-virtio-disk0,cache=none
qemu-system-x86_64: -drive file=rbd:vms/e7dd6c1e-b682-4e14-abe7-2f8e067fdbda_disk:auth_supported=none:mon_host=10.166.224.58:6789,format=raw,if=none,id=drive-virtio-disk0,cache=none: Unknown protocol 'rbd'


Newer QEMU packages ship the rbd block driver separately; installing the qemu-block-extra package fixes this (https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1495895).
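The corresponding install command on Debian 9:

aptitude install qemu-block-extra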

Debian 9: problem attaching Ceph volumes

2017-03-21 11:10:37.792 ERROR nova.virt.libvirt.driver [req-81cd3f5a-6d3a-45f4-b4c2-dad4a79e9dd6 admin admin] attach volume failed, errcode: 1, errmsg: internal error: unable to execute QEMU command 'device_add': Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk4'


Solution: newer Ceph versions require the Ceph user (ceph_user) to be specified explicitly, otherwise the connection to the Ceph cluster fails; see the cinder.conf sketch below.
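
A minimal sketch of the cinder.conf RBD backend options this refers to; the backend section name, user name, and secret UUID are illustrative placeholders, not values taken from the deployment above:

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_pool = volumes
# the "ceph_user" mentioned above: the cephx client the service connects as
rbd_user = cinder
# libvirt secret on the compute node that holds the corresponding client key
rbd_secret_uuid = <secret-uuid>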



This article comes from the NetEase Practitioner Community (网易实践者社区) and is published with the authorization of its author, 管强.