OpenStack-Rocky版单机部署
1.安装环境:
操作系统:centos 7.5地址规划:192.168.254.10 openstack-server架构:所有组件(包括控制节点、计算节点、网络节点)全部安装一个节点
2.系统配置:
[root@localhost ~]# hostname openstack-server[root@openstack-server ~]# vim /etc/hostnameopenstack-server[root@openstack-server ~]# vim /etc/hosts192.168.254.10 openstack-server openstack-server.smoke.com[root@openstack-server ~]# ifconfigenp4s0: flags=4163 mtu 1500 inet 192.168.254.10 netmask 255.255.255.224 broadcast 192.168.254.31 inet6 fe80::119a:26d0:b028:74d0 prefixlen 64 scopeid 0x20 ether 00:e0:4c:0f:ff:a9 txqueuelen 1000 (Ethernet) RX packets 42277 bytes 39441483 (37.6 MiB) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 14912 bytes 1016294 (992.4 KiB) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0lo: flags=73 mtu 65536 inet 127.0.0.1 netmask 255.0.0.0 inet6 ::1 prefixlen 128 scopeid 0x10 loop txqueuelen 1000 (Local Loopback) RX packets 32 bytes 2792 (2.7 KiB) RX errors 0 dropped 0 overruns 0 frame 0 TX packets 32 bytes 2792 (2.7 KiB) TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
3.安装时间同步服务NTP:[root@openstack-server ~]# yum install chrony
修改chrony服务配置:
[root@openstack-server ~]# vim /etc/chrony.confallow 192.168.254.0/27
启动chrony服务:
[root@openstack-server ~]# systemctl enable chronyd.service[root@openstack-server ~]# systemctl start chronyd.service
设置时区:[root@openstack-server ~]# timedatectl set-timezone Asia/Shanghai
4.安装阿里的OpenStack源:
[root@openstack-server ~]# vim /etc/yum.repos.d/OpenStack-Rocky.repo[openstack-rocky]name=openstack-rockybaseurl=https://mirrors.aliyun.com/centos/7.5.1804/cloud/x86_64/openstack-rocky/gpgcheck=0gpgkey=https://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7repo_gpgcheck=0enabled=1
[root@openstack-server ~]# yum clean all[root@openstack-server ~]# yum makecache
还可以使用官方yum源方式:
[root@openstack-server ~]# yum install centos-release-openstack-rocky[root@openstack-server ~]# yum install https://rdoproject.org/repos/rdo-release.rpm
升级软件包:[root@openstack-server ~]# yum -y upgrade
安装OpenStack client:[root@openstack-server ~]# yum -y install python-openstackclient
安装openstack-selinux:[root@openstack-server ~]# yum -y install openstack-selinux
5.安装Mariadb:[root@openstack-server ~]# yum -y install mariadb mariadb-server python2-PyMySQL
修改Mariadb配置文件:
[root@openstack-server ~]# mv /etc/my.cnf /etc/my.cnf.bak[root@openstack-server ~]# cp /usr/share/mariadb/my-large.cnf /etc/my.cnf
[root@openstack-server ~]# vim /etc/my.cnf[mysqld]bind-address = 192.168.254.10default-storage-engine = innodbinnodb_file_per_table = onmax_connections = 4096collation-server = utf8_general_cicharacter-set-server = utf8
启动Mariadb服务:
[root@openstack-server ~]# systemctl enable mariadb.service[root@openstack-server ~]# systemctl start mariadb.service
初始化Mariadb:[root@openstack-server ~]# mysql_secure_installation
(按提示操作设置root密码)
6.安装rabbitmq-server:[root@openstack-server ~]# yum -y install rabbitmq-server
启动rabbitmq-server服务:
[root@openstack-server ~]# systemctl enable rabbitmq-server.service[root@openstack-server ~]# systemctl start rabbitmq-server.service
添加openstack用户:
[root@openstack-server ~]# rabbitmqctl add_user openstack openstack[root@openstack-server ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
开启web管理插件:[root@openstack-server ~]# rabbitmq-plugins enable rabbitmq_management
使用web访问rabbitmq-server(默认账号guest,密码guest):
设置openstack用户Tags为administrator(点击Admin -- openstack):
点击Update this user:
查看设置:
7.安装memcached:[root@openstack-server ~]# yum -y install memcached python-memcached
修改memcached服务配置:
[root@openstack-server ~]# vim /etc/sysconfig/memcachedPORT="11211"USER="memcached"MAXCONN="1024"CACHESIZE="64"OPTIONS="-l 0.0.0.0,::1"
启动memcached服务:
[root@openstack-server ~]# systemctl enable memcached.service[root@openstack-server ~]# systemctl start memcached.service
8.安装etcd服务:[root@openstack-server ~]# yum -y install etcd
修改etcd服务配置:
[root@openstack-server ~]# vim /etc/etcd/etcd.conf#[Member]ETCD_DATA_DIR="/var/lib/etcd/default.etcd"ETCD_LISTEN_PEER_URLS="http://192.168.254.10:2380"ETCD_LISTEN_CLIENT_URLS="http://192.168.254.10:2379"ETCD_NAME="openstack-server"#[Clustering]ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.254.10:2380"ETCD_ADVERTISE_CLIENT_URLS="http://192.168.254.10:2379"ETCD_INITIAL_CLUSTER="openstack-server=http://192.168.254.10:2380"ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"ETCD_INITIAL_CLUSTER_STATE="new"
启动etcd服务:
[root@openstack-server ~]# systemctl enable etcd[root@openstack-server ~]# systemctl start etcd
9.安装keystone:
在Mariadb创建keystone库和用户:
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE keystone;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';"
安装keystone:[root@openstack-server ~]# yum -y install openstack-keystone httpd mod_wsgi
修改keystone服务配置:
[root@openstack-server ~]# vim /etc/keystone/keystone.conf[database]connection = mysql+pymysql://keystone:keystone@openstack-server/keystone[token]provider = fernet
同步数据库:[root@openstack-server ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
初始化Fernet key仓库:
[root@openstack-server ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone[root@openstack-server ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
引导身份服务:
[root@openstack-server ~]# keystone-manage bootstrap --bootstrap-password admin \> --bootstrap-admin-url http://openstack-server:5000/v3/ \> --bootstrap-internal-url http://openstack-server:5000/v3/ \> --bootstrap-public-url http://openstack-server:5000/v3/ \> --bootstrap-region-id RegionOne
修改httpd服务配置:
[root@openstack-server ~]# vim /etc/httpd/conf/httpd.confServerName openstack-server
创建wsgi-keystone配置文件链接:[root@openstack-server ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
启动httpd服务:
[root@openstack-server ~]# systemctl enable httpd.service[root@openstack-server ~]# systemctl start httpd.service
[root@openstack-server ~]# vim admin-openrc.shexport OS_USERNAME=adminexport OS_PASSWORD=adminexport OS_PROJECT_NAME=adminexport OS_USER_DOMAIN_NAME=Defaultexport OS_PROJECT_DOMAIN_NAME=Defaultexport OS_AUTH_URL=http://openstack-server:5000/v3export OS_IDENTITY_API_VERSION=3
创建域,项目,用户,角色:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack domain create --description "An Example Domain" example[root@openstack-server ~]# openstack project create --domain default \> --description "Service Project" service[root@openstack-server ~]# openstack project create --domain default \> --description "Demo Project" myproject[root@openstack-server ~]# openstack user create --domain default \> --password-prompt myuser[root@openstack-server ~]# openstack role create myrole[root@openstack-server ~]# openstack role add --project myproject --user myuser myrole
验证keystone是否安装成功:
[root@openstack-server ~]# unset OS_AUTH_URL OS_PASSWORD[root@openstack-server ~]# openstack --os-auth-url http://openstack-server:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
创建myuser环境变量:
[root@openstack-server ~]# vim myuser-openrc.shexport OS_USERNAME=myuserexport OS_PASSWORD=myuserexport OS_PROJECT_NAME=myprojectexport OS_USER_DOMAIN_NAME=Defaultexport OS_PROJECT_DOMAIN_NAME=Defaultexport OS_AUTH_URL=http://openstack-server:5000/v3export OS_IDENTITY_API_VERSION=3
使用myuser用户进行测试:
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# openstack --os-auth-url http://openstack-server:5000/v3 \> --os-project-domain-name Default --os-user-domain-name Default \> --os-project-name myproject --os-username myuser token issue
修改用户环境变量脚本:
[root@openstack-server ~]# vim admin-openrc.shexport OS_USERNAME=adminexport OS_PASSWORD=adminexport OS_PROJECT_NAME=adminexport OS_USER_DOMAIN_NAME=Defaultexport OS_PROJECT_DOMAIN_NAME=Defaultexport OS_AUTH_URL=http://openstack-server:5000/v3export OS_IDENTITY_API_VERSION=3export OS_IMAGE_API_VERSION=2
[root@openstack-server ~]# vim myuser-openrc.shexport OS_USERNAME=myuserexport OS_PASSWORD=myuserexport OS_PROJECT_NAME=myprojectexport OS_USER_DOMAIN_NAME=Defaultexport OS_PROJECT_DOMAIN_NAME=Defaultexport OS_AUTH_URL=http://openstack-server:5000/v3export OS_IDENTITY_API_VERSION=3export OS_IMAGE_API_VERSION=2
使用脚本测试:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack token issue
10.安装glance:
在Mariadb创建glance库和用户:
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE glance;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
创建glance用户,服务,端点:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack user create --domain default --password-prompt glance[root@openstack-server ~]# openstack role add --project service --user glance admin[root@openstack-server ~]# openstack service create --name glance \> --description "OpenStack Image" image[root@openstack-server ~]# openstack endpoint create --region RegionOne \> image public http://openstack-server:9292[root@openstack-server ~]# openstack endpoint create --region RegionOne \> image internal http://openstack-server:9292[root@openstack-server ~]# openstack endpoint create --region RegionOne \> image admin http://openstack-server:9292
安装glance:[root@openstack-server ~]# yum -y install openstack-glance
修改glance-api和glance-registry服务配置:
[root@openstack-server ~]# vim /etc/glance/glance-api.conf[database]connection = mysql+pymysql://glance:glance@openstack-server/glance[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_name = Defaultuser_domain_name = Defaultproject_name = serviceusername = glancepassword = glance[paste_deploy]flavor = keystone[glance_store]stores = file,httpdefault_store = filefilesystem_store_datadir = /var/lib/glance/images
[root@openstack-server ~]# vim /etc/glance/glance-registry.conf[database]connection = mysql+pymysql://glance:glance@openstack-server/glance[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_name = Defaultuser_domain_name = Defaultproject_name = serviceusername = glancepassword = glance[paste_deploy]flavor = keystone
同步glance数据库:[root@openstack-server ~]# su -s /bin/sh -c "glance-manage db_sync" glance
启动glance-api和glance-registry服务:
[root@openstack-server ~]# systemctl enable openstack-glance-api.service \> openstack-glance-registry.service[root@openstack-server ~]# systemctl start openstack-glance-api.service \> openstack-glance-registry.service
使用sdb1创建lvm用于存储镜像:
[root@openstack-server ~]# fdisk -l /dev/sdb磁盘 /dev/sdb:250.1 GB, 250059350016 字节,488397168 个扇区Units = 扇区 of 1 * 512 = 512 bytes扇区大小(逻辑/物理):512 字节 / 512 字节I/O 大小(最小/最佳):512 字节 / 512 字节磁盘标签类型:dos磁盘标识符:0x441e1e17 设备 Boot Start End Blocks Id System/dev/sdb1 2048 104859647 52428800 8e Linux LVM
[root@openstack-server ~]# pvcreate /dev/sdb1[root@openstack-server ~]# vgcreate glance-vg /dev/sdb1[root@openstack-server ~]# lvcreate -L 50G -n glance-lv glance-vg[root@openstack-server ~]# mkfs.xfs /dev/glance-vg/glance-lv[root@openstack-server ~]# blkid /dev/glance-vg/glance-lv/dev/glance-vg/glance-lv: UUID="072c4d36-7502-484b-b857-357a870dcc87" TYPE="xfs"[root@openstack-server ~]# vim /etc/fstabUUID=072c4d36-7502-484b-b857-357a870dcc87 /var/lib/glance/images/ xfs defaults 0 0[root@openstack-server ~]# mount -a[root@openstack-server ~]# chown -R glance:glance /var/lib/glance/
验证操作:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img[root@openstack-server ~]# openstack image create "cirros" \> --file cirros-0.4.0-x86_64-disk.img \> --disk-format qcow2 --container-format bare \> --public[root@openstack-server ~]# openstack image list+--------------------------------------+--------+--------+| ID | Name | Status |+--------------------------------------+--------+--------+| 99b186e3-b29f-4366-ab5c-ebf5e53ef262 | cirros | active |+--------------------------------------+--------+--------+
11.安装nova:
在Mariadb创建nova相关库和用户(控制节点):
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE nova_api;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE nova;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE nova_cell0;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE placement;"
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \> IDENTIFIED BY 'nova';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \> IDENTIFIED BY 'placement';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \> IDENTIFIED BY 'placement';"
创建nova用户,服务,端点;
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack user create --domain default --password-prompt nova[root@openstack-server ~]# openstack role add --project service --user nova admin[root@openstack-server ~]# openstack service create --name nova \> --description "OpenStack Compute" compute[root@openstack-server ~]# openstack endpoint create --region RegionOne \> compute public http://openstack-server:8774/v2.1[root@openstack-server ~]# openstack endpoint create --region RegionOne \> compute internal http://openstack-server:8774/v2.1[root@openstack-server ~]# openstack endpoint create --region RegionOne \> compute admin http://openstack-server:8774/v2.1
创建placement用户,服务,端点:
[root@openstack-server ~]# openstack user create --domain default --password-prompt placement[root@openstack-server ~]# openstack role add --project service --user placement admin[root@openstack-server ~]# openstack service create --name placement \> --description "Placement API" placement[root@openstack-server ~]# openstack endpoint create --region RegionOne \> placement public http://openstack-server:8778[root@openstack-server ~]# openstack endpoint create --region RegionOne \> placement internal http://openstack-server:8778[root@openstack-server ~]# openstack endpoint create --region RegionOne \> placement admin http://openstack-server:8778
安装nova-api、nova-conductor、nova-console、nova-novncproxy、nova-scheduler、nova-placement-api服务(控制节点):
[root@openstack-server ~]# yum -y install openstack-nova-api openstack-nova-conductor \> openstack-nova-console openstack-nova-novncproxy \> openstack-nova-scheduler openstack-nova-placement-api
修改nova服务配置:
[root@openstack-server ~]# vim /etc/nova/nova.conf[DEFAULT]enabled_apis=osapi_compute,metadatatransport_url=rabbit://openstack:openstack@openstack-servermy_ip=192.168.254.10use_neutron=truefirewall_driver=nova.virt.firewall.NoopFirewallDriver[api_database]connection=mysql+pymysql://nova:nova@openstack-server/nova_api[database]connection=mysql+pymysql://nova:nova@openstack-server/nova[placement_database]connection=mysql+pymysql://placement:placement@openstack-server/placement[api]auth_strategy=keystone[keystone_authtoken]auth_url=http://openstack-server:5000/v3memcached_servers=openstack-server:11211auth_type=passwordproject_domain_name = defaultuser_domain_name = defaultproject_name = serviceusername = novapassword = nova[vnc]enabled=trueserver_listen=0.0.0.0server_proxyclient_address=$my_ip[glance]api_servers=http://openstack-server:9292[oslo_concurrency]lock_path=/var/lib/nova/tmp[placement]region_name=RegionOneproject_domain_name = Defaultproject_name = serviceauth_type = passworduser_domain_name = Defaultauth_url = http://openstack-server:5000/v3username = placementpassword = placement
官网文档提示包bug问题,需要修改 /etc/httpd/conf.d/00-nova-placement-api.conf,添加/usr/bin相关内容到文件尾部;
[root@openstack-server ~]# vim /etc/httpd/conf.d/00-nova-placement-api.conf
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
重启httpd服务:[root@openstack-server ~]# systemctl restart httpd
同步nova数据库:
[root@openstack-server ~]# su -s /bin/sh -c "nova-manage api_db sync" nova[root@openstack-server ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova[root@openstack-server ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova28087259-877a-4ff7-b2a3-a4367a1fbd8d[root@openstack-server ~]# su -s /bin/sh -c "nova-manage db sync" nova[root@openstack-server ~]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
启动nova-api、nova-scheduler、nova-conductor、nova-novncproxy服务:
[root@openstack-server ~]# systemctl enable openstack-nova-api.service \> openstack-nova-scheduler.service openstack-nova-conductor.service \> openstack-nova-novncproxy.service[root@openstack-server ~]# systemctl start openstack-nova-api.service \> openstack-nova-scheduler.service openstack-nova-conductor.service \> openstack-nova-novncproxy.service
安装nova-compute(计算节点):[root@openstack-server ~]# yum install openstack-nova-compute
修改nova配置文件:
[root@openstack-server ~]# vim /etc/nova/nova.conf[DEFAULT]enabled_apis=osapi_compute,metadatatransport_url=rabbit://openstack:openstack@openstack-servermy_ip=192.168.254.10use_neutron=truefirewall_driver=nova.virt.firewall.NoopFirewallDriver[api]auth_strategy=keystone[keystone_authtoken]auth_url = http://openstack-server:5000/v3memcached_servers=openstack-server:11211auth_type=passwordproject_domain_name = defaultuser_domain_name = defaultproject_name = serviceusername = novapassword = nova[vnc]enabled=trueserver_listen=0.0.0.0server_proxyclient_address=$my_ipnovncproxy_base_url = http://openstack-server:6080/vnc_auto.html[glance]api_servers=http://openstack-server:9292[oslo_concurrency]lock_path=/var/lib/nova/tmp[placement]region_name=RegionOneproject_domain_name = Defaultproject_name = serviceauth_type = passworduser_domain_name = Defaultauth_url = http://openstack-server:5000/v3username = placementpassword = placement
查看cpu是否支持虚拟化(0代表不支持):
[root@openstack-server ~]# egrep -c '(vmx|svm)' /proc/cpuinfo4
修改虚拟化类型,如果不支持cpu虚拟化使用qemu,如果支持使用kvm:
[root@openstack-server ~]# vim /etc/nova/nova.conf[libvirt]virt_type=kvm
启动nova-compute和libvirtd服务:
[root@openstack-server ~]# systemctl enable libvirtd.service openstack-nova-compute.service[root@openstack-server ~]# systemctl start libvirtd.service openstack-nova-compute.service
将计算节点添加到cell数据库:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack compute service list --service nova-compute[root@openstack-server ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
编辑nova服务配置:
[root@openstack-server ~]# vim /etc/nova/nova.conf[scheduler]discover_hosts_in_cells_interval=300
验证操作:[root@openstack-server ~]# . admin-openrc.sh
[root@openstack-server ~]# openstack compute service list+----+----------------+------------------+----------+---------+-------+----------------------------+| ID | Binary | Host | Zone | Status | State | Updated At |+----+----------------+------------------+----------+---------+-------+----------------------------+| 1 | nova-conductor | openstack-server | internal | enabled | up | 2018-10-23T13:45:26.000000 || 3 | nova-scheduler | openstack-server | internal | enabled | up | 2018-10-23T13:45:26.000000 || 10 | nova-compute | openstack-server | nova | enabled | up | 2018-10-23T13:45:27.000000 |+----+----------------+------------------+----------+---------+-------+----------------------------+
[root@openstack-server ~]# openstack catalog list+-----------+-----------+-----------------------------------------------+| Name | Type | Endpoints |+-----------+-----------+-----------------------------------------------+| glance | image | RegionOne || | | internal: http://openstack-server:9292 || | | RegionOne || | | public: http://openstack-server:9292 || | | RegionOne || | | admin: http://openstack-server:9292 || | | || keystone | identity | RegionOne || | | admin: http://openstack-server:5000/v3/ || | | RegionOne || | | internal: http://openstack-server:5000/v3/ || | | RegionOne || | | public: http://openstack-server:5000/v3/ || | | || placement | placement | RegionOne || | | public: http://openstack-server:8778 || | | RegionOne || | | admin: http://openstack-server:8778 || | | RegionOne || | | internal: http://openstack-server:8778 || | | || nova | compute | RegionOne || | | public: http://openstack-server:8774/v2.1 || | | RegionOne || | | admin: http://openstack-server:8774/v2.1 || | | RegionOne || | | internal: http://openstack-server:8774/v2.1 || | | |+-----------+-----------+-----------------------------------------------+
[root@openstack-server ~]# openstack image list+--------------------------------------+--------+--------+| ID | Name | Status |+--------------------------------------+--------+--------+| 99b186e3-b29f-4366-ab5c-ebf5e53ef262 | cirros | active |+--------------------------------------+--------+--------+
[root@openstack-server ~]# nova-status upgrade check+-------------------------------+| 升级检查结果 |+-------------------------------+| 检查: Cells v2 || 结果: 成功 || 详情: None |+-------------------------------+| 检查: Placement API || 结果: 成功 || 详情: None |+-------------------------------+| 检查: Resource Providers || 结果: 成功 || 详情: None |+-------------------------------+| 检查: Ironic Flavor Migration || 结果: 成功 || 详情: None |+-------------------------------+| 检查: API Service Version || 结果: 成功 || 详情: None |+-------------------------------+| 检查: Request Spec Migration || 结果: 成功 || 详情: None |+-------------------------------+
12.安装neutron:
在Mariadb创建neutron相关库和用户(控制节点):
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE neutron;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';"
创建neutron用户、服务、端点;
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack user create --domain default --password-prompt neutron[root@openstack-server ~]# openstack role add --project service --user neutron admin[root@openstack-server ~]# openstack service create --name neutron \> --description "OpenStack Networking" network[root@openstack-server ~]# openstack endpoint create --region RegionOne \> network public http://openstack-server:9696[root@openstack-server ~]# openstack endpoint create --region RegionOne \> network internal http://openstack-server:9696[root@openstack-server ~]# openstack endpoint create --region RegionOne \> network admin http://openstack-server:9696
Networking Option 1: Provider networks:
安装neutron、neutron-ml2、neutron-linuxbridge、ebtables:
[root@openstack-server ~]# yum -y install openstack-neutron openstack-neutron-ml2 \> openstack-neutron-linuxbridge ebtables
修改neutron服务配置:
[root@openstack-server ~]# vim /etc/neutron/neutron.conf[database]connection = mysql+pymysql://neutron:neutron@openstack-server/neutron[DEFAULT]core_plugin = ml2transport_url = rabbit://openstack:openstack@openstack-serverauth_strategy = keystonenotify_nova_on_port_status_changes = truenotify_nova_on_port_data_changes = true[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultproject_name = serviceusername = neutronpassword = neutron[nova]auth_url = http://openstack-server:5000auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultregion_name = RegionOneproject_name = serviceusername = novapassword = nova[oslo_concurrency]lock_path = /var/lib/neutron/tmp
修改ml2配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini[ml2]type_drivers = flat,vlantenant_network_types = flatmechanism_drivers = linuxbridgeextension_drivers = port_security[ml2_type_flat]flat_networks = provider[securitygroup]enable_ipset = true
修改linuxbridge_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini[linux_bridge]physical_interface_mappings = provider:enp4s0[vxlan]enable_vxlan = false[securitygroup]enable_security_group = truefirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
开启系统内核支持网络桥防火墙:
[root@openstack-server ~]# modprobe bridge[root@openstack-server ~]# modprobe br_netfilter[root@openstack-server ~]# vim /etc/sysctl.confnet.bridge.bridge-nf-call-iptables = 1net.bridge.bridge-nf-call-ip6tables = 1[root@openstack-server ~]# sysctl -p /etc/sysctl.conf
修改dhcp_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/dhcp_agent.ini[DEFAULT]interface_driver = linuxbridgedhcp_driver = neutron.agent.linux.dhcp.Dnsmasqenable_isolated_metadata = true
Networking Option 2: Self-service networks:
安装openstack-neutron、openstack-neutron-ml2、openstack-neutron-linuxbridge、ebtables服务;
[root@openstack-server ~]# yum install openstack-neutron openstack-neutron-ml2 \> openstack-neutron-linuxbridge ebtables
修改neutron服务配置:
[root@openstack-server ~]# vim /etc/neutron/neutron.conf[database]connection = mysql+pymysql://neutron:neutron@openstack-server/neutron[DEFAULT]core_plugin = ml2service_plugins = routertransport_url = rabbit://openstack:openstack@openstack-serverauth_strategy = keystonenotify_nova_on_port_status_changes = truenotify_nova_on_port_data_changes = true[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultproject_name = serviceusername = neutronpassword = neutron[nova]auth_url = http://openstack-server:5000auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultregion_name = RegionOneproject_name = serviceusername = novapassword = nova[oslo_concurrency]lock_path = /var/lib/neutron/tmp
修改ml2配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini[ml2]type_drivers = flat,vlan,vxlantenant_network_types = vxlanmechanism_drivers = linuxbridge,l2populationextension_drivers = port_security[ml2_type_flat]flat_networks = provider[ml2_type_vxlan]vni_ranges = 1:1000[securitygroup]enable_ipset = true
修改linuxbridge_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini[linux_bridge]physical_interface_mappings = provider:enp4s0[vxlan]enable_vxlan = truelocal_ip = 192.168.254.10l2_population = true[securitygroup]enable_security_group = truefirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
开启系统内核支持网络桥防火墙:
[root@openstack-server ~]# modprobe bridge[root@openstack-server ~]# modprobe br_netfilter[root@openstack-server ~]# vim /etc/sysctl.confnet.bridge.bridge-nf-call-iptables = 1net.bridge.bridge-nf-call-ip6tables = 1[root@openstack-server ~]# sysctl -p /etc/sysctl.conf
修改layer-3_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/l3_agent.ini[DEFAULT]interface_driver = linuxbridge
修改dhcp_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/dhcp_agent.ini[DEFAULT]interface_driver = linuxbridgedhcp_driver = neutron.agent.linux.dhcp.Dnsmasqenable_isolated_metadata = true
修改metadata_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/metadata_agent.ini[DEFAULT]nova_metadata_host = openstack-servermetadata_proxy_shared_secret = neutron(neutron和nova通信共享秘钥)
修改nova服务配置:
[root@openstack-server ~]# vim /etc/nova/nova.conf[neutron]url = http://openstack-server:9696auth_url = http://openstack-server:5000auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultregion_name = RegionOneproject_name = serviceusername = neutronpassword = neutronservice_metadata_proxy = truemetadata_proxy_shared_secret = neutron(nova和neutron通信共享秘钥)
创建网络服务初始化脚本软连接:[root@openstack-server ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
同步neutron数据库:
[root@openstack-server ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \> --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
重启nova-api服务:[root@openstack-server ~]# systemctl restart openstack-nova-api.service
启动neutron-server、 neutron-linuxbridge-agent、neutron-dhcp-agent、neutron-metadata-agent服务:
[root@openstack-server ~]# systemctl enable neutron-server.service \> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \> neutron-metadata-agent.service[root@openstack-server ~]# systemctl start neutron-server.service \> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \> neutron-metadata-agent.service
如果使用Networking Option 2: Self-service networks还需要启动neutron-l3-agent服务:
[root@openstack-server ~]# systemctl enable neutron-l3-agent.service[root@openstack-server ~]# systemctl start neutron-l3-agent.service
安装openstack-neutron-linuxbridge、ebtables、ipset(计算节点):[root@openstack-server ~]# yum install openstack-neutron-linuxbridge ebtables ipset
修改neutron服务配置:
[root@openstack-server ~]# vim /etc/neutron/neutron.conf[DEFAULT]transport_url = rabbit://openstack:openstack@openstack-serverauth_strategy = keystone[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultproject_name = serviceusername = neutronpassword = neutron[oslo_concurrency]lock_path = /var/lib/neutron/tmp
Networking Option 1: Provider networks:
修改linuxbridge_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini[linux_bridge]physical_interface_mappings = provider:enp4s0[vxlan]enable_vxlan = false[securitygroup]enable_security_group = truefirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
开启系统内核支持网络桥防火墙:
[root@openstack-server ~]# modprobe bridge[root@openstack-server ~]# modprobe br_netfilter[root@openstack-server ~]# cat >> /etc/sysctl.conf << EOF> net.bridge.bridge-nf-call-iptables = 1> net.bridge.bridge-nf-call-ip6tables = 1> EOF[root@openstack-server ~]# sysctl -p /etc/sysctl.conf
Networking Option 2: Self-service networks:
修改linuxbridge_agent配置文件:
[root@openstack-server ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini[linux_bridge]physical_interface_mappings = provider:enp4s0[vxlan]enable_vxlan = truelocal_ip = 192.168.254.10l2_population = true[securitygroup]enable_security_group = truefirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
开启系统内核支持网络桥防火墙:
[root@openstack-server ~]# modprobe bridge[root@openstack-server ~]# modprobe br_netfilter[root@openstack-server ~]# cat >> /etc/sysctl.conf << EOF> net.bridge.bridge-nf-call-iptables = 1> net.bridge.bridge-nf-call-ip6tables = 1> EOF[root@openstack-server ~]# sysctl -p /etc/sysctl.conf
修改nova服务配置:
[root@openstack-server ~]# vim /etc/nova/nova.conf[neutron]url = http://openstack-server:9696auth_url = http://openstack-server:5000auth_type = passwordproject_domain_name = defaultuser_domain_name = defaultregion_name = RegionOneproject_name = serviceusername = neutronpassword = neutron
重启nova-compute服务(控制节点):[root@openstack-server ~]# systemctl restart openstack-nova-compute.service
启动neutron-linuxbridge-agent服务:
[root@openstack-server ~]# systemctl enable neutron-linuxbridge-agent.service[root@openstack-server ~]# systemctl start neutron-linuxbridge-agent.service
验证操作:[root@openstack-server ~]# . admin-openrc.sh
[root@openstack-server ~]# openstack extension list --network+-----------------------------------------------------------------------------------------------------------------------------------------+--------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+| Name | Alias | Description |+-----------------------------------------------------------------------------------------------------------------------------------------+--------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+| Default Subnetpools | default-subnetpools | Provides ability to mark and use a subnetpool as the default. || Network IP Availability | network-ip-availability | Provides IP availability data for each network and subnet. || Network Availability Zone | network_availability_zone | Availability zone support for network. || Network MTU (writable) | net-mtu-writable | Provides a writable MTU attribute for a network resource. || Port Binding | binding | Expose port bindings of a virtual port to external application || agent | agent | The agent management extension. || Subnet Allocation | subnet_allocation | Enables allocation of subnets from a subnet pool || DHCP Agent Scheduler | dhcp_agent_scheduler | Schedule networks among dhcp agents || Neutron external network | external-net | Adds external network attribute to network resource. || Neutron Service Flavors | flavors | Flavor specification for Neutron advanced services. || Network MTU | net-mtu | Provides MTU attribute for a network resource. || Availability Zone | availability_zone | The availability zone extension. 
|| Quota management support | quotas | Expose functions for quotas management per tenant || Tag support for resources with standard attribute: subnet, trunk, router, network, policy, subnetpool, port, security_group, floatingip | standard-attr-tag | Enables to set tag on resources with standard attribute. || Availability Zone Filter Extension | availability_zone_filter | Add filter parameters to AvailabilityZone resource || If-Match constraints based on revision_number | revision-if-match | Extension indicating that If-Match based on revision_number is supported. || Filter parameters validation | filter-validation | Provides validation on filter parameters. || Multi Provider Network | multi-provider | Expose mapping of virtual networks to multiple physical networks || Quota details management support | quota_details | Expose functions for quotas usage statistics per project || Address scope | address-scope | Address scopes extension. || Empty String Filtering Extension | empty-string-filtering | Allow filtering by attributes with empty string value || Subnet service types | subnet-service-types | Provides ability to set the subnet service_types field || Neutron Port MAC address regenerate | port-mac-address-regenerate | Network port MAC address regenerate || Resource timestamps | standard-attr-timestamp | Adds created_at and updated_at fields to all Neutron resources that have Neutron standard attributes. || Provider Network | provider | Expose mapping of virtual networks to physical networks || Neutron Service Type Management | service-type | API for retrieving service providers for Neutron advanced services || Neutron Extra DHCP options | extra_dhcp_opt | Extra options configuration for DHCP. For example PXE boot options to DHCP clients can be specified (e.g. 
tftp-server, server-ip-address, bootfile-name) || Port filtering on security groups | port-security-groups-filtering | Provides security groups filtering when listing ports || Resource revision numbers | standard-attr-revisions | This extension will display the revision number of neutron resources. || Pagination support | pagination | Extension that indicates that pagination is enabled. || Sorting support | sorting | Extension that indicates that sorting is enabled. || security-group | security-group | The security groups extension. || RBAC Policies | rbac-policies | Allows creation and modification of policies that control tenant access to resources. || standard-attr-description | standard-attr-description | Extension to add descriptions to standard attributes || IP address substring filtering | ip-substring-filtering | Provides IP address substring filtering when listing ports || Port Security | port-security | Provides port security || Allowed Address Pairs | allowed-address-pairs | Provides allowed address pairs || project_id field enabled | project-id | Extension that indicates that project_id field is enabled. || Port Bindings Extended | binding-extended | Expose port bindings of a virtual port to external application |+-----------------------------------------------------------------------------------------------------------------------------------------+--------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
[root@openstack-server ~]# openstack network agent list+--------------------------------------+--------------------+------------------+-------------------+-------+-------+---------------------------+| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |+--------------------------------------+--------------------+------------------+-------------------+-------+-------+---------------------------+| 12d016a1-f747-49cc-b6be-0d793877d394 | Linux bridge agent | openstack-server | None | :-) | UP | neutron-linuxbridge-agent || 9639fcea-da54-4bad-b3a6-16ffb96f3243 | Metadata agent | openstack-server | None | :-) | UP | neutron-metadata-agent || dc6d79c5-62e0-48fb-8a19-556b68bc7063 | DHCP agent | openstack-server | nova | :-) | UP | neutron-dhcp-agent |+--------------------------------------+--------------------+------------------+-------------------+-------+-------+---------------------------+
13.安装Dashboard:
安装openstack-dashboard(控制节点):[root@openstack-server ~]# yum -y install openstack-dashboard
修改dashboard配置文件:
[root@openstack-server ~]# vim /etc/openstack-dashboard/local_settingsOPENSTACK_HOST = "openstack-server"ALLOWED_HOSTS = ['openstack-server', 'localhost']SESSION_ENGINE = 'django.contrib.sessions.backends.cache'CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'openstack-server:11211', }}OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOSTOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = TrueOPENSTACK_API_VERSIONS = { "identity": 3, "image": 2, "volume": 2,}OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"OPENSTACK_NEUTRON_NETWORK = { 'enable_router': False, 'enable_quotas': False, 'enable_distributed_router': False, 'enable_ha_router': False, 'enable_lb': False, 'enable_firewall': False, 'enable_vpn': False, 'enable_fip_topology_check': False,}TIME_ZONE = "Asia/Shanghai"
修改openstack-dashboard服务配置:
[root@openstack-server ~]# vim /etc/httpd/conf.d/openstack-dashboard.confWSGIApplicationGroup %{GLOBAL}
重启httpd服务:[root@openstack-server ~]# systemctl restart httpd.service memcached.service
验证操作:
通过浏览器访问http://openstack-server/dashboard 输入域default,账号myuser,密码myuser;
14.安装cinder:
在Mariadb创建cinder相关库和用户(控制节点):
[root@openstack-server ~]# mysql -uroot -psmoke520 -e "CREATE DATABASE cinder;"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';"[root@openstack-server ~]# mysql -uroot -psmoke520 -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
创建cinder用户、服务、端点;
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack user create --domain default --password-prompt cinder[root@openstack-server ~]# openstack role add --project service --user cinder admin[root@openstack-server ~]# openstack service create --name cinderv2 \> --description "OpenStack Block Storage" volumev2[root@openstack-server ~]# openstack service create --name cinderv3 \> --description "OpenStack Block Storage" volumev3[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev2 public http://openstack-server:8776/v2/%\(project_id\)s[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev2 internal http://openstack-server:8776/v2/%\(project_id\)s[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev2 admin http://openstack-server:8776/v2/%\(project_id\)s[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev3 public http://openstack-server:8776/v3/%\(project_id\)s[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev3 internal http://openstack-server:8776/v3/%\(project_id\)s[root@openstack-server ~]# openstack endpoint create --region RegionOne \> volumev3 admin http://openstack-server:8776/v3/%\(project_id\)s
安装openstack-cinder:[root@openstack-server ~]# yum -y install openstack-cinder
修改cinder服务配置:
[root@openstack-server ~]# vim /etc/cinder/cinder.conf
[database]connection = mysql+pymysql://cinder:cinder@openstack-server/cinder[DEFAULT]transport_url = rabbit://openstack:openstack@openstack-serverauth_strategy = keystonemy_ip = 192.168.254.10[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_id = defaultuser_domain_id = defaultproject_name = serviceusername = cinderpassword = cinder[oslo_concurrency]lock_path = /var/lib/cinder/tmp
同步cinder数据库:[root@openstack-server ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
修改nova服务配置:
[root@openstack-server ~]# vim /etc/nova/nova.conf[cinder]os_region_name = RegionOne
重启nova-api服务:[root@openstack-server ~]# systemctl restart openstack-nova-api.service
启动cinder-api、cinder-scheduler服务:
[root@openstack-server ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service[root@openstack-server ~]# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
安装lvm2、device-mapper-persistent-data(计算节点):[root@openstack-server ~]# yum -y install lvm2 device-mapper-persistent-data
启动lvm2-lvmetad服务:
[root@openstack-server ~]# systemctl enable lvm2-lvmetad.service[root@openstack-server ~]# systemctl start lvm2-lvmetad.service
将/dev/sdb2作为lvm块存储设备:
[root@openstack-server ~]# fdisk -l /dev/sdb磁盘 /dev/sdb:250.1 GB, 250059350016 字节,488397168 个扇区Units = 扇区 of 1 * 512 = 512 bytes扇区大小(逻辑/物理):512 字节 / 512 字节I/O 大小(最小/最佳):512 字节 / 512 字节磁盘标签类型:dos磁盘标识符:0x441e1e17 设备 Boot Start End Blocks Id System/dev/sdb1 2048 106956799 53477376 8e Linux LVM/dev/sdb2 106956800 276826111 84934656 8e Linux LVM
[root@openstack-server ~]# pvcreate /dev/sdb2[root@openstack-server ~]# vgcreate cinder-volumes /dev/sdb2
修改lvm配置文件:
[root@openstack-server ~]# vim /etc/lvm/lvm.confdevices {...filter = [ "a/sdb2/","r/.*/"]...}
安装openstack-cinder、targetcli python-keystone服务:[root@openstack-server ~]# yum -y install openstack-cinder targetcli python-keystone
修改cinder服务配置:
[root@openstack-server ~]# vim /etc/cinder/cinder.conf[database]connection = mysql+pymysql://cinder:cinder@openstack-server/cinder[DEFAULT]transport_url = rabbit://openstack:openstack@openstack-serverauth_strategy = keystonemy_ip = 192.168.254.10enabled_backends = lvmglance_api_servers = http://openstack-server:9292[keystone_authtoken]www_authenticate_uri = http://openstack-server:5000auth_url = http://openstack-server:5000memcached_servers = openstack-server:11211auth_type = passwordproject_domain_id = defaultuser_domain_id = defaultproject_name = serviceusername = cinderpassword = cinder[lvm]volume_driver = cinder.volume.drivers.lvm.LVMVolumeDrivervolume_group = cinder-volumesiscsi_protocol = iscsiiscsi_helper = lioadm[oslo_concurrency]lock_path = /var/lib/cinder/tmp
启动cinder-volume、target服务:
[root@openstack-server ~]# systemctl enable openstack-cinder-volume.service target.service[root@openstack-server ~]# systemctl start openstack-cinder-volume.service target.service
验证操作:
[root@openstack-server ~]# openstack volume service list+------------------+----------------------+------+---------+-------+----------------------------+| Binary | Host | Zone | Status | State | Updated At |+------------------+----------------------+------+---------+-------+----------------------------+| cinder-scheduler | openstack-server | nova | enabled | up | 2018-10-25T14:07:19.000000 || cinder-volume | openstack-server@lvm | nova | enabled | up | 2018-10-25T14:07:24.000000 |+------------------+----------------------+------+---------+-------+----------------------------+
15.启动虚拟机实例:
创建Provider network网络:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# openstack network create --share --external \> --provider-physical-network provider \> --provider-network-type flat provider[root@openstack-server ~]# openstack subnet create --network provider \> --allocation-pool start=192.168.254.11,end=192.168.254.15 \> --dns-nameserver 114.114.114.114 --gateway 192.168.254.1 \> --subnet-range 192.168.254.0/27 provider[root@openstack-server ~]# openstack network list+--------------------------------------+----------+--------------------------------------+| ID | Name | Subnets |+--------------------------------------+----------+--------------------------------------+| 9979b724-3868-42b9-9e0b-61b42fd794a0 | provider | 12dbf504-9f38-40d1-b273-e1409bc712b2 |+--------------------------------------+----------+--------------------------------------+
创建Self-service network网络:
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# openstack network create selfservice[root@openstack-server ~]# openstack subnet create --network selfservice \> --dns-nameserver 114.114.114.114 --gateway 172.16.1.1 \> --subnet-range 172.16.1.0/24 selfservice[root@openstack-server ~]# openstack router create router[root@openstack-server ~]# openstack router add subnet router selfservice[root@openstack-server ~]# openstack router set router --external-gateway provider
验证操作:
[root@openstack-server ~]# . admin-openrc.sh[root@openstack-server ~]# ip netnsqrouter-0251f464-87d3-466e-9889-5b58eaeeb19b (id: 2)qdhcp-ad37ab93-04df-4b47-99d3-10dc0b2e630e (id: 1)qdhcp-cd105ed5-cb4d-4fd9-a4f3-3ab1642d7cb4 (id: 0)
[root@openstack-server ~]# openstack port list --router router+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+| ID | Name | MAC Address | Fixed IP Addresses | Status |+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+| 6390935b-7ab1-4608-a386-8f8d068a2ee0 | | fa:16:3e:4a:74:9e | ip_address='192.168.254.14', subnet_id='9e8f1c21-fc37-4dd7-b111-b4e25160b731' | ACTIVE || d44e3892-fb37-4c8e-b962-f1035f164409 | | fa:16:3e:c1:1c:72 | ip_address='172.16.1.1', subnet_id='f5ae3b68-4397-4caf-be61-63ef193e024c' | ACTIVE |+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+
创建flavor模板:[root@openstack-server ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
创建秘钥链:
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# ssh-keygen -q -N ""[root@openstack-server ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
验证操作:
[root@openstack-server ~]# openstack keypair list+-------+-------------------------------------------------+| Name | Fingerprint |+-------+-------------------------------------------------+| mykey | f3:95:1d:7f:24:e0:ba:a2:7f:9a:e8:98:7a:79:f7:f6 |+-------+-------------------------------------------------+
添加安全组:
[root@openstack-server ~]# openstack security group rule create --proto icmp default[root@openstack-server ~]# openstack security group rule create --proto tcp --dst-port 22 default[root@openstack-server ~]# openstack security group list+--------------------------------------+---------+-------------+----------------------------------+------+| ID | Name | Description | Project | Tags |+--------------------------------------+---------+-------------+----------------------------------+------+| 5c642955-4c0d-4913-83ac-ecd7fdc95846 | default | 缺省安全组 | f9d82471a2d84cdca15994649ad3ce17 | [] |+--------------------------------------+---------+-------------+----------------------------------+------+
Launch an instance on the provider network(在provider网络运行实例):
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# openstack flavor list+----+---------+-----+------+-----------+-------+-----------+| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |+----+---------+-----+------+-----------+-------+-----------+| 0 | m1.nano | 64 | 1 | 0 | 1 | True |+----+---------+-----+------+-----------+-------+-----------+
[root@openstack-server ~]# openstack image list+--------------------------------------+--------+--------+| ID | Name | Status |+--------------------------------------+--------+--------+| 68cc1d9d-3018-4c42-a20c-70d0e4215a24 | cirros | active |+--------------------------------------+--------+--------+
[root@openstack-server ~]# openstack network list+--------------------------------------+-------------+--------------------------------------+| ID | Name | Subnets |+--------------------------------------+-------------+--------------------------------------+| ad37ab93-04df-4b47-99d3-10dc0b2e630e | selfservice | f5ae3b68-4397-4caf-be61-63ef193e024c || cd105ed5-cb4d-4fd9-a4f3-3ab1642d7cb4 | provider | 9e8f1c21-fc37-4dd7-b111-b4e25160b731 |+--------------------------------------+-------------+--------------------------------------+
[root@openstack-server ~]# openstack security group list+--------------------------------------+---------+-------------+----------------------------------+------+| ID | Name | Description | Project | Tags |+--------------------------------------+---------+-------------+----------------------------------+------+| 48512492-a516-4219-9a94-c81ac593963d | default | 缺省安全组 | c6b624a854694b4bb6dacd361bd7589d | [] |+--------------------------------------+---------+-------------+----------------------------------+------+
[root@openstack-server ~]# openstack server create --flavor m1.nano --image cirros \> --nic net-id=9979b724-3868-42b9-9e0b-61b42fd794a0 --security-group default \> --key-name mykey provider-instance
[root@openstack-server ~]# openstack console url show provider-instance(获取vnc url)
Launch an instance on the self-service network(在self-service网络运行实例):
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# openstack flavor list+----+---------+-----+------+-----------+-------+-----------+| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public |+----+---------+-----+------+-----------+-------+-----------+| 0 | m1.nano | 64 | 1 | 0 | 1 | True |+----+---------+-----+------+-----------+-------+-----------+
[root@openstack-server ~]# openstack image list+--------------------------------------+--------+--------+| ID | Name | Status |+--------------------------------------+--------+--------+| 68cc1d9d-3018-4c42-a20c-70d0e4215a24 | cirros | active |+--------------------------------------+--------+--------+
[root@openstack-server ~]# openstack network list+--------------------------------------+-------------+--------------------------------------+| ID | Name | Subnets |+--------------------------------------+-------------+--------------------------------------+| ad37ab93-04df-4b47-99d3-10dc0b2e630e | selfservice | f5ae3b68-4397-4caf-be61-63ef193e024c || cd105ed5-cb4d-4fd9-a4f3-3ab1642d7cb4 | provider | 9e8f1c21-fc37-4dd7-b111-b4e25160b731 |+--------------------------------------+-------------+--------------------------------------+
[root@openstack-server ~]# openstack security group list+--------------------------------------+---------+-------------+----------------------------------+------+| ID | Name | Description | Project | Tags |+--------------------------------------+---------+-------------+----------------------------------+------+| 48512492-a516-4219-9a94-c81ac593963d | default | 缺省安全组 | c6b624a854694b4bb6dacd361bd7589d | [] |+--------------------------------------+---------+-------------+----------------------------------+------+
[root@openstack-server ~]# openstack server create --flavor m1.nano --image cirros \> --nic net-id=ad37ab93-04df-4b47-99d3-10dc0b2e630e --security-group default \> --key-name mykey selfservice-instance
[root@openstack-server ~]# openstack server list+--------------------------------------+----------------------+--------+-------------------------+--------+---------+| ID | Name | Status | Networks | Image | Flavor |+--------------------------------------+----------------------+--------+-------------------------+--------+---------+| 105e9757-7ba5-4a3f-81b7-cecdff2fa167 | selfservice-instance | ACTIVE | selfservice=172.16.1.10 | cirros | m1.nano |+--------------------------------------+----------------------+--------+-------------------------+--------+---------+
[root@openstack-server ~]# openstack console url show selfservice-instance(获取vnc url)
创建卷:
[root@openstack-server ~]# . myuser-openrc.sh[root@openstack-server ~]# openstack volume create --size 1 volume1+---------------------+--------------------------------------+| Field | Value |+---------------------+--------------------------------------+| attachments | [] || availability_zone | nova || bootable | false || consistencygroup_id | None || created_at | 2018-11-04T14:38:32.000000 || description | None || encrypted | False || id | 2a67c881-b7d6-47fb-9da4-c37dcb0ccf72 || multiattach | False || name | volume1 || properties | || replication_status | None || size | 1 || snapshot_id | None || source_volid | None || status | creating || type | None || updated_at | None || user_id | 2a2e5a1a1a464efaabaca83b439999e4 |+---------------------+--------------------------------------+
[root@openstack-server ~]# openstack volume list+--------------------------------------+---------+-----------+------+----------------------------------+| ID | Name | Status | Size | Attached to |+--------------------------------------+---------+-----------+------+----------------------------------+| 2a67c881-b7d6-47fb-9da4-c37dcb0ccf72 | volume1 | available | 1 | || a63a0afe-3be8-45aa-b7be-820d88874fc4 | | in-use | 20 | Attached to centos6 on /dev/vda |+--------------------------------------+---------+-----------+------+----------------------------------+