Name         Role           External IP       Internal IP
controller   control node   192.168.100.100   192.168.10.1
compute01    compute node   192.168.100.101   192.168.10.2
cinder01

Add a second NIC to each node for the internal network and assign the internal IPs above.

Steps

1.1 Basic Configuration

Disable the firewall and SELinux

systemctl stop firewalld.service

systemctl disable firewalld.service

vim /etc/selinux/config    # set SELINUX=disabled

setenforce 0
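
To make the SELinux change non-interactively, a small sketch (assuming the stock /etc/selinux/config layout):

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
getenforce    # prints Permissive now; Disabled after a reboot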

Install the repository and the OpenStack client

yum install centos-release-openstack-queens

yum install python2-openstackclient -y   

Install the time service

yum install chrony -y
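
On the controller it is worth letting the other nodes sync from it; a minimal chrony sketch, assuming the lab's management subnet 192.168.100.0/24:

vim /etc/chrony.conf
allow 192.168.100.0/24    # serve time to the compute/storage nodes

systemctl enable chronyd
systemctl restart chronyd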

Install the database

yum install mariadb mariadb-server python2-PyMySQL -y

Edit the configuration file

vim /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 192.168.100.100 # local (controller) IP
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

Start the database and secure the installation

systemctl restart mariadb
systemctl enable mariadb

mysql_secure_installation
Disallow root login remotely? [Y/n] n

Set the hostnames (run the first command on the control node, the second on the compute node)

hostnamectl set-hostname controller
hostnamectl set-hostname compute01

Install RabbitMQ

yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service    

# To keep RabbitMQ's "openstack" user from disappearing, pin the node name to the controller hostname in rabbitmq-env.conf
echo 'NODENAME=rabbit@controller' | sudo tee -a /etc/rabbitmq/rabbitmq-env.conf

# Add a RabbitMQ user named "openstack" with password "123456"
rabbitmqctl add_user openstack 123456

# Grant the openstack user full permissions
rabbitmqctl set_permissions openstack ".*" ".*" ".*"

# Verify that RabbitMQ is running and port 5672 is listening
netstat -lantu | grep 5672
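
You can also check the user and its permissions directly; both rabbitmqctl subcommands below are standard:

rabbitmqctl list_users          # "openstack" should appear in the list
rabbitmqctl list_permissions    # openstack should show ".*" for configure/write/read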

Install the caching service

yum install memcached python-memcached -y

vim /etc/sysconfig/memcached  # append the controller hostname to the end of OPTIONS
OPTIONS="-l 127.0.0.1,::1,controller"

systemctl enable memcached
systemctl restart memcached

Install Etcd

yum install etcd -y

vim /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.100.100:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.100.100:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.100.100:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.100.100:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.100.100:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

systemctl enable etcd
systemctl restart etcd
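
A quick health check; this sketch assumes the packaged etcdctl defaults to the v2 API (as on CentOS 7):

etcdctl --endpoints=http://192.168.100.100:2379 cluster-health
# expected output ends with: cluster is healthy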

Configure name resolution (hosts file)

vim /etc/hosts
192.168.100.100 controller
192.168.100.101 compute01
192.168.100.102 cinder01

2.1 Keystone

Create the keystone database in MariaDB on the control node (controller):

mysql -uroot -p123456    

CREATE DATABASE keystone;       

Grant the keystone database user local and remote access with password "123456":

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';

Install the Keystone packages

yum install openstack-keystone httpd mod_wsgi -y  

Back up the original configuration file

cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak

Strip the commented ("#") lines from the original file:

cat /etc/keystone/keystone.conf.bak | grep -v ^# | uniq > /etc/keystone/keystone.conf

Edit the configuration file

vim /etc/keystone/keystone.conf 

[database]
connection=mysql+pymysql://keystone:123456@controller/keystone 
[token]
provider = fernet

Populate the database

su -s /bin/sh -c "keystone-manage db_sync" keystone

Initialize the Fernet key repositories:

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

Bootstrap the identity service and configure Keystone's authentication information (the admin password for the future OpenStack login screen is set here):

keystone-manage bootstrap --bootstrap-password 123456 \
 --bootstrap-admin-url http://controller:5000/v3/ \
 --bootstrap-internal-url http://controller:5000/v3/ \
 --bootstrap-public-url http://controller:5000/v3/ \
 --bootstrap-region-id RegionOne

Parameter notes:
--bootstrap-password: the keystone admin password
--bootstrap-admin-url: the admin authentication URL
--bootstrap-internal-url: the internal authentication URL
--bootstrap-public-url: the public authentication URL
--bootstrap-region-id: the region name

Configure Apache
Set ServerName to the local hostname in the Apache configuration file; at line 96 add: ServerName controller

vim /etc/httpd/conf/httpd.conf

Symlink wsgi-keystone.conf into the Apache configuration directory:

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

systemctl enable httpd.service
systemctl restart httpd.service
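
Before continuing, you can confirm the Keystone WSGI application answers; the exact JSON body may differ, but a version document means it is up:

curl http://controller:5000/v3
# expect a JSON "version" document for the v3 API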

2.2 Verification

Create an environment script

To simulate logging in with a particular account, e.g. the administrator, create an environment script:

vim /root/admin-openrc

export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3 
export OS_IDENTITY_API_VERSION=3

Parameter notes:
export OS_USERNAME=admin: the keystone admin account
export OS_PASSWORD=123456: the admin password set during keystone bootstrap
export OS_PROJECT_NAME=admin: the OpenStack project to operate in
export OS_USER_DOMAIN_NAME=Default: the domain the user belongs to
export OS_PROJECT_DOMAIN_NAME=Default: the domain the project belongs to
export OS_AUTH_URL=http://controller:5000/v3: the authentication endpoint
export OS_IDENTITY_API_VERSION=3: the identity API version

Source the script:

. /root/admin-openrc 

Check the current environment:

env | grep OS 

Verify

openstack token issue

With authentication working, create a domain named "example", described as "Test Example":

openstack domain create --description "Test Example" example

In the default domain, create a project named "service", described as "Service Project":

openstack project create --domain default --description "Service Project" service

List all projects in the current environment:

openstack project list

Besides the administrator, we usually also need some unprivileged projects and users.

In the default domain, create a project named "demo", described as "Demo Project":

openstack project create --domain default --description "Demo Project" demo

Create a user named "leon" in the default domain; set the password to 123456 when prompted:

openstack user create leon --domain default --password-prompt

Create an ordinary-user role for OpenStack, named "user":

openstack role create user 

List the current roles:

openstack role list

Give user "leon" the ordinary "user" role in the "demo" project (the command returns no output):

openstack role add --project demo --user leon user

Verify the logins:

unset OS_AUTH_URL OS_PASSWORD

Log in as admin:

openstack --os-auth-url http://controller:35357/v3 \
 --os-project-domain-name Default --os-user-domain-name Default \
 --os-project-name admin --os-username admin token issue
 
password: 123456

Log in as leon:

openstack --os-auth-url http://controller:5000/v3 \
 --os-project-domain-name Default --os-user-domain-name Default \
 --os-project-name demo --os-username leon token issue
 
password: 123456

Exercise:

In the default domain, create a user named "aa" and set the password to 123456 when prompted:

openstack user create aa --domain default --password-prompt 

Give user "aa" the ordinary "user" role in the "demo" project:

openstack role add --project demo --user aa user

Check the result:

openstack role list --user aa --project demo

3.1 Glance

Create the glance database in MariaDB on the control node (controller):

mysql -u root -p123456

CREATE DATABASE glance;

Create the glance database user with local and remote access, password "glance_db":

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance_db';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance_db';

FLUSH PRIVILEGES;

Create a glance user on the control node (controller)

Source the environment variables first:

. /root/admin-openrc

Set the glance user's password to 123456:

openstack user create glance --domain default --password 123456

Give the glance user the admin role in the service project:

openstack role add --project service --user glance admin

Create a service entity of type image, described as "OpenStack Image":

openstack service create --name glance --description "OpenStack Image" image

Create the three endpoint API variants for the image service in the RegionOne region:

openstack endpoint create --region RegionOne image public http://controller:9292

openstack endpoint create --region RegionOne image internal http://controller:9292

openstack endpoint create --region RegionOne image admin http://controller:9292

(To delete an endpoint, look up its id first:)

openstack endpoint list
openstack endpoint delete [endpoint-id]

Install and configure the Glance packages

yum install openstack-glance -y

Configure glance-api:

cp /etc/glance/glance-api.conf  /etc/glance/glance-api.conf.bak

cat /etc/glance/glance-api.conf.bak  |  grep -v ^#  |  uniq > /etc/glance/glance-api.conf

vim /etc/glance/glance-api.conf

Set the following:

[database]
connection = mysql+pymysql://glance:glance_db@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone

Notes:

[database] # database settings
connection = mysql+pymysql://glance:glance_db@controller/glance
[glance_store] # glance store settings
stores = file,http # storage backends
default_store = file # default store type
filesystem_store_datadir = /var/lib/glance/images/ # image storage path
[keystone_authtoken] # keystone authentication settings
auth_uri = http://controller:5000 # authentication URI
auth_url = http://controller:5000 # authentication URL
memcached_servers = controller:11211 # memcached endpoint
auth_type = password # authentication method
project_domain_name = Default # project domain
user_domain_name = Default # user domain
project_name = service # project
username = glance # service user name
password = 123456 # service user password
[paste_deploy] # deployment flavor
......

Configure glance-registry:

cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak

cat /etc/glance/glance-registry.conf.bak | grep -v ^# | uniq > /etc/glance/glance-registry.conf

vim /etc/glance/glance-registry.conf 

Set the following:

[database]
connection = mysql+pymysql://glance:glance_db@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone

Populate the glance database:

su -s /bin/sh -c "glance-manage db_sync" glance

Start the services and enable them at boot:

systemctl enable openstack-glance-api.service openstack-glance-registry.service

systemctl restart openstack-glance-api.service openstack-glance-registry.service

3.2 Verification

Run the admin environment script and download a test image:

. /root/admin-openrc

wget https://yukisrye.top:442/f/PaCD/cirros-0.4.0-x86_64-disk.img
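
Before uploading, you can confirm the file really is qcow2, matching the --disk-format used below (this assumes the qemu-img tool is installed, e.g. via the qemu-img package):

qemu-img info cirros-0.4.0-x86_64-disk.img
# look for: file format: qcow2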

Create an OpenStack image
Use Glance to create a valid, public OpenStack image named cirros:

openstack image create "cirros" \
 --file cirros-0.4.0-x86_64-disk.img \
 --disk-format qcow2 --container-format bare \
 --public

Notes:
openstack image create "cirros" \
--file cirros-0.4.0-x86_64-disk.img \ # source image path
--disk-format qcow2 --container-format bare \ # disk format and container type
--public # image visibility

List the images:

openstack image list

4.1 Nova API

Create the Nova databases:

mysql -uroot -p123456

CREATE DATABASE nova_api;

CREATE DATABASE nova;

CREATE DATABASE nova_cell0;

Create the nova database user and grant access:

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_db';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova_db';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_db';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova_db';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_db';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova_db';

flush privileges;

Create the Nova service identity

Create a nova user on the control node (controller)

Source the environment variables first:

. /root/admin-openrc

Create the nova user in the default domain with password 123456:

openstack user create nova --domain default  --password 123456             

Give the nova user the admin role in the service project (the command returns no output):

openstack role add --project service --user nova admin

Create a compute service entity named nova, described as "OpenStack Compute":

openstack service create --name nova --description "OpenStack Compute" compute  

Create the three endpoint API variants for the compute service in the RegionOne region:

openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

Create the placement user in the default domain with password 123456:

openstack user create placement  --domain default --password 123456

Give the placement user the admin role in the service project (the command returns no output):

openstack role add --project service --user placement admin

Create a placement service entity, described as "Placement API":

openstack service create --name placement --description "Placement API" placement

Create the three endpoint API variants for the placement service in the RegionOne region:

openstack endpoint create --region RegionOne placement public http://controller:8778

openstack endpoint create --region RegionOne placement internal http://controller:8778

openstack endpoint create --region RegionOne placement admin http://controller:8778

Install and configure the Nova packages

yum install openstack-nova-api \
openstack-nova-conductor \
openstack-nova-console \
openstack-nova-novncproxy \
openstack-nova-scheduler \
openstack-nova-placement-api -y

Edit the nova configuration file

cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
cat /etc/nova/nova.conf.bak | grep -v ^# | uniq > /etc/nova/nova.conf
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:123456@controller
my_ip = 192.168.100.100
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection=mysql+pymysql://nova:nova_db@controller/nova_api
[database]
connection = mysql+pymysql://nova:nova_db@controller/nova
[glance]
api_servers = http://controller:9292
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

Notes:
[DEFAULT]
......
......
my_ip = 192.168.100.100 # local management network IP
......
firewall_driver = nova.virt.firewall.NoopFirewallDriver # the built-in firewalld service must be disabled
[api] # API authentication settings
......
[api_database] # database settings
......
[database] # database settings
......
[glance] # glance settings
......
[keystone_authtoken] # keystone authentication settings
auth_url = http://controller:5000/v3 # authentication URL
memcached_servers = controller:11211 # memcached endpoint
auth_type = password # authentication method
project_domain_name = default # project domain
user_domain_name = default # user domain
project_name = service # project
username = nova # service user name
password = 123456 # service user password
[oslo_concurrency]
......
[placement] # placement settings
......
[vnc] # VNC settings
......

Due to a known packaging bug, the Placement API configuration also needs a patch; at line 13 insert the following:

vim /etc/httpd/conf.d/00-nova-placement-api.conf

<Directory /usr/bin>
  <IfVersion >= 2.4>
   Require all granted
  </IfVersion>
  <IfVersion < 2.4>
   Order allow,deny
   Allow from all
  </IfVersion>
</Directory>

systemctl restart httpd

Populate the nova-api database:

su -s /bin/sh -c "nova-manage api_db sync" nova    

Register the cell0 database:

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

Create the cell1 cell:

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
A UUID such as 4c2eafe4-415c-43cd-a03a-d64c97f04782 is the normal output.

Populate the nova database:

su -s /bin/sh -c "nova-manage db sync" nova
Warning messages can be ignored.

Verify that cell0 and cell1 are registered correctly:

nova-manage cell_v2 list_cells
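
As an extra sanity check you can run nova's built-in readiness report; at this stage some checks may still warn, since no compute node has been registered yet:

nova-status upgrade check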

4.2 Nova Compute

Compute node configuration

Perform these steps on the compute node (compute01).

Copy /etc/hosts from the control node to keep name resolution consistent:

scp root@192.168.100.100:/etc/hosts /etc/hosts

Install chrony and synchronize time with the control node (controller):

yum install chrony

Configure time synchronization:

vim /etc/chrony.conf

Comment out the default NTP servers on lines 3-6 with a leading "#", then append at the end of the file:

server controller iburst      # use controller as the NTP server

Restart the service:

systemctl restart chronyd
systemctl enable chronyd

Verify time synchronization:

chronyc sources -v  

A * before the source means the clock is synchronized (this can take a while):

MS Name/IP address         Stratum Poll Reach LastRx Last sample
^* controller                   10   6    17     1   +367ns[ +161us] +/-  133us
(the ^* marks controller as the synchronized source)

Install nova-compute and edit its configuration

yum install openstack-nova-compute -y

Edit the nova configuration file:

cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
cat /etc/nova/nova.conf.bak | grep -v ^# | uniq > /etc/nova/nova.conf
vim /etc/nova/nova.conf

Set the configuration to:

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:123456@controller
my_ip = 192.168.100.101
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[glance]
api_servers = http://controller:9292
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123456
[vnc]
enabled = True
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

Check whether the compute node supports hardware virtualization acceleration:

egrep -c '(vmx|svm)' /proc/cpuinfo

(This lab uses a virtual machine as the compute node; if the returned value is < 1, add the following to the [libvirt] section of /etc/nova/nova.conf:

vim /etc/nova/nova.conf

[libvirt]
virt_type = qemu

When using VMware Workstation, you also need to enable Intel VT-x for the VM.)
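
libvirt ships a helper that performs the same hardware check; a quick sketch (virt-host-validate comes with the libvirt packages):

virt-host-validate qemu
# a FAIL on "hardware virtualization" confirms you need virt_type = qemu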

Start the services and enable them at boot:

systemctl enable libvirtd.service openstack-nova-compute.service

systemctl restart libvirtd.service openstack-nova-compute.service

Control node configuration

Discover the compute node's cell on the control node (run this only after the compute node has been configured):

nova-manage cell_v2 discover_hosts --verbose

Start the services and enable them at boot:

systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service

On the control node (controller), list all current compute nodes:

. /root/admin-openrc

openstack compute service list --service nova-compute

When new compute nodes are added, run the following to discover them:

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

Alternatively, set an automatic discovery interval in nova.conf on the control node:

vim /etc/nova/nova.conf

[scheduler]
discover_hosts_in_cells_interval = 300        # in seconds

5.1 Neutron

Control node

Create the database and grant privileges (in MariaDB on the controller):

mysql -uroot -p123456

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron_db';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron_db';

flush privileges;

Create the Neutron service identity

Create an OpenStack user named neutron on the control node (controller)

Source the environment variables first:

. /root/admin-openrc

Create the neutron user in the default domain:

openstack user create --domain default --password 123456 neutron                  

Give the neutron user the admin role in the service project:

openstack role add --project service --user neutron admin

Create a network service entity named neutron, described as "OpenStack Networking":

openstack service create --name neutron --description "OpenStack Networking" network

Create the three endpoint API variants for the network service in the RegionOne region:

openstack endpoint create --region RegionOne network public http://controller:9696

openstack endpoint create --region RegionOne network internal http://controller:9696

openstack endpoint create --region RegionOne network admin http://controller:9696

Network configuration

Configure the main neutron configuration file, /etc/neutron/neutron.conf:

cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
cat /etc/neutron/neutron.conf.bak | grep -v ^# | uniq > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf

Set the following:

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:neutron_db@controller/neutron 
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Configure the ML2 plug-in:

cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak

cat /etc/neutron/plugins/ml2/ml2_conf.ini.bak | grep -v ^# | uniq > /etc/neutron/plugins/ml2/ml2_conf.ini

vim /etc/neutron/plugins/ml2/ml2_conf.ini

Set the following:

[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security 
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true

Bridge configuration

CentOS does not enable the bridge-nf transparent bridge feature by default; enable it as follows:

vim /usr/lib/sysctl.d/00-system.conf 
# change the 0 values to 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1

Or edit this file and add:

vim /etc/sysctl.conf 

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1

Load the bridge netfilter module:

modprobe br_netfilter

Apply the settings:

/sbin/sysctl -p                               

Load the module automatically at boot:

① Create an rc.sysinit file under /etc/ with the following content:

vim /etc/rc.sysinit

#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done

② Create br_netfilter.modules in the /etc/sysconfig/modules/ directory:

vim /etc/sysconfig/modules/br_netfilter.modules

modprobe br_netfilter

Make it executable:

chmod 755 /etc/sysconfig/modules/br_netfilter.modules

After a reboot, check the module:

lsmod |grep br_netfilter
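
To confirm the sysctl values actually took effect once the module is loaded, a quick check:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
# both should report = 1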

Configure the Linux bridge agent

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak

cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak | grep -v ^# | uniq > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

Edit the configuration:

[linux_bridge]
physical_interface_mappings = provider:ens33
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = true
local_ip = 192.168.10.1
l2_population = true 

Notes:

[linux_bridge]
physical_interface_mappings = provider:ens33
Here "ens33" is the control node's external/management NIC name.
[vxlan]
local_ip = 192.168.10.1 # the control node's tunnel interface IP

Configure the Layer-3 (L3) agent, which provides routing and NAT for self-service virtual networks:

cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
cat /etc/neutron/l3_agent.ini.bak | grep -v ^# | uniq > /etc/neutron/l3_agent.ini
vim /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = linuxbridge

Configure the DHCP agent:

cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
cat /etc/neutron/dhcp_agent.ini.bak | grep -v ^# | uniq > /etc/neutron/dhcp_agent.ini
vim /etc/neutron/dhcp_agent.ini

[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

Configure the metadata agent:

cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
cat /etc/neutron/metadata_agent.ini.bak | grep -v ^# | uniq > /etc/neutron/metadata_agent.ini
vim /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = meta_123456

Configure the [neutron] section of the nova configuration file on the control node:

vim /etc/nova/nova.conf

[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = true
metadata_proxy_shared_secret = meta_123456

On the control node (controller), create the ML2 symlink and populate the neutron database:

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

On the control node (controller), restart/start the related services and enable the neutron services at boot:

systemctl restart openstack-nova-api.service

systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-l3-agent.service neutron-metadata-agent.service

systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-l3-agent.service neutron-metadata-agent.service
or
systemctl restart neutron-server.service
systemctl restart neutron-linuxbridge-agent.service 
systemctl restart neutron-dhcp-agent.service 
systemctl restart neutron-metadata-agent.service
systemctl restart neutron-l3-agent.service

Compute node

Install the packages

yum install openstack-neutron-linuxbridge ebtables ipset

Configure the main neutron configuration file:

cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
cat /etc/neutron/neutron.conf.bak | grep -v ^# | uniq > /etc/neutron/neutron.conf
vim /etc/neutron/neutron.conf

Set the following:

[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Configure the Linux bridge agent, /etc/neutron/plugins/ml2/linuxbridge_agent.ini:

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak

cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak | grep -v ^# | uniq > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

Set the following:

[linux_bridge]
physical_interface_mappings = provider:ens33
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = true
local_ip = 192.168.10.2
l2_population = true

Notes:
physical_interface_mappings = provider:ens33 # added to mirror the control node; "ens33" is assumed to be the compute node's external/management NIC name, adjust if it differs
local_ip = 192.168.10.2 # the compute node's tunnel interface IP

CentOS does not enable the bridge-nf transparent bridge feature by default; enable it just as on the control node:

vim /usr/lib/sysctl.d/00-system.conf 
# change the 0 values to 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1

Or edit this file and add:

vim /etc/sysctl.conf 

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1

Load the bridge netfilter module:

modprobe br_netfilter

Apply the settings:

/sbin/sysctl -p                               

Load the module automatically at boot:

① Create an rc.sysinit file under /etc/ with the following content:

vim /etc/rc.sysinit

#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done

② Create br_netfilter.modules in the /etc/sysconfig/modules/ directory:

vim /etc/sysconfig/modules/br_netfilter.modules

modprobe br_netfilter

Make it executable:

chmod 755 /etc/sysconfig/modules/br_netfilter.modules

After a reboot, check the module:

lsmod |grep br_netfilter

Edit the nova configuration file:

vim /etc/nova/nova.conf

[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456

Restart/start the related services and enable them at boot:

systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

Deployment verification

On the control node (controller):

. /root/admin-openrc

openstack network agent list      # (provider layer-2 network) success when Agent Type includes an L3 agent and Host includes compute01

6.1 Horizon

Install the Horizon package on the control node (controller):

yum install openstack-dashboard 

Edit the configuration file

cp /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.bak

vim /etc/openstack-dashboard/local_settings


# at line 188, set:
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# at line 38, set:
ALLOWED_HOSTS = ['*']

# at line 164, configure session caching by adding:
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}

# at line 64, configure the API versions:
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

# at line 75, enable multi-domain support:
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# at line 97, set the default domain to "Default":
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'

# at line 464, set the time zone:
TIME_ZONE = "Asia/Shanghai"

Edit the Apache dashboard configuration:

vim /etc/httpd/conf.d/openstack-dashboard.conf
# at line 4, add:
WSGIApplicationGroup %{GLOBAL}

Restart the services

systemctl restart httpd.service
systemctl restart memcached.service
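
Before opening a browser you can verify the dashboard URL answers; Horizon may reply with 200 or a redirect to its login page:

curl -I http://controller/dashboard
# expect HTTP/1.1 200 OK or a 30x redirect to /dashboard/auth/login/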

Log in to Horizon

http://192.168.100.100/dashboard
# 192.168.100.100 is the controller's management network IP
Domain: Default
User: admin
Password: 123456

You should now be logged in.

Common problems and how to fix them:

If you followed the article and still hit errors, check the following:

Are the firewall and SELinux disabled?

systemctl stop firewalld
setenforce 0

Have you sourced the login credentials?

. /root/admin-openrc

Are the passwords configured correctly?

Database passwords in this guide follow the '<service>_db' pattern (glance_db, nova_db, neutron_db); the keystone database uses 123456.
All other passwords in this guide are '123456'.

Did you add extra lines to, or omit lines from, the configuration files?

Last modified: May 10, 2024