haoningabc
  • 浏览: 1444599 次
  • 性别: Icon_minigender_1
  • 来自: 北京
社区版块
存档分类
最新评论

M版openstack(ovs,dvr,动态迁移)

阅读更多
主要内容
1.先搭建三个节点的环境,dvr模式
2.建一个vm的例子
3.动态迁移的配置和例子


控制节点
192.168.139.161
计算节点两个,第二个可以复制过去改ip即可
192.168.139.193
192.168.139.194


先做两个节点控制节点和计算节点
mcontroller605  192.168.139.161
mcompute605  192.168.139.193
# --- Base setup: run on BOTH controller and compute nodes (adjust IPADDR per node) ---
# Set the system timezone to Asia/Shanghai.
rm /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

# Static network config for eth0 (file contents below; edit this file):
/etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
IPADDR=192.168.139.161
NETMASK=255.255.240.0
GATEWAY=192.168.128.1
DNS1=114.114.114.114

# Add hostname entries for all nodes (mcontroller605 / mcompute605) here:
/etc/hosts
##############

#############
# Remove PackageKit so it does not hold the yum lock, then enable the Mitaka repos.
yum remove PackageKit -y
yum install centos-release-openstack-mitaka -y
yum install https://repos.fedorapeople.org/repos/openstack/openstack-mitaka/rdo-release-mitaka-6.noarch.rpm -y
yum upgrade -y
yum install python-openstackclient openstack-selinux -y
# Set SELINUX=permissive (or disabled) in this file:
vim /etc/selinux/config
# Disable the firewall and NetworkManager; neutron manages networking itself.
systemctl stop firewalld
systemctl disable firewalld
systemctl stop NetworkManager
systemctl disable NetworkManager


//以上是两个节点都要装的,注意ip

//下面是控制节点开始安装:
# --- Controller only: database and message queue ---
yum install mariadb mariadb-server python2-PyMySQL -y
# Create this drop-in config file with the contents below:
/etc/my.cnf.d/openstack.cnf
[mysqld]
default-time-zone=+8:00
# If timestamps look wrong, check whether they are off by 8 hours (UTC vs +08:00)
bind-address = 192.168.139.161
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb.service
systemctl start mariadb.service
# Interactive: set the root password and remove test accounts.
mysql_secure_installation


# RabbitMQ message broker; create the "openstack" user (password "haoning")
# and grant it full configure/write/read permissions.
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack haoning
rabbitmqctl set_permissions openstack ".*" ".*" ".*"


如果异常重启可能导致openstack用户丢失,
可以rabbitmqctl delete_user openstack
再重新执行上两行

# Memcached is used by keystone (and other services) for caching tokens.
yum install memcached python-memcached -y
# IMPORTANT: change the listen address here, otherwise novnc may fail when the
# compute node is reached through the controller.
# (fixed typo: the config file is "memcached", not "memcache")
vim /etc/sysconfig/memcached
# Change 127.0.0.1 to 0.0.0.0 so memcached listens on all interfaces.
systemctl enable memcached.service
systemctl start memcached.service
mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'  IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'mcontroller605'  IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'mcompute605'  IDENTIFIED BY 'haoning';

//加了两个host,不加似乎有问题

# Generate a random admin token (example output shown on the next line).
openssl rand -hex 10
18b155be197348f7b330
yum install openstack-keystone httpd mod_wsgi -y
# Edit keystone's config with the sections below:
/etc/keystone/keystone.conf

[DEFAULT]
admin_token = 18b155be197348f7b330
[database]
connection = mysql+pymysql://keystone:haoning@mcontroller605/keystone
[token]
provider = fernet

###############
# Populate the keystone database and initialize fernet token keys.
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

/etc/httpd/conf/httpd.conf
#########################
ServerName mcontroller605
########################
/etc/httpd/conf.d/wsgi-keystone.conf
#####################
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

###################
systemctl enable httpd.service
systemctl start httpd.service

第一次执行的时候需要这个
# Bootstrap keystone using the admin token (only needed the first time,
# before any users exist). OS_TOKEN must match admin_token in keystone.conf.
export OS_TOKEN=18b155be197348f7b330
export OS_URL=http://mcontroller605:35357/v3
export OS_IDENTITY_API_VERSION=3
# Register the identity service and its public/internal/admin endpoints
# in region "wuhan".
openstack service create --name keystone --description "OpenStack Identity" identity
openstack endpoint create --region wuhan  identity public http://mcontroller605:5000/v3
openstack endpoint create --region wuhan  identity internal http://mcontroller605:5000/v3
openstack endpoint create --region wuhan  identity admin http://mcontroller605:35357/v3

# Create the default domain, admin project/user and the admin role binding.
openstack domain create --description "Default Domain" default
openstack project create --domain default --description "Admin Project" admin
openstack user create --domain default --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin

# Service project (for service accounts) plus an unprivileged demo project/user.
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user

# Sanity checks.
openstack domain list
openstack project list
openstack user list
openstack role list


# Switch from token auth to password auth and verify both accounts work.
unset OS_TOKEN OS_URL
openstack --os-auth-url http://mcontroller605:35357/v3  --os-project-domain-name default --os-user-domain-name default  --os-project-name admin --os-username admin token issue
openstack --os-auth-url http://mcontroller605:5000/v3  --os-project-domain-name default --os-user-domain-name default  --os-project-name demo --os-username demo token issue


# Credential file for the admin user ("source admin-openrc" before admin CLI work):
admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=haoning
export OS_AUTH_URL=http://mcontroller605:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Credential file for the unprivileged demo user (note port 5000, not 35357):
demo-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=haoning
export OS_AUTH_URL=http://mcontroller605:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2


★★★★★★★安装glance
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'mcontroller605'  IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'mcompute605'  IDENTIFIED BY 'haoning';

openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin

openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region wuhan image public http://mcontroller605:9292
openstack endpoint create --region wuhan image internal http://mcontroller605:9292
openstack endpoint create --region wuhan image admin http://mcontroller605:9292

openstack endpoint list

yum install openstack-glance -y


/etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:haoning@mcontroller605/glance
[keystone_authtoken]
auth_uri = http://mcontroller605:5000
auth_url = http://mcontroller605:35357
memcached_servers = mcontroller605:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = haoning
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

/etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:haoning@mcontroller605/glance
[keystone_authtoken]
auth_uri = http://mcontroller605:5000
auth_url = http://mcontroller605:35357
memcached_servers = mcontroller605:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = haoning

[paste_deploy]
flavor = keystone

启动glance服务
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service

wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
openstack image create "cirros"  --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare  --public
openstack image list


//nova的安装★★★★★★★Install and configure controller node
mysql -u root -p
CREATE DATABASE nova_api;
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'mcontroller605' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'mcompute605' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'mcontroller605' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'mcompute605' IDENTIFIED BY 'haoning';

openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region wuhan compute public http://mcontroller605:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region wuhan compute internal http://mcontroller605:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region wuhan compute admin http://mcontroller605:8774/v2.1/%\(tenant_id\)s

yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler -y


/etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.139.161
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:haoning@mcontroller605/nova_api
[database]
connection = mysql+pymysql://nova:haoning@mcontroller605/nova
[oslo_messaging_rabbit]
rabbit_host = mcontroller605
rabbit_userid = openstack
rabbit_password = haoning
[keystone_authtoken]
auth_uri = http://mcontroller605:5000
auth_url = http://mcontroller605:35357
memcached_servers = mcontroller605:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = haoning
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[glance]
api_servers = http://mcontroller605:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp


开启nova的服务
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service



nova的安装★★★★★★★★Install and configure a compute node
yum install openstack-nova-compute -y



/etc/nova/nova.conf //注意ip是计算节点自己
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.139.193
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
#########这里要确认haohaohao
[oslo_messaging_rabbit]
rabbit_host = mcontroller605
rabbit_userid = openstack
rabbit_password = haoning
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://mcontroller605:6080/vnc_auto.html
[glance]
api_servers = http://mcontroller605:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[libvirt]
block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED,VIR_MIGRATE_NON_SHARED_INC
live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED
live_migration_uri = qemu+tcp://%s/system


egrep -c '(vmx|svm)' /proc/cpuinfo

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
openstack compute service list



# network
cat /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0

 
# Kernel settings for compute1/compute2: disable reverse-path filtering for
# neutron routing and let bridged traffic pass through iptables.
#compute1,compute2

cat /etc/sysctl.conf

net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1

# Install Open vSwitch and create the external bridge used by
# bridge_mappings (physnet1:br-ex) later in the neutron config.
yum install openvswitch -y
systemctl enable openvswitch
systemctl start openvswitch
ovs-vsctl add-br br-ex


##########################neutron 所有节点的内容#################
yum install openstack-neutron python-neutron openstack-neutron-ml2 openstack-neutron-openvswitch ebtables net-tools openstack-utils  -y
/etc/neutron/neutron.conf
#################### 修改neutron.conf配置项
openstack-config --set /etc/neutron/neutron.conf DEFAULT api_workers 4        
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:haoning@mcontroller605
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT router_distributed true
openstack-config --set /etc/neutron/neutron.conf DEFAULT l3_ha false
openstack-config --set /etc/neutron/neutron.conf DEFAULT max_l3_agents_per_router 0
openstack-config --set /etc/neutron/neutron.conf DEFAULT min_l3_agents_per_router 1 
# 根据网络节点个数配置
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:haoning@mcontroller605/neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://mcontroller605:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://mcontroller605:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default      
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password neutron
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://mcontroller605:35357
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default      
openstack-config --set /etc/neutron/neutron.conf nova region_name wuhan
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password haoning
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp

##################### 修改plugins/ml2/ml2_conf.ini
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan 
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan,flat
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch,l2population
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security 
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks '*'
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 2001:4000 
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset True

#####################################

########## 修改plugins/ml2/openvswitch_agent.ini
export my_tenant_ip=`ifconfig eth2 | grep inet -w | awk '{print $2}'`
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent tunnel_types vxlan 
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent l2_population True 
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent arp_responder True 
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent enable_distributed_routing True
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini agent tunnel_csum True
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs of_interface native
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs ovsdb_interface native 
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs local_ip $my_tenant_ip
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings physnet1:br-ex 
openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver


#####################
#修改l3_agent.ini
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver openvswitch 
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT ha_vrrp_auth_password password


# 修改metadata_agent.ini
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_ip mcontroller605 
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret neutron
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_workers 2



/etc/neutron/dnsmasq.conf
dhcp-option-force=26,1450


# 修改dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver openvswitch 
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True 
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dnsmasq_config_file /etc/neutron/dnsmasq.conf

# 创建文件链接
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

######neutron的控制节点mcontroller605
mysql -uroot -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'mcontroller605' IDENTIFIED BY 'haoning';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'mcompute605' IDENTIFIED BY 'haoning';
# 添加neutron 服务
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region wuhan network public http://mcontroller605:9696
openstack endpoint create --region wuhan network internal http://mcontroller605:9696
openstack endpoint create --region wuhan network admin http://mcontroller605:9696
# 添加neutron用户
openstack user create --domain default --project service --project-domain default --password neutron neutron
openstack role add --project service --user neutron --project-domain default --user-domain default admin

neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head

chown -R neutron:neutron /etc/neutron /var/lib/neutron /var/log/neutron

systemctl enable neutron-server;systemctl start neutron-server


#############网络节点的neutron
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr_snat
chown -R neutron:neutron /etc/neutron /var/lib/neutron /var/log/neutron
for id in neutron-{openvswitch-agent,dhcp-agent,metadata-agent,l3-agent};do systemctl enable $id;systemctl start $id;done

#####################计算节点的的neutron
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT agent_mode dvr
# 修改文件所有者
chown -R neutron:neutron /etc/neutron /var/lib/neutron /var/log/neutron
# 启动服务
for id in neutron-{openvswitch,metadata,l3}-agent;do systemctl enable $id;systemctl start $id;done
#如果起的有问题,就按照顺序依次起,注意log
#如果全都关闭后 neutron-rootwrap-daemon /etc/neutron/rootwrap.conf ,还存在,需要kill掉,确保没有neutron进程再重新起


##########nova的安装,所有节点
#########
yum install openstack-nova python-nova openstack-utils -y
export my_ip=`ifconfig br-ex | grep inet -w | awk '{print $2}'`
export my_block_storage_ip=`ifconfig eth1 | grep inet -w | awk '{print $2}'`
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip $my_ip
openstack-config --set /etc/nova/nova.conf DEFAULT my_block_storage_ip $my_block_storage_ip
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_interface_driver nova.network.linux_net.LinuxOVSInterfaceDriver
 
openstack-config --set /etc/nova/nova.conf DEFAULT scheduler_default_filters RetryFilter,AvailabilityZoneFilter,ComputeFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,AggregateMultiTenancyIsolation,AggregateInstanceExtraSpecsFilter,AggregateCoreFilter,AggregateRamFilter
openstack-config --set /etc/nova/nova.conf DEFAULT reclaim_instance_interval 7200
openstack-config --set /etc/nova/nova.conf DEFAULT resize_confirm_window 1
openstack-config --set /etc/nova/nova.conf DEFAULT flat_injected True
openstack-config --set /etc/nova/nova.conf DEFAULT injected_network_template /usr/lib/python2.7/site-packages/nova/virt/interfaces.template
openstack-config --set /etc/nova/nova.conf DEFAULT force_config_drive true
 
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:haoning@mcontroller605
openstack-config --set /etc/nova/nova.conf DEFAULT osapi_compute_workers 2
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_workers 2
  
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:haoning@mcontroller605/nova
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:haoning@mcontroller605/nova_api
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://mcontroller605:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://mcontroller605:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password haoning
  
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address $my_ip
  
openstack-config --set /etc/nova/nova.conf glance api_servers http://mcontroller605:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  
openstack-config --set /etc/nova/nova.conf neutron url http://mcontroller605:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://mcontroller605:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name wuhan
openstack-config --set /etc/nova/nova.conf neutron project_name neutron
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron password neutron
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy true
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret neutron
  
#openstack-config --set /etc/nova/nova.conf cinder os_region_name wuhan
#openstack-config --set /etc/nova/nova.conf cinder catalog_info volumev2:cinderv2:internalURL
#openstack-config --set /etc/nova/nova.conf libvirt images_rbd_pool volumes
#openstack-config --set /etc/nova/nova.conf libvirt images_rbd_ceph_conf /etc/ceph/ceph.conf
#openstack-config --set /etc/nova/nova.conf libvirt rbd_user awcloud
#openstack-config --set /etc/nova/nova.conf libvirt rbd_secret_uuid a7f64266-0894-4f1e-a635-d0aeaca0e993
 
openstack-config --set /etc/nova/nova.conf conductor workers 2
 
openstack-config --set /etc/nova/nova.conf cache enabled true
openstack-config --set /etc/nova/nova.conf cache memcache_servers mcontroller605:11211
openstack-config --set /etc/nova/nova.conf cache backend oslo_cache.memcache_pool
openstack-config --set /etc/nova/nova.conf cache debug_cache_backend true
openstack-config --set /etc/nova/nova.conf cache expiration_time 600
 
# 如果是在虚拟机环境当中,则还需添加如下的设置
#openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
#openstack-config --set /etc/nova/nova.conf libvirt cpu_mode none
openstack-config --set /etc/nova/nova.conf libvirt block_migration_flag VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED,VIR_MIGRATE_NON_SHARED_INC
openstack-config --set /etc/nova/nova.conf libvirt live_migration_flag VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED
openstack-config --set /etc/nova/nova.conf libvirt live_migration_uri qemu+tcp://%s/system


################################
nova安装(mcontroller605)
################################
#控制节点可以没有这个
export my_ip=`ifconfig br-ex | grep inet -w | awk '{print $2}'`
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://$my_ip:6080/vnc_auto.html

nova-manage api_db sync
nova-manage db sync

# 修改文件所有者
chown -R nova:nova /etc/nova /var/lib/nova /var/log/nova

# 设置开机自启动并启动服务
for id in openstack-nova-{conductor,api,scheduler,consoleauth,novncproxy};do systemctl enable $id;systemctl start $id;done

################nova安装(compute1,compute2)
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://<controller的br-ex上的ip地址>:6080/vnc_auto.html


chown -R nova:nova /etc/nova /var/lib/nova /var/log/nova
for id in openstack-nova-{conductor,scheduler,compute};do systemctl enable $id;systemctl start $id;done



##################实验##############################################

nova service-list
neutron ext-list
neutron agent-list

★★★★★★★建个vm
##注意这里要配置#####openstack-config --set /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings physnet1:br-ex

neutron net-create --shared --provider:physical_network physnet1  --provider:network_type flat provider

neutron subnet-create --name provider --allocation-pool start=192.168.139.220,end=192.168.139.225 --dns-nameserver 8.8.8.8 --gateway 192.168.128.1 provider 192.168.128.0/20

neutron net-create selfservice


neutron subnet-create --name selfservice --dns-nameserver 8.8.8.8 --gateway 172.16.1.1 selfservice 172.16.1.0/24

neutron net-show provider
neutron net-update provider --router:external

neutron net-show provider
neutron router-create router
neutron router-interface-add router selfservice
neutron router-gateway-set router provider

neutron port-list
neutron router-port-list router
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano


ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list


openstack security group list
openstack security group rule create --proto icmp default

openstack security group rule create --proto tcp --dst-port 22 default


openstack flavor list
openstack image list
openstack network list
openstack security group list

外网
#openstack server create --flavor m1.tiny --image cirros  --nic net-id=PROVIDER_NET_ID --security-group default  --key-name mykey provider-instance

openstack server list
openstack console url show provider-instance
ssh cirros@203.0.113.103

内网
nova service-list
neutron net-list  /  openstack network list
openstack server create --flavor m1.tiny --image cirros --nic net-id=3dd2b74b-d138-4200-91ea-f1f8f7d56bae --security-group default --key-name mykey selfservice-instance

或者
nova boot --flavor m1.tiny  --image cirros  --nic net-id=3dd2b74b-d138-4200-91ea-f1f8f7d56bae --security-group default  --key-name mykey provider-instance
指定节点建vm
nova boot --flavor m1.tiny  --image cirros  --nic net-id=3dd2b74b-d138-4200-91ea-f1f8f7d56bae --security-group default  --key-name mykey --availability-zone nova:mcompute605 instance-hello



openstack console url show selfservice-instance

或者
nova get-vnc-console c15dea61-cc2d-41eb-897d-822c6e58d5b7 novnc
nova list
ip netns exec qdhcp-a61423e7-4a5f-49d9-856f-1fdcb61c4418 ssh cirros@172.16.1.3 

浮动ip
#openstack ip floating create provider
192.168.139.222 
#openstack ip floating add 192.168.139.222  selfservice-instance
ssh cirros@192.168.139.222

openstack ip floating list

nova list  / openstack server list

浮动ip是192.168.139.222
dvr的计算节点:
ip netns exec qrouter-4d62ef1c-8f58-4e0e-9009-1e59e49cafdb iptables -t nat -L |grep 222

nova hypervisor-list
nova service-list
nova-manage cell_v2 discover_hosts
nova-manage cell_v2 list_cells
openstack endpoint list

如果关机了,重新启动
nova stop selfservice-instance
nova start selfservice-instance


#如果是从linuxbridge切换成openvswitch
#ovs-vsctl add-br br-ex
#systemctl disable neutron-linuxbridge-agent.service
#systemctl enable neutron-openvswitch-agent.service
#
#systemctl start neutron-openvswitch-agent.service



# Quick health checks: compute services, neutron agents and loaded extensions.
nova service-list
neutron agent-list
# fixed typo: was "neutorn ext-list"
neutron ext-list


#############################如果要删除网络话
nova delete selfservice-instance
neutron router-interface-delete router selfservice
neutron router-gateway-clear router provider

neutron net-delete provider
neutron net-delete selfservice
neutron router-delete router



/usr/lib/python2.7/site-packages/neutron/agent/l3/dvr_local_router.py

异常重启后可能导致rabbit不可用,需要重新建openstack用户和给权限
重启相关服务
启动vm
nova start selfservice-instance


#去掉注释的看配置文件

grep -v ^$ /etc/nova/nova.conf | grep -v ^#


调试技巧:
打印log,
比如
vim /usr/lib/python2.7/site-packages/eventlet/convenience.py
############
# Debug helper: paste into a suspect module (e.g. eventlet/convenience.py)
# to get a private rotating log at /opt/tst.log without touching the
# service's own logging config.
import logging
import logging.handlers
LOG_FILE = '/opt/tst.log'
# Rotate at 1 MiB, keep 5 backups.
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes = 1024*1024, backupCount = 5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'

formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)

# Named logger "tst"; DEBUG level so both calls below are written.
logger = logging.getLogger('tst')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.info('first info message haoning')
logger.debug('first debug message haoning')
#############
注意设置权限 chmod 777 /opt
在出错的地方打印logger.info('first info message haoning')



###############dvr模式下,计算节点的维护
neutron相关的维护
neutron agent-list
重启后如果出现问题,需要重启计算节点的相关agent
删掉不可用的port
ovs-vsctl del-port   vxlan-0a030303
ovs-vsctl del-port  qvo013a8b51-83

ovs-vsctl del-br br-ex
ovs-vsctl del-br br-int
ovs-vsctl del-br br-tun

启动服务后, br-int 和br-tun 会自动建立

#for id in neutron-{openvswitch,metadata,l3}-agent;do systemctl enable $id;systemctl start $id;done
systemctl stop neutron-openvswitch-agent
systemctl stop neutron-metadata-agent
systemctl stop neutron-l3-agent
ps -ef|grep neutron
如果有neutron-rootwrap-daemon /etc/neutron/rootwrap.conf什么的,kill掉再重新启动
systemctl start neutron-openvswitch-agent
systemctl start neutron-metadata-agent
systemctl start neutron-l3-agent



控制节点的nova
for id in openstack-nova-{conductor,api,scheduler,consoleauth,novncproxy};do systemctl enable $id;systemctl start $id;done
systemctl stop openstack-nova-conductor
systemctl stop openstack-nova-api
systemctl stop openstack-nova-scheduler
systemctl stop openstack-nova-consoleauth
systemctl stop openstack-nova-novncproxy

systemctl start openstack-nova-conductor
systemctl start openstack-nova-api
systemctl start openstack-nova-scheduler
systemctl start openstack-nova-consoleauth
systemctl start openstack-nova-novncproxy



计算节点的
#for id in openstack-nova-{conductor,scheduler,compute};do systemctl enable $id;systemctl start $id;done
nova service-list
systemctl stop openstack-nova-conductor
systemctl stop openstack-nova-scheduler
systemctl stop openstack-nova-compute

systemctl start openstack-nova-conductor
systemctl start openstack-nova-scheduler
systemctl start openstack-nova-compute





#######虚拟机迁移########
############### plain qemu migration (without nova), for reference
192.168.139.193 迁移到的一端,设置接收端口
# Destination side: start qemu waiting for an incoming migration on TCP 6666.
/usr/libexec/qemu-kvm  linux-0.2.img  -vnc 0.0.0.0:1  -incoming tcp:0:6666  
192.168.139.194 被迁移的一端
# Source side: run the VM normally.
/usr/libexec/qemu-kvm  linux-0.2.img  -vnc 0.0.0.0:2 
第二台control + alt 2
# In the source qemu monitor (Ctrl+Alt+2), push the VM to the destination:
(qemu)migrate tcp:192.168.139.193:6666



#############使用nova的动态迁移:
需要设置libvirtd打开16509端口
http://blog.csdn.net/qq_21398167/article/details/48291065

nova migrate --poll  1aa1118b-2c3a-4327-b574-adfd2d6611e0
竟然是重建,丢数据啊
nova live-migration d46a18dc-fb6f-4499-ac67-4b8bc2479e14 mcompute612
nova --debug live-migration 1aa1118b-2c3a-4327-b574-adfd2d6611e0 mcompute605
nova live-migration --block-migrate f3f7d3ed-e4b9-4fc3-9756-76f2d8bdb282 mcompute605

# Make libvirtd listen on plain TCP 16509 (no TLS, no auth) so nova
# live-migration can use qemu+tcp:// between hosts. Edit:
/etc/libvirt/libvirtd.conf
listen_tls = 0    
listen_tcp = 1    
tcp_port = "16509"
listen_addr = "0.0.0.0"
auth_tcp = "none"


# Either run libvirtd manually with --listen ...
libvirtd --daemon --listen --config /etc/libvirt/libvirtd.conf
或者
# ... or make it permanent via the sysconfig file and systemd:
vim /etc/sysconfig/libvirtd
LIBVIRTD_CONFIG=/etc/libvirt/libvirtd.conf
LIBVIRTD_ARGS="--listen"

systemctl start libvirtd

# Verify the listener and try connecting from another host.
netstat -nltp|grep 16509
virsh -c qemu+tcp://mcompute605/system


nova-manage service list
virsh -c qemu+tcp://mcontroller605/system list  --all
virsh -c qemu+tcp://mcompute605/system list  --all
virsh -c qemu+tcp://mcompute612/system list  --all


#########安装nfs########
参考http://www.linuxidc.com/Linux/2015-05/117378.htm
yum install nfs-utils portmap -y
#############
nova的vm在 /var/lib/nova/instances
# /etc/exports on the NFS server (controller, 192.168.139.161).
# NOTE: the original used 192.168.139.0/20, which is not a valid /20 network
# boundary; the /20 containing 192.168.139.x is 192.168.128.0/20 (consistent
# with the gateway 192.168.128.1 and the provider subnet used earlier).
cat /etc/exports
/opt/share/ 192.168.128.0/20(rw,no_root_squash,no_all_squash,sync,anonuid=501,anongid=501)
/var/lib/nova/instances/ 192.168.128.0/20(rw,no_root_squash,no_all_squash,sync,anonuid=501,anongid=501)
#################
# Re-export everything so the new entries take effect.
exportfs -r
# List what the server allows mounting.
showmount -e 192.168.139.161
mkdir /opt/share
# On each compute client (default transport is udp):
mount -t nfs 192.168.139.161:/var/lib/nova/instances/ /var/lib/nova/instances/
# ... or force tcp:
mount -t nfs 192.168.139.161:/var/lib/nova/instances/ /var/lib/nova/instances/ -o proto=tcp -o nolock
nova boot --flavor m1.tiny  --image cirros  --nic net-id=3dd2b74b-d138-4200-91ea-f1f8f7d56bae --security-group default  --key-name mykey provider-616

nova live-migration 8caa13bc-ee39-4da5-b0a4-aaff9edb2a7d mcompute605
ip netns
ip netns exec  qdhcp-3dd2b74b-d138-4200-91ea-f1f8f7d56bae  ssh cirros@172.16.1.10








分享到:
评论

相关推荐

Global site tag (gtag.js) - Google Analytics