Not finished yet....

VNX Openstack Stein four nodes classic scenario using Open vSwitch

Introduction

This is an Openstack tutorial scenario designed to experiment with Openstack, a free and open-source software platform for cloud computing.

The scenario is made of four virtual machines: a controller node, a network node and two compute nodes, all based on LXC. Optionally, new compute nodes can be added by starting additional VNX scenarios.

The Openstack version used is Stein (April 2019), running over Ubuntu 18.04 LTS. The deployment scenario is the one that was named "Classic with Open vSwitch" in previous versions of the Openstack documentation (https://docs.openstack.org/liberty/networking-guide/scenario-classic-ovs.html).

The scenario is similar to the ones developed by Raul Alvarez to test OpenDaylight-Openstack integration, but instead of using Devstack to configure the Openstack nodes, the configuration is done by means of commands integrated into the VNX scenario, following the Openstack Stein installation recipes.

Figure 1: Openstack tutorial scenario

Requirements

To use the scenario you need a Linux computer (Ubuntu 16.04 or later recommended) with VNX software installed. At least 8 GB of memory is needed to execute the scenario.
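You can check the memory available on your host with a generic Linux command (not VNX-specific):

free -h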

See how to install VNX here: http://vnx.dit.upm.es/vnx/index.php/Vnx-install

If already installed, update VNX to the latest version with:

vnx_update

Installation

Download the scenario with the virtual machine images included and unpack it:

wget http://idefix.dit.upm.es/download/vnx/examples/openstack/openstack_lab-stein_4n_classic_ovs-v01-with-rootfs.tgz
sudo vnx --unpack openstack_lab-stein_4n_classic_ovs-v01-with-rootfs.tgz

Starting the scenario

Start the scenario, configure it and load example Cirros and Ubuntu images with:

cd openstack_lab-stein_4n_classic_ovs-v01
# Start the scenario
sudo vnx -f openstack_lab.xml -v --create
# Configure all Openstack services
vnx -f openstack_lab.xml -v -x start-all
# Load vm images in GLANCE
vnx -f openstack_lab.xml -v -x load-img


Figure 2: Openstack tutorial detailed topology

Once started, you can connect to the Openstack Dashboard (default/admin/xxxx) by starting a browser and pointing it to the controller horizon page. For example:

firefox 10.0.10.11/horizon

Access the Dashboard page "Project|Network|Network topology" and create a simple demo scenario inside Openstack with:

vnx -f openstack_lab.xml -v -x create-demo-scenario

You should see the simple scenario as it is being created through the Dashboard.

Once created, you should be able to access the vm1 console, and to ping or ssh from the host to vm1 or the opposite (see the floating IP assigned to vm1 in the Dashboard, probably 10.0.10.102).
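For example, from the host (assuming the floating IP is 10.0.10.102; check the actual value in the Dashboard; Cirros 0.3.4 images use the default credentials cirros/cubswin:)):

ping 10.0.10.102
ssh cirros@10.0.10.102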

You can create a second virtual machine (vm2) to test connectivity between virtual machines with:

vnx -f openstack_lab.xml -v -x create-demo-vm2

To allow external Internet access from vm1 you have to configure NAT in the host. You can easily do it using the vnx_config_nat command distributed with VNX. Just find out the name of the public network interface of your host (e.g. eth0) and execute:

vnx_config_nat ExtNet eth0
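If you are not sure which interface to use, the one holding the default route is usually the right choice; it can be found with a generic Linux command (not part of VNX):

ip route | awk '/^default/ {print $5}'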

Besides, you can access the Openstack controller by ssh from the host and execute management commands directly:

slogin root@controller     # root/xxxx
source bin/admin-openrc.sh # Load admin credentials

For example, to show the virtual machines started:

openstack server list
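Other standard Openstack CLI commands can be used in the same way to inspect the deployment, for example:

openstack network list
openstack image list
openstack hypervisor list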

You can also execute those commands from the host where the virtual scenario is started. For that purpose, you need to install the Openstack client first:

pip install python-openstackclient
source bin/admin-openrc.sh # Load admin credentials
openstack server list


Connecting Openstack VMs to external systems using VLAN network interfaces

Compute nodes in this scenario have two network interfaces for internal and external connections:

  • eth2, connected to the Tunnel network and used to connect with VMs in other compute nodes or with routers in the network node
  • eth3, connected to the VLAN network and used for the same purpose, and also to connect to external systems through the VLAN-based network infrastructure.

To demonstrate how Openstack VMs can be connected with external systems through the VLAN network switches, an additional demo scenario is included. Just execute:

vnx -f openstack_lab.xml -v -x create-vlan-demo-scenario

That scenario will create two new networks and subnetworks associated with VLANs 1000 and 1001, and two VMs, vm3 and vm4, connected to those networks. You can see the scenario created through the Openstack Dashboard.

The commands used to create those networks and VMs are the following (they can also be consulted in the scenario XML file):

# Networks
neutron net-create vlan1000 --shared --provider:physical_network vlan --provider:network_type vlan   --provider:segmentation_id 1000
neutron net-create vlan1001 --shared --provider:physical_network vlan --provider:network_type vlan   --provider:segmentation_id 1001
neutron subnet-create vlan1000 10.1.2.0/24 --name vlan1000-subnet --allocation-pool start=10.1.2.2,end=10.1.2.99 --gateway 10.1.2.1 --dns-nameserver 8.8.8.8
neutron subnet-create vlan1001 10.1.3.0/24 --name vlan1001-subnet --allocation-pool start=10.1.3.2,end=10.1.3.99 --gateway 10.1.3.1 --dns-nameserver 8.8.8.8

# VMs
mkdir -p tmp
openstack keypair create vm3 > tmp/vm3
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm3 --nic net-id=vlan1000
openstack keypair create vm4 > tmp/vm4
openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm4 --nic net-id=vlan1001
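The private keys are saved in tmp/vm3 and tmp/vm4 and can be used to ssh into the VMs once their addresses are known (a usage sketch; <vm3-address> is a placeholder for the address shown by openstack server list):

chmod 600 tmp/vm3
ssh -i tmp/vm3 cirros@<vm3-address>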

To demonstrate the connectivity of vm3 and vm4 with external systems connected on VLANs 1000/1001, you can start an additional virtual scenario which creates three additional systems: vmA (VLAN 1000), vmB (VLAN 1001) and vlan-router (connected to both VLANs). To start it just execute:

vnx -f openstack_lab-vms-vlan.xml -v -t

Once the scenario is started, you should be able to ping and ssh among vm3, vm4, vmA and vmB.
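For example, list the addresses assigned to the VMs and then ping between consoles (the exact addresses depend on DHCP allocation; <vm4-address> is a placeholder):

openstack server list -c Name -c Networks
# then, from the vm3 console:
ping <vm4-address>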

You can have a look at the virtual switch that supports the Openstack VLAN Network by executing the following command in the host:

ovs-vsctl show


Figure 3: Openstack Dashboard view of the demo virtual scenarios created

Stopping or releasing the scenario

To stop the scenario preserving the configuration and the changes made:

vnx -f openstack_lab.xml -v --shutdown

To start it again use:

vnx -f openstack_lab.xml -v --start

To stop the scenario destroying all the configuration and changes made:

vnx -f openstack_lab.xml -v --destroy

To unconfigure the NAT, just execute (replace eth0 with the name of your external interface):

vnx_config_nat -d ExtNet eth0

Other useful information

To pack the scenario in a tgz file:

bin/pack-scenario-with-rootfs   # including rootfs
bin/pack-scenario               # without rootfs

Other Openstack Dashboard screen captures

Figure 4: Openstack Dashboard compute overview
Figure 5: Openstack Dashboard view of the demo virtual machines created

XML specification of Openstack tutorial scenario

<?xml version="1.0" encoding="UTF-8"?>

<!--
~~~~~~~~~~~~~~~~~~~~~~
Name:        openstack_tutorial-stein

Description: This is an Openstack tutorial scenario designed to experiment with Openstack free and open-source
             software platform for cloud-computing. It is made of four LXC containers:
               - one controller
               - one network node
               - two compute nodes
             Openstack version used: Stein.
             The network configuration is based on the one named "Classic with Open vSwitch" described here:
                   http://docs.openstack.org/liberty/networking-guide/scenario-classic-ovs.html

Author:      David Fernandez (david@dit.upm.es)

This file is part of the Virtual Networks over LinuX (VNX) Project distribution.
(www: http://www.dit.upm.es/vnx - e-mail: vnx@dit.upm.es)

Copyright (C) 2019 Departamento de Ingenieria de Sistemas Telematicos (DIT)
      Universidad Politecnica de Madrid (UPM)
               SPAIN
~~~~~~~~~~~~~~~~~~~~~~
-->

<vnx xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:noNamespaceSchemaLocation="/usr/share/xml/vnx/vnx-2.00.xsd">
 <global>
   <version>2.0</version>
   <scenario_name>openstack_tutorial-stein</scenario_name>
   <ssh_key>/root/.ssh/id_rsa.pub</ssh_key>
   <automac offset="0"/>
   <vm_mgmt type="private" network="10.20.0.0" mask="24" offset="0">
      <host_mapping />
   </vm_mgmt> 
   <vm_defaults>
       <console id="0" display="no"/>
       <console id="1" display="yes"/>
   </vm_defaults>
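    <!-- Command sequences: executed from the host with "vnx -f openstack_lab.xml -v -x <seq>" -->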
   <cmd-seq seq="step1-6">step1,step2,step3,step3b,step4,step5,step6</cmd-seq>
   <cmd-seq seq="step4">step41,step42,step43,step44</cmd-seq>
   <cmd-seq seq="step5">step51,step52,step53</cmd-seq>
   <cmd-seq seq="step10">step101,step102</cmd-seq>
   <cmd-seq seq="start-all">step00,step42,step43,step44,step52,step53</cmd-seq>
   <cmd-seq seq="discover-hosts">step44</cmd-seq>
 </global>
 <net name="MgmtNet" mode="openvswitch" mtu="1450"/>
 <net name="TunnNet" mode="openvswitch" mtu="1450"/>
 <net name="ExtNet"  mode="openvswitch" />
 <net name="VlanNet" mode="openvswitch" />
 <net name="virbr0"  mode="virtual_bridge" managed="no"/>
 <vm name="controller" type="lxc" arch="x86_64">
   <filesystem type="cow">filesystems/rootfs_lxc_ubuntu64-ostack-controller</filesystem>
   <mem>1G</mem>
   <if id="1" net="MgmtNet">
     <ipv4>10.0.0.11/24</ipv4>
   </if>
   <if id="2" net="ExtNet">
     <ipv4>10.0.10.11/24</ipv4>
   </if>
   <if id="9" net="virbr0">
     <ipv4>dhcp</ipv4>
   </if>
   <filetree seq="on_boot" root="/root/">conf/hosts</filetree>
   <exec seq="on_boot" type="verbatim">
       cat /root/hosts >> /etc/hosts;
       rm /root/hosts;
   </exec>


   <filetree seq="on_boot" root="/root/">conf/controller/bin</filetree>
   <exec seq="on_boot" type="verbatim">
       chmod +x /root/bin/*
   </exec>
   <exec seq="on_boot" type="verbatim">
       # Change MgmtNet and TunnNet interfaces MTU
       ifconfig eth1 mtu 1450
       sed -i -e '/iface eth1 inet static/a \   mtu 1450' /etc/network/interfaces
       # Change owner of secret_key to horizon to avoid a 500 error when
        # accessing horizon (new problem arose in v04)
       # See: https://ask.openstack.org/en/question/30059/getting-500-internal-server-error-while-accessing-horizon-dashboard-in-ubuntu-icehouse/
       chown horizon /var/lib/openstack-dashboard/secret_key
        # Stop nova services. Before being configured, they consume a lot of CPU
        service nova-scheduler stop
        service nova-api stop
        service nova-conductor stop
   </exec>
   <exec seq="step00" type="verbatim">
        # Restart nova services
        service nova-scheduler start
        service nova-api start
        service nova-conductor start
   </exec>
   <filetree seq="step1" root="/etc/mysql/mariadb.conf.d/">conf/controller/mysql/mysqld_openstack.cnf</filetree>
   <filetree seq="step1" root="/etc/">conf/controller/memcached/memcached.conf</filetree>
   <filetree seq="step1" root="/etc/default/">conf/controller/etcd/etcd</filetree>
   <exec seq="step1" type="verbatim">
        # Stop nova services. Before being configured, they consume a lot of CPU
        service nova-scheduler stop
        service nova-api stop
        service nova-conductor stop

        # Change all occurrences of utf8mb4 to utf8. See comment above
       #for f in $( find /etc/mysql/mariadb.conf.d/ -type f ); do echo "Changing utf8mb4 to utf8 in file $f"; sed -i -e 's/utf8mb4/utf8/g' $f; done
       service mysql restart
       #mysql_secure_installation # to be run manually
       rabbitmqctl add_user openstack xxxx
       rabbitmqctl set_permissions openstack ".*" ".*" ".*" 
       service memcached restart
       systemctl enable etcd
       systemctl start etcd
       #service mongodb stop
       #rm -f /var/lib/mongodb/journal/prealloc.*
       #service mongodb start
   </exec>
   <filetree seq="step2" root="/etc/keystone/">conf/controller/keystone/keystone.conf</filetree>
   <filetree seq="step2" root="/root/bin/">conf/controller/keystone/admin-openrc.sh</filetree>
   <filetree seq="step2" root="/root/bin/">conf/controller/keystone/demo-openrc.sh</filetree>
   <exec seq="step2" type="verbatim">
       count=1; while ! mysqladmin ping ; do echo -n $count; echo ": waiting for mysql ..."; ((count++)) && ((count==6)) && echo "--" && echo "-- ERROR: database not ready." && echo "--" && break; sleep 2; done
   </exec>
   <exec seq="step2" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE keystone;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       su -s /bin/sh -c "keystone-manage db_sync" keystone
       keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
       keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
       keystone-manage bootstrap --bootstrap-password xxxx \
         --bootstrap-admin-url http://controller:5000/v3/ \
         --bootstrap-internal-url http://controller:5000/v3/ \
         --bootstrap-public-url http://controller:5000/v3/ \
         --bootstrap-region-id RegionOne
       echo "ServerName controller" >> /etc/apache2/apache2.conf
       #ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled
       service apache2 restart
       rm -f /var/lib/keystone/keystone.db
       sleep 5
       export OS_USERNAME=admin
       export OS_PASSWORD=xxxx
       export OS_PROJECT_NAME=admin
       export OS_USER_DOMAIN_NAME=Default
       export OS_PROJECT_DOMAIN_NAME=Default
       export OS_AUTH_URL=http://controller:5000/v3
       export OS_IDENTITY_API_VERSION=3
       # Create users and projects
       openstack project create --domain default --description "Service Project" service
       openstack project create --domain default --description "Demo Project" demo
       openstack user create --domain default --password=xxxx demo
       openstack role create user
       openstack role add --project demo --user demo user
   </exec>
   <filetree seq="step3" root="/etc/glance/">conf/controller/glance/glance-api.conf</filetree>
   <filetree seq="step3" root="/etc/glance/">conf/controller/glance/glance-registry.conf</filetree>
   <exec seq="step3" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE glance;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password=xxxx glance
       openstack role add --project service --user glance admin
       openstack service create --name glance --description "OpenStack Image" image
       openstack endpoint create --region RegionOne image public http://controller:9292
       openstack endpoint create --region RegionOne image internal http://controller:9292
       openstack endpoint create --region RegionOne image admin http://controller:9292
       su -s /bin/sh -c "glance-manage db_sync" glance
       service glance-registry restart
       service glance-api restart
       #rm -f /var/lib/glance/glance.sqlite
   </exec>
   <filetree seq="step3b" root="/etc/placement/">conf/controller/placement/placement.conf</filetree>
   <exec seq="step3b" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE placement;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password=xxxx placement
       openstack role add --project service --user placement admin
       openstack service create --name placement --description "Placement API" placement
       openstack endpoint create --region RegionOne placement public   http://controller:8778
       openstack endpoint create --region RegionOne placement internal http://controller:8778
       openstack endpoint create --region RegionOne placement admin    http://controller:8778
       su -s /bin/sh -c "placement-manage db sync" placement
       service apache2 restart
   </exec>


   <filetree seq="step41" root="/etc/nova/">conf/controller/nova/nova.conf</filetree>
   <exec seq="step41" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE nova_api;"
       mysql -u root --password='xxxx' -e "CREATE DATABASE nova;"
       mysql -u root --password='xxxx' -e "CREATE DATABASE nova_cell0;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova_api.*   TO 'nova'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova_api.*   TO 'nova'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova.*       TO 'nova'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova.*       TO 'nova'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password=xxxx nova
       openstack role add --project service --user nova admin
       openstack service create --name nova --description "OpenStack Compute" compute
       openstack endpoint create --region RegionOne compute public   http://controller:8774/v2.1
       openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
       openstack endpoint create --region RegionOne compute admin    http://controller:8774/v2.1
        # Restart services stopped at step 1 to save CPU
        service nova-scheduler start
        service nova-api start
        service nova-conductor start
        su -s /bin/sh -c "nova-manage api_db sync" nova
        su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
        su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
        su -s /bin/sh -c "nova-manage db sync" nova
        su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
       service nova-api restart
       service nova-consoleauth restart
       service nova-scheduler restart
       service nova-conductor restart
       service nova-novncproxy restart
       #rm -f /var/lib/nova/nova.sqlite
   </exec>
   <exec seq="step43" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Wait for compute1 hypervisor to be up
       while [ $( openstack hypervisor list --matching compute1 -f value -c State ) != 'up' ]; do echo "waiting for compute1 hypervisor..."; sleep 5; done
   </exec>
   <exec seq="step43" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Wait for compute2 hypervisor to be up
       while [ $( openstack hypervisor list --matching compute2 -f value -c State ) != 'up' ]; do echo "waiting for compute2 hypervisor..."; sleep 5; done
   </exec>
   <exec seq="step44" type="verbatim">
       source /root/bin/admin-openrc.sh
       openstack hypervisor list
       su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
   </exec>
   <filetree seq="step51" root="/etc/neutron/">conf/controller/neutron/neutron.conf</filetree>
   <filetree seq="step51" root="/etc/neutron/plugins/ml2/">conf/controller/neutron/ml2_conf.ini</filetree>
   <exec seq="step51" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE neutron;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password=xxxx neutron
       openstack role add --project service --user neutron admin
       openstack service create --name neutron --description "OpenStack Networking" network
       openstack endpoint create --region RegionOne network public   http://controller:9696
       openstack endpoint create --region RegionOne network internal http://controller:9696
       openstack endpoint create --region RegionOne network admin    http://controller:9696
       su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
       # LBaaS
       neutron-db-manage --subproject neutron-lbaas upgrade head
       # FwaaS
       neutron-db-manage --subproject neutron-fwaas upgrade head
       # LBaaS Dashboard panels
       #git clone https://git.openstack.org/openstack/neutron-lbaas-dashboard
       #cd neutron-lbaas-dashboard
       #git checkout stable/mitaka
       #python setup.py install
       #cp neutron_lbaas_dashboard/enabled/_1481_project_ng_loadbalancersv2_panel.py /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/
       #cd /usr/share/openstack-dashboard
       #./manage.py collectstatic --noinput
       #./manage.py compress
       #sudo service apache2 restart
       service nova-api restart
       service neutron-server restart
   </exec>
   <filetree seq="step6" root="/etc/openstack-dashboard/">conf/controller/dashboard/local_settings.py</filetree>
   <exec seq="step6" type="verbatim">
        #chown www-data:www-data /var/lib/openstack-dashboard/secret_key
        rm /var/lib/openstack-dashboard/secret_key
        systemctl enable apache2
       service apache2 restart
   </exec>
   <cmd-seq seq="step7">step71,step72,step73</cmd-seq>
   <exec seq="step71" type="verbatim">
       apt-get -y install python-trove python-troveclient   python-glanceclient trove-common trove-api trove-taskmanager trove-conductor python-pip
       pip install trove-dashboard==7.0.0.0b2
   </exec>
   <filetree seq="step72" root="/etc/trove/">conf/controller/trove/trove.conf</filetree>
   <filetree seq="step72" root="/etc/trove/">conf/controller/trove/trove-conductor.conf</filetree>
   <filetree seq="step72" root="/etc/trove/">conf/controller/trove/trove-taskmanager.conf</filetree>
   <filetree seq="step72" root="/etc/trove/">conf/controller/trove/trove-guestagent.conf</filetree>
   <exec seq="step72" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE trove;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON trove.* TO 'trove'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON trove.* TO 'trove'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password xxxx trove
       openstack role add --project service --user trove admin
       openstack service create --name trove --description "Database" database
       openstack endpoint create --region RegionOne database public http://controller:8779/v1.0/%\(tenant_id\)s
       openstack endpoint create --region RegionOne database internal http://controller:8779/v1.0/%\(tenant_id\)s
       openstack endpoint create --region RegionOne database admin http://controller:8779/v1.0/%\(tenant_id\)s
       su -s /bin/sh -c "trove-manage db_sync" trove
       service trove-api restart
       service trove-taskmanager restart
       service trove-conductor restart
       # Install trove_dashboard
       cp -a /usr/local/lib/python2.7/dist-packages/trove_dashboard/enabled/* /usr/share/openstack-dashboard/openstack_dashboard/local/enabled/
       service apache2 restart
   </exec>
   <exec seq="step73" type="verbatim">
       #wget -P /tmp/images http://tarballs.openstack.org/trove/images/ubuntu/mariadb.qcow2
       wget -P /tmp/images/ http://138.4.7.228/download/vnx/filesystems/ostack-images/trove/mariadb.qcow2
       glance image-create --name "trove-mariadb" --file /tmp/images/mariadb.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
       rm /tmp/images/mariadb.qcow2
       su -s /bin/sh -c "trove-manage --config-file /etc/trove/trove.conf datastore_update mysql " trove 
       su -s /bin/sh -c "trove-manage --config-file /etc/trove/trove.conf datastore_version_update mysql mariadb mariadb glance_image_ID  1" trove
       # Create example database
       openstack flavor show m1.smaller >/dev/null 2>&1 || openstack flavor create m1.smaller --ram 512 --disk 3 --vcpus 1 --id 6
       #trove create mysql_instance_1 m1.smaller --size 1 --databases myDB --users userA:xxxx --datastore_version mariadb --datastore mysql
   </exec>


   <filetree seq="step8" root="/etc/heat/">conf/controller/heat/heat.conf</filetree>
   <filetree seq="step8" root="/root/heat/">conf/controller/heat/examples</filetree>
   <exec seq="step8" type="verbatim">
       mysql -u root --password='xxxx' -e "CREATE DATABASE heat;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password xxxx heat
       openstack role add --project service --user heat admin
       openstack service  create --name heat     --description "Orchestration" orchestration
       openstack service  create --name heat-cfn --description "Orchestration" cloudformation
       openstack endpoint create --region RegionOne orchestration  public   http://controller:8004/v1/%\(tenant_id\)s
       openstack endpoint create --region RegionOne orchestration  internal http://controller:8004/v1/%\(tenant_id\)s
       openstack endpoint create --region RegionOne orchestration  admin    http://controller:8004/v1/%\(tenant_id\)s
       openstack endpoint create --region RegionOne cloudformation public   http://controller:8000/v1
       openstack endpoint create --region RegionOne cloudformation internal http://controller:8000/v1
       openstack endpoint create --region RegionOne cloudformation admin    http://controller:8000/v1
       openstack domain   create --description "Stack projects and users" heat
       openstack user     create --domain heat --password xxxx heat_domain_admin
       openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
       openstack role create heat_stack_owner
       openstack role add --project demo --user demo heat_stack_owner
       openstack role create heat_stack_user
       su -s /bin/sh -c "heat-manage db_sync" heat
       service heat-api restart
       service heat-api-cfn restart
       service heat-engine restart
       # Install Orchestration interface in Dashboard
        export DEBIAN_FRONTEND=noninteractive
        apt-get install -y gettext
        pip3 install heat-dashboard
        cd /root
        git clone https://github.com/openstack/heat-dashboard.git
        cd heat-dashboard/
        git checkout stable/stein
        cp heat_dashboard/enabled/_[1-9]*.py /usr/share/openstack-dashboard/openstack_dashboard/local/enabled
        python3 ./manage.py compilemessages
        cd /usr/share/openstack-dashboard
        DJANGO_SETTINGS_MODULE=openstack_dashboard.settings python3 manage.py collectstatic --noinput
        DJANGO_SETTINGS_MODULE=openstack_dashboard.settings python3 manage.py compress --force
        rm /var/lib/openstack-dashboard/secret_key
        service apache2 restart
   </exec>
   <exec seq="create-demo-heat" type="verbatim">
       source /root/bin/demo-openrc.sh
       # Create internal network
       openstack network create net-heat
       openstack subnet create --network net-heat --gateway 10.1.10.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.10.0/24 --allocation-pool start=10.1.10.8,end=10.1.10.100 subnet-heat
       mkdir -p /root/keys
       openstack keypair create key-heat > /root/keys/key-heat
       #export NET_ID=$(openstack network list | awk '/ net-heat / { print $2 }')
       export NET_ID=$( openstack network list --name net-heat -f value -c ID )
       openstack stack create -t /root/heat/examples/demo-template.yml --parameter "NetID=$NET_ID" stack
   </exec>
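    <!-- A manual check, not part of the scenario: once create-demo-heat has
         run, the stack can be followed with the standard client commands
         (python-heatclient plugin for the openstack CLI):
           source /root/bin/demo-openrc.sh
           openstack stack list
           openstack stack show stack
           openstack stack resource list stack
    -->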


   <cmd-seq seq="step9">step91,step92</cmd-seq>
   <exec seq="step91" type="verbatim">
       apt-get -y install python-pip git
       pip install --upgrade pip
   </exec>
   <filetree seq="step92" root="/usr/local/etc/tacker/">conf/controller/tacker/tacker.conf</filetree>
   <filetree seq="step92" root="/usr/local/etc/tacker/">conf/controller/tacker/default-vim-config.yaml</filetree>
   <filetree seq="step92" root="/root/tacker/">conf/controller/tacker/examples</filetree>
   <exec seq="step92" type="verbatim">
       sed -i -e 's/.*"resource_types:OS::Nova::Flavor":.*/    "resource_types:OS::Nova::Flavor": "role:admin",/' /etc/heat/policy.json 
       mysql -u root --password='xxxx' -e "CREATE DATABASE tacker;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON tacker.* TO 'tacker'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON tacker.* TO 'tacker'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
       source /root/bin/admin-openrc.sh
       openstack user create --domain default --password xxxx tacker
       openstack role add --project service --user tacker admin
       openstack service create --name tacker --description "Tacker Project" nfv-orchestration
       openstack endpoint create --region RegionOne nfv-orchestration public   http://controller:9890/
       openstack endpoint create --region RegionOne nfv-orchestration internal http://controller:9890/
       openstack endpoint create --region RegionOne nfv-orchestration admin    http://controller:9890/
       mkdir -p /root/tacker
       cd /root/tacker
       git clone https://github.com/openstack/tacker
       cd tacker
       git checkout stable/ocata
       pip install -r requirements.txt
       pip install tosca-parser
       python setup.py install
       mkdir -p /var/log/tacker
       /usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
       # Tacker client
       cd /root/tacker
       git clone https://github.com/openstack/python-tackerclient
       cd python-tackerclient
       git checkout stable/ocata
       python setup.py install
       # Tacker horizon
       cd /root/tacker
       git clone https://github.com/openstack/tacker-horizon
       cd tacker-horizon
       git checkout stable/ocata
       python setup.py install
       cp tacker_horizon/enabled/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/
       service apache2 restart
       # Start tacker server
       mkdir -p /var/log/tacker
       nohup python /usr/local/bin/tacker-server \
           --config-file /usr/local/etc/tacker/tacker.conf \
           --log-file /var/log/tacker/tacker.log &
       # Register default VIM
       tacker vim-register --is-default --config-file /usr/local/etc/tacker/default-vim-config.yaml \
           --description "Default VIM" "Openstack-VIM"
   </exec>
   <exec seq="step93" type="verbatim">
       nohup python /usr/local/bin/tacker-server \
           --config-file /usr/local/etc/tacker/tacker.conf \
           --log-file /var/log/tacker/tacker.log &
   </exec>
   <exec seq="create-demo-tacker" type="verbatim">
       source /root/bin/demo-openrc.sh
       # Create internal network
       openstack network create net-tacker
       openstack subnet create --network net-tacker --gateway 10.1.11.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.11.0/24 --allocation-pool start=10.1.11.8,end=10.1.11.100 subnet-tacker
       cd /root/tacker/examples
       tacker vnfd-create --vnfd-file sample-vnfd.yaml testd
        # Fails with error:
        # ERROR: Property error: : resources.VDU1.properties.image: : No module named v2.client
       tacker vnf-create --vnfd-id $( tacker vnfd-list | awk '/ testd / { print $2 }' ) test
   </exec>
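    <!-- A manual check, assuming the tacker server started in step92 is still
         running: the registered VIM and the VNF descriptors/instances can be
         listed with the same tacker CLI used above:
           source /root/bin/admin-openrc.sh
           tacker vim-list
           tacker vnfd-list
           tacker vnf-list
    -->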


   <exec seq="step101" type="verbatim">

export DEBIAN_FRONTEND=noninteractive #apt-get -y install python-pip

       #pip install --upgrade pip

#pip install gnocchi[mysql,keystone] gnocchiclient apt-get -y install ceilometer-collector ceilometer-agent-central ceilometer-agent-notification python-ceilometerclient

       apt-get -y install gnocchi-common gnocchi-api gnocchi-metricd gnocchi-statsd python-gnocchiclient
   </exec>
   <filetree seq="step102" root="/etc/ceilometer/">conf/controller/ceilometer/ceilometer.conf</filetree>
   <filetree seq="step102" root="/etc/gnocchi/">conf/controller/ceilometer/gnocchi.conf</filetree>
   <filetree seq="step102" root="/etc/gnocchi/">conf/controller/ceilometer/api-paste.ini</filetree>
   <exec seq="step102" type="verbatim">
       # Create gnocchi database
       mysql -u root --password='xxxx' -e "CREATE DATABASE gnocchidb default character set utf8;"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON gnocchidb.* TO 'gnocchi'@'%' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "GRANT ALL PRIVILEGES ON gnocchidb.* TO 'gnocchi'@'localhost' IDENTIFIED BY 'xxxx';"
       mysql -u root --password='xxxx' -e "flush privileges;"
        # Ceilometer
        source /root/bin/admin-openrc.sh
        openstack user create --domain default --password xxxx ceilometer
        openstack role add --project service --user ceilometer admin
        openstack service create --name ceilometer --description "Telemetry" metering
        openstack user create --domain default --password xxxx gnocchi
        openstack role add --project service --user gnocchi admin
        openstack service create --name gnocchi --description "Metric Service" metric
        openstack endpoint create --region RegionOne metric public   http://controller:8041
        openstack endpoint create --region RegionOne metric internal http://controller:8041
        openstack endpoint create --region RegionOne metric admin    http://controller:8041
       mkdir -p /var/cache/gnocchi
       chown gnocchi:gnocchi -R  /var/cache/gnocchi
       mkdir -p /var/lib/gnocchi
       chown gnocchi:gnocchi -R  /var/lib/gnocchi
       gnocchi-upgrade
       sed -i 's/8000/8041/g' /usr/bin/gnocchi-api
       # Correct error in gnocchi-api startup script
       sed -i -e 's/exec $DAEMON $DAEMON_ARGS/exec $DAEMON -- $DAEMON_ARGS/' /etc/init.d/gnocchi-api
       systemctl enable gnocchi-api.service gnocchi-metricd.service gnocchi-statsd.service
        systemctl start gnocchi-api.service gnocchi-metricd.service gnocchi-statsd.service
        ceilometer-upgrade --skip-metering-database
        service ceilometer-agent-central restart
        service ceilometer-agent-notification restart
        service ceilometer-collector restart
        # Enable Glance service meters
        crudini --set /etc/glance/glance-api.conf DEFAULT transport_url rabbit://openstack:xxxx@controller
        crudini --set /etc/glance/glance-api.conf oslo_messaging_notifications driver messagingv2
        crudini --set /etc/glance/glance-registry.conf DEFAULT transport_url rabbit://openstack:xxxx@controller
        crudini --set /etc/glance/glance-registry.conf oslo_messaging_notifications driver messagingv2
       service glance-registry restart
       service glance-api restart
       # Enable Neutron service meters
       crudini --set /etc/neutron/neutron.conf oslo_messaging_notifications driver messagingv2
       service neutron-server restart
       # Enable Heat service meters
       crudini --set /etc/heat/heat.conf oslo_messaging_notifications driver messagingv2
       service heat-api restart
       service heat-api-cfn restart
       service heat-engine restart
        #crudini --set /etc/glance/glance-api.conf DEFAULT rpc_backend rabbit
        #crudini --set /etc/glance/glance-api.conf oslo_messaging_notifications driver messagingv2
        #crudini --set /etc/glance/glance-api.conf oslo_messaging_rabbit rabbit_host controller
        #crudini --set /etc/glance/glance-api.conf oslo_messaging_rabbit rabbit_userid openstack
        #crudini --set /etc/glance/glance-api.conf oslo_messaging_rabbit rabbit_password xxxx
        #crudini --set /etc/glance/glance-registry.conf DEFAULT rpc_backend rabbit
        #crudini --set /etc/glance/glance-registry.conf oslo_messaging_notifications driver messagingv2
        #crudini --set /etc/glance/glance-registry.conf oslo_messaging_rabbit rabbit_host controller
        #crudini --set /etc/glance/glance-registry.conf oslo_messaging_rabbit rabbit_userid openstack
        #crudini --set /etc/glance/glance-registry.conf oslo_messaging_rabbit rabbit_password xxxx
   </exec>
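    <!-- A manual check, not executed by any seq: after step102 the telemetry
         chain can be inspected with the gnocchi client installed above:
           source /root/bin/admin-openrc.sh
           gnocchi status
           gnocchi resource list
           gnocchi metric list
    -->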


   <exec seq="load-img" type="verbatim">
       source /root/bin/admin-openrc.sh
       
       # Create flavors if not created
       openstack flavor show m1.nano >/dev/null 2>&1    || openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
       openstack flavor show m1.tiny >/dev/null 2>&1    || openstack flavor create --id 1 --vcpus 1 --ram 512 --disk 1 m1.tiny
       openstack flavor show m1.smaller >/dev/null 2>&1 || openstack flavor create --id 6 --vcpus 1 --ram 512 --disk 3 m1.smaller
        # Cirros image
       #wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
       wget -P /tmp/images http://138.4.7.228/download/vnx/filesystems/ostack-images/cirros-0.3.4-x86_64-disk-vnx.qcow2
       glance image-create --name "cirros-0.3.4-x86_64-vnx" --file /tmp/images/cirros-0.3.4-x86_64-disk-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
       rm /tmp/images/cirros-0.3.4-x86_64-disk*.qcow2
       
       # Ubuntu image (trusty)
       #wget -P /tmp/images http://138.4.7.228/download/vnx/filesystems/ostack-images/trusty-server-cloudimg-amd64-disk1-vnx.qcow2
       #glance image-create --name "trusty-server-cloudimg-amd64-vnx" --file /tmp/images/trusty-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
       #rm /tmp/images/trusty-server-cloudimg-amd64-disk1*.qcow2
       # Ubuntu image (xenial)
       wget -P /tmp/images http://138.4.7.228/download/vnx/filesystems/ostack-images/xenial-server-cloudimg-amd64-disk1-vnx.qcow2
       glance image-create --name "xenial-server-cloudimg-amd64-vnx" --file /tmp/images/xenial-server-cloudimg-amd64-disk1-vnx.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
       rm /tmp/images/xenial-server-cloudimg-amd64-disk1*.qcow2
        # CentOS image
        #wget -P /tmp/images http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
       #glance image-create --name "CentOS-7-x86_64" --file /tmp/images/CentOS-7-x86_64-GenericCloud.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
       #rm /tmp/images/CentOS-7-x86_64-GenericCloud.qcow2
   </exec>
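    <!-- A manual check: the flavors and images created by load-img should show
         up immediately:
           source /root/bin/admin-openrc.sh
           openstack flavor list
           openstack image list
    -->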
   <exec seq="create-demo-scenario" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Create internal network
       #neutron net-create net0
       openstack network create net0
       #neutron subnet-create net0 10.1.1.0/24 --name subnet0 --gateway 10.1.1.1 --dns-nameserver 8.8.8.8
       openstack subnet create --network net0 --gateway 10.1.1.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.1.0/24 --allocation-pool start=10.1.1.8,end=10.1.1.100 subnet0
       # Create virtual machine
       mkdir -p /root/keys
       openstack keypair create vm1 > /root/keys/vm1
       openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm1 --nic net-id=net0 --key-name vm1
       # Create external network
       #neutron net-create ExtNet --provider:physical_network provider --provider:network_type flat --router:external --shared
       openstack network create --share --external --provider-physical-network provider --provider-network-type flat ExtNet
       #neutron subnet-create --name ExtSubnet --allocation-pool start=10.0.10.100,end=10.0.10.200 --dns-nameserver 10.0.10.1 --gateway 10.0.10.1 ExtNet 10.0.10.0/24
       openstack subnet create --network ExtNet --gateway 10.0.10.1 --dns-nameserver 10.0.10.1 --subnet-range 10.0.10.0/24 --allocation-pool start=10.0.10.100,end=10.0.10.200 ExtSubNet
       #neutron router-create r0
       openstack router create r0
       #neutron router-gateway-set r0 ExtNet
       openstack router set r0 --external-gateway ExtNet
       #neutron router-interface-add r0 subnet0
       openstack router add subnet r0 subnet0


       # Assign floating IP address to vm1
       #openstack ip floating add $( openstack ip floating create ExtNet -c ip -f value ) vm1
       openstack server add floating ip vm1 $( openstack floating ip create ExtNet -c floating_ip_address -f value )


       # Create security group rules to allow ICMP, SSH and WWW access
        openstack security group rule create --proto icmp default
       openstack security group rule create --proto tcp  --dst-port 80 default
       openstack security group rule create --proto tcp  --dst-port 22 default
   </exec>
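    <!-- A manual usage example, assuming the floating IP assigned above: the
         private key saved under /root/keys/vm1 gives ssh access to vm1 as the
         default cirros user. The sed expression is just an illustrative way to
         pick the second, floating address out of the "net0=10.1.1.x, 10.0.10.y"
         string returned by the client:
           chmod 600 /root/keys/vm1
           FIP=$( openstack server show vm1 -f value -c addresses | sed 's/.*, //' )
           ssh -i /root/keys/vm1 cirros@$FIP
    -->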
   <exec seq="create-demo-vm2" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Create virtual machine
       mkdir -p /root/keys
       openstack keypair create vm2 > /root/keys/vm2
       openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm2 --nic net-id=net0 --key-name vm2
       # Assign floating IP address to vm2
       #openstack ip floating add $( openstack ip floating create ExtNet -c ip -f value ) vm2
       openstack server add floating ip vm2 $( openstack floating ip create ExtNet -c floating_ip_address -f value )
   </exec>
   <exec seq="create-demo-vm3" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Create virtual machine
       mkdir -p /root/keys
       openstack keypair create vm3 > /root/keys/vm3
       openstack server create --flavor m1.smaller --image xenial-server-cloudimg-amd64-vnx vm3 --nic net-id=net0 --key-name vm3
       # Assign floating IP address to vm3
       #openstack ip floating add $( openstack ip floating create ExtNet -c ip -f value ) vm3
       openstack server add floating ip vm3 $( openstack floating ip create ExtNet -c floating_ip_address -f value )
   </exec>
   <exec seq="create-vlan-demo-scenario" type="verbatim">
       source /root/bin/admin-openrc.sh
       # Create vlan based networks and subnetworks
       #neutron net-create vlan1000 --shared --provider:physical_network vlan --provider:network_type vlan   --provider:segmentation_id 1000
       #neutron net-create vlan1001 --shared --provider:physical_network vlan --provider:network_type vlan   --provider:segmentation_id 1001
       openstack network create --share --provider-physical-network vlan --provider-network-type vlan --provider-segment 1000 vlan1000
       openstack network create --share --provider-physical-network vlan --provider-network-type vlan --provider-segment 1001 vlan1001
       #neutron subnet-create vlan1000 10.1.2.0/24 --name vlan1000-subnet --allocation-pool start=10.1.2.2,end=10.1.2.99 --gateway 10.1.2.1 --dns-nameserver 8.8.8.8
       #neutron subnet-create vlan1001 10.1.3.0/24 --name vlan1001-subnet --allocation-pool start=10.1.3.2,end=10.1.3.99 --gateway 10.1.3.1 --dns-nameserver 8.8.8.8
       openstack subnet create --network vlan1000 --gateway 10.1.2.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.2.0/24 --allocation-pool start=10.1.2.2,end=10.1.2.99 subvlan1000
       openstack subnet create --network vlan1001 --gateway 10.1.3.1 --dns-nameserver 8.8.8.8 --subnet-range 10.1.3.0/24 --allocation-pool start=10.1.3.2,end=10.1.3.99 subvlan1001


       # Create virtual machine
       mkdir -p tmp
       openstack keypair create vm3 > tmp/vm3
       openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm3 --nic net-id=vlan1000 --key-name vm3
       openstack keypair create vm4 > tmp/vm4
       openstack server create --flavor m1.tiny --image cirros-0.3.4-x86_64-vnx vm4 --nic net-id=vlan1001 --key-name vm4


       # Create security group rules to allow ICMP, SSH and WWW access
        openstack security group rule create --proto icmp default
       openstack security group rule create --proto tcp  --dst-port 80 default
       openstack security group rule create --proto tcp  --dst-port 22 default
   </exec>
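    <!-- A manual check: the VLAN segmentation IDs assigned above can be read
         back per network:
           openstack network show vlan1000 -f value -c provider:segmentation_id
           openstack network show vlan1001 -f value -c provider:segmentation_id
    -->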
   <exec seq="verify" type="verbatim">
       source /root/bin/admin-openrc.sh
       echo "--"
       echo "-- Keystone (identity)"
       echo "--"
       echo "Command: openstack --os-auth-url http://controller:35357/v3 --os-project-domain-name default --os-user-domain-name default --os-project-name admin --os-username admin token issue"
       openstack --os-auth-url http://controller:35357/v3 \
         --os-project-domain-name default --os-user-domain-name default \
         --os-project-name admin --os-username admin token issue
   </exec>
   <exec seq="verify" type="verbatim">
       echo "--"
       echo "-- Glance (images)"
       echo "--"
       echo "Command: openstack image list"
       openstack image list
   </exec>
   <exec seq="verify" type="verbatim">
       echo "--"
       echo "-- Nova (compute)"
       echo "--"
       echo "Command: openstack compute service list"
       openstack compute service list
       echo "Command: openstack hypervisor service list"
       openstack hypervisor service list
       echo "Command: openstack catalog list"
       openstack catalog list
       echo "Command: nova-status upgrade check"
       nova-status upgrade check
   </exec>
   <exec seq="verify" type="verbatim">
       echo "--"
       echo "-- Neutron (network)"
       echo "--"
       echo "Command: openstack extension list --network"
       openstack extension list --network
       echo "Command: openstack network agent list"
       openstack network agent list
       echo "Command: openstack security group list"
       openstack security group list
       echo "Command: openstack security group rule list"
       openstack security group rule list
   </exec>
 </vm>
 <vm name="network" type="lxc" arch="x86_64">
   <filesystem type="cow">filesystems/rootfs_lxc_ubuntu64-ostack-network</filesystem>
   <mem>1G</mem>
   <if id="1" net="MgmtNet">
     <ipv4>10.0.0.21/24</ipv4>
   </if>
   <if id="2" net="TunnNet">
     <ipv4>10.0.1.21/24</ipv4>
   </if>
   <if id="3" net="VlanNet">
   </if>
   <if id="4" net="ExtNet">
   </if>
   <if id="9" net="virbr0">
     <ipv4>dhcp</ipv4>
   </if>
   <forwarding type="ip" />
   <forwarding type="ipv6" />
   <filetree seq="on_boot" root="/root/">conf/hosts</filetree>
   <exec seq="on_boot" type="verbatim">
       cat /root/hosts >> /etc/hosts
       rm /root/hosts
   </exec>
   <exec seq="on_boot" type="verbatim">
        # Change MgmtNet, TunnNet and VlanNet interfaces MTU
       ifconfig eth1 mtu 1450
       sed -i -e '/iface eth1 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth2 mtu 1450
       sed -i -e '/iface eth2 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth3 mtu 1450
       sed -i -e '/iface eth3 inet static/a \   mtu 1450' /etc/network/interfaces
   </exec>
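    <!-- The 1450-byte MTU leaves room for the 50 bytes of VXLAN plus outer
         header overhead on the 1500-byte underlay used by the tunnel network.
         A quick manual check after boot:
           ip link show eth2 | grep -o 'mtu [0-9]*'
    -->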


   <filetree seq="on_boot" root="/root/">conf/network/bin</filetree>
   <exec seq="on_boot" type="verbatim">
       chmod +x /root/bin/*
   </exec>
   <filetree seq="step52" root="/etc/neutron/">conf/network/neutron/neutron.conf</filetree>
   <filetree seq="step52" root="/etc/neutron/">conf/network/neutron/metadata_agent.ini</filetree>
   <filetree seq="step52" root="/etc/neutron/plugins/ml2/">conf/network/neutron/openvswitch_agent.ini</filetree>
   <filetree seq="step52" root="/etc/neutron/">conf/network/neutron/l3_agent.ini</filetree>
   <filetree seq="step52" root="/etc/neutron/">conf/network/neutron/dhcp_agent.ini</filetree>
   <filetree seq="step52" root="/etc/neutron/">conf/network/neutron/dnsmasq-neutron.conf</filetree>
   <exec seq="step52" type="verbatim">
       ovs-vsctl add-br br-vlan
       ovs-vsctl add-port br-vlan eth3
       #ovs-vsctl add-br br-ex
       ovs-vsctl add-port br-provider eth4
       service neutron-lbaasv2-agent restart
       service openvswitch-switch restart
        service neutron-openvswitch-agent restart
        #service neutron-linuxbridge-agent restart
        service neutron-dhcp-agent restart
        service neutron-metadata-agent restart
        service neutron-l3-agent restart
       rm -f /var/lib/neutron/neutron.sqlite
   </exec>
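    <!-- A manual check: the bridges and ports configured above can be listed
         with the standard OVS CLI:
           ovs-vsctl show
           ovs-vsctl list-ports br-vlan
    -->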
 </vm>


 <vm name="compute1" type="lxc" arch="x86_64">
   <filesystem type="cow">filesystems/rootfs_lxc_ubuntu64-ostack-compute</filesystem>
   <mem>2G</mem>
   <if id="1" net="MgmtNet">
     <ipv4>10.0.0.31/24</ipv4>
   </if>
   <if id="2" net="TunnNet">
     <ipv4>10.0.1.31/24</ipv4>
   </if>
   <if id="3" net="VlanNet">
   </if>
   <if id="9" net="virbr0">
     <ipv4>dhcp</ipv4>
   </if>
   <filetree seq="on_boot" root="/root/">conf/hosts</filetree>
   <exec seq="on_boot" type="verbatim">
       cat /root/hosts >> /etc/hosts;
       rm /root/hosts;
       # Create /dev/net/tun device 
       #mkdir -p /dev/net/
       #mknod -m 666 /dev/net/tun  c 10 200
        # Change MgmtNet, TunnNet and VlanNet interfaces MTU
       ifconfig eth1 mtu 1450
       sed -i -e '/iface eth1 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth2 mtu 1450
       sed -i -e '/iface eth2 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth3 mtu 1450
       sed -i -e '/iface eth3 inet static/a \   mtu 1450' /etc/network/interfaces
   </exec>


   <filetree seq="step42" root="/etc/nova/">conf/compute1/nova/nova.conf</filetree>
   <filetree seq="step42" root="/etc/nova/">conf/compute1/nova/nova-compute.conf</filetree>
   <exec seq="step42" type="verbatim">
       service nova-compute restart
       #rm -f /var/lib/nova/nova.sqlite
   </exec>
   <filetree seq="step53" root="/etc/neutron/">conf/compute1/neutron/neutron.conf</filetree>
   <filetree seq="step53" root="/etc/neutron/plugins/ml2/">conf/compute1/neutron/openvswitch_agent.ini</filetree>
   <exec seq="step53" type="verbatim">
       ovs-vsctl add-br br-vlan
       ovs-vsctl add-port br-vlan eth3
       service openvswitch-switch restart
       service nova-compute restart
       service neutron-openvswitch-agent restart
   </exec>


   <exec seq="step101" type="verbatim">

export DEBIAN_FRONTEND=noninteractive

       apt-get -y install ceilometer-agent-compute
   </exec>
   <filetree seq="step102" root="/etc/ceilometer/">conf/compute2/ceilometer/ceilometer.conf</filetree>
   <exec seq="step102" type="verbatim">

crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit True crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour crudini --set /etc/nova/nova.conf DEFAULT notify_on_state_change vm_and_task_state crudini --set /etc/nova/nova.conf oslo_messaging_notifications driver messagingv2 service ceilometer-agent-compute restart service nova-compute restart

   </exec>
 </vm>
 <vm name="compute2" type="lxc" arch="x86_64">
   <filesystem type="cow">filesystems/rootfs_lxc_ubuntu64-ostack-compute</filesystem>
   <mem>2G</mem>
   <if id="1" net="MgmtNet">
     <ipv4>10.0.0.32/24</ipv4>
   </if>
   <if id="2" net="TunnNet">
     <ipv4>10.0.1.32/24</ipv4>
   </if>
   <if id="3" net="VlanNet">
   </if>
   <if id="9" net="virbr0">
     <ipv4>dhcp</ipv4>
   </if>
   <filetree seq="on_boot" root="/root/">conf/hosts</filetree>
   <exec seq="on_boot" type="verbatim">
       cat /root/hosts >> /etc/hosts;
       rm /root/hosts;
       # Create /dev/net/tun device 
       #mkdir -p /dev/net/
       #mknod -m 666 /dev/net/tun  c 10 200
        # Change MgmtNet, TunnNet and VlanNet interfaces MTU
       ifconfig eth1 mtu 1450
       sed -i -e '/iface eth1 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth2 mtu 1450
       sed -i -e '/iface eth2 inet static/a \   mtu 1450' /etc/network/interfaces
       ifconfig eth3 mtu 1450
       sed -i -e '/iface eth3 inet static/a \   mtu 1450' /etc/network/interfaces
   </exec>


   <filetree seq="step42" root="/etc/nova/">conf/compute2/nova/nova.conf</filetree>
   <filetree seq="step42" root="/etc/nova/">conf/compute2/nova/nova-compute.conf</filetree>
   <exec seq="step42" type="verbatim">
       service nova-compute restart
       #rm -f /var/lib/nova/nova.sqlite
   </exec>
   <filetree seq="step53" root="/etc/neutron/">conf/compute2/neutron/neutron.conf</filetree>
   <filetree seq="step53" root="/etc/neutron/plugins/ml2/">conf/compute2/neutron/openvswitch_agent.ini</filetree>
   <exec seq="step53" type="verbatim">
       ovs-vsctl add-br br-vlan
       ovs-vsctl add-port br-vlan eth3
       service openvswitch-switch restart
       service nova-compute restart
       service neutron-openvswitch-agent restart
   </exec>
   <exec seq="step101" type="verbatim">

export DEBIAN_FRONTEND=noninteractive

       apt-get -y install ceilometer-agent-compute
   </exec>
   <filetree seq="step102" root="/etc/ceilometer/">conf/compute2/ceilometer/ceilometer.conf</filetree>
   <exec seq="step102" type="verbatim">

crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit True crudini --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour crudini --set /etc/nova/nova.conf DEFAULT notify_on_state_change vm_and_task_state crudini --set /etc/nova/nova.conf oslo_messaging_notifications driver messagingv2 service ceilometer-agent-compute restart service nova-compute restart

   </exec>
 </vm>


 <host>
   <hostif net="ExtNet">
      <ipv4>10.0.10.1/24</ipv4>
   </hostif>
   <hostif net="MgmtNet">
     <ipv4>10.0.0.1/24</ipv4>
   </hostif>
   <exec seq="step00" type="verbatim">
   	echo "--\n-- Waiting for all VMs to be ssh ready...\n--"
   </exec>
   <exec seq="step00" type="verbatim">
    	# Wait until ssh is accessible in all VMs
    	while ! nc -z controller 22; do sleep 1; done
    	while ! nc -z network 22; do sleep 1; done
    	while ! nc -z compute1 22; do sleep 1; done
    	while ! nc -z compute2 22; do sleep 1; done
   </exec>
   <exec seq="step00" type="verbatim">
   	echo "-- ...OK\n--"
   </exec>
 </host>

</vnx>