def upgrade_net_tmpl_restore(self):
        """Restore Fuel master - network templates

        Scenario:
        1. Revert "upgrade_net_tmpl_backup" snapshot
        2. Reinstall Fuel master using the ISO given in ISO_PATH
        3. Install fuel-octane package
        4. Upload the backup to the reinstalled Fuel master node
        5. Restore master node using 'octane fuel-restore'
        6. Check that network template is still available
        7. Verify networks
        8. Run OSTF

        Snapshot: upgrade_net_tmpl_restore
        """

        self.check_run(self.snapshot_name)
        assert_true(os.path.exists(self.repos_local_path))
        assert_true(os.path.exists(self.local_path))

        self.show_step(1)
        self.revert_backup()
        self.show_step(2)
        self.reinstall_master_node()
        self.show_step(3)
        self.show_step(4)
        self.show_step(5)
        self.do_restore(self.backup_path, self.local_path,
                        self.repos_backup_path, self.repos_local_path)
        self.show_step(6)
        cluster_id = self.fuel_web.get_last_created_cluster()
        # get_network_template will raise an exception if there is no template
        template = self.fuel_web.client.get_network_template(cluster_id)
        if LooseVersion(settings.UPGRADE_FUEL_FROM) == LooseVersion("7.0"):
            # Replace the network mapping from the eth* naming scheme to the
            # enp0s* scheme for all deployed nodes
            nic_map = template['adv_net_template']['default']['nic_mapping']
            default_mapping = nic_map['default']
            for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
                nic_map[node['hostname']] = deepcopy(default_mapping)
            new_template = get_network_template("upgrades")
            nic_map['default'] = new_template[
                'adv_net_template']['default']['nic_mapping']['default']
            self.fuel_web.client.upload_network_template(cluster_id, template)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(8)
        self.check_ostf(cluster_id=cluster_id,
                        test_sets=['smoke', 'sanity', 'ha'],
                        ignore_known_issues=True)
        self.env.make_snapshot("upgrade_net_tmpl_restore", is_make=True)
    def deploy_cinder_net_tmpl(self):
        """Deploy HA environment with Cinder, Neutron and network template

        Scenario:
            1. Revert snapshot with 3 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 1 controller + cinder nodes
            4. Add 2 compute + cinder nodes
            5. Upload 'cinder' network template
            6. Create custom network groups based
               on template endpoint assignments
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only

        Duration 180m
        Snapshot deploy_cinder_net_tmpl
        """

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT[NEUTRON_SEGMENT_TYPE],
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            }
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': ['compute', 'cinder'],
            },
            update_interfaces=False
        )

        network_template = get_network_template('cinder')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.fuel_web.verify_network(cluster_id)

        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke', 'sanity',
                                          'ha', 'tests_platform'])
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)

        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot("deploy_cinder_net_tmpl",
                               is_make=self.is_make_snapshot())
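# Hedged sketch of a helper like the get_network_template() these tests call.
# It assumes templates are YAML files named '<template_name>.yaml' in a local
# 'network_templates' directory; the real fuel-qa helper may resolve the path
# differently, so this is only an approximation of the idea.
import os

import yaml


def load_network_template(template_name, templates_dir='network_templates'):
    """Load a network template by name and return it as a dict."""
    path = os.path.join(templates_dir, '{0}.yaml'.format(template_name))
    with open(path) as template_file:
        return yaml.safe_load(template_file)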
    def add_nodes_net_tmpl(self):
        """Add nodes to operational environment with network template

        Scenario:
            1. Revert snapshot with deployed environment
            2. Bootstrap 2 more slave nodes
            3. Add 1 controller + cinder and 1 compute + cinder nodes
            4. Upload 'cinder_add_nodes' network template with broken
               network mapping for new nodes
            5. Run network verification and check that it fails
            6. Upload 'cinder' network template
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only

        Duration 60m
        Snapshot add_nodes_net_tmpl
        """

        self.env.revert_snapshot("deploy_cinder_net_tmpl")

        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

        cluster_id = self.fuel_web.get_last_created_cluster()

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-04': ['controller', 'cinder'],
                'slave-05': ['compute', 'cinder'],
            },
            update_interfaces=False
        )

        network_template = get_network_template('cinder_add_nodes')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        self.fuel_web.verify_network(cluster_id, success=False)

        network_template = get_network_template('cinder')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke', 'sanity',
                                          'ha', 'tests_platform'])
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)

        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot("add_nodes_net_tmpl")
    def two_nodegroups_network_templates(self):
        """Deploy HA environment with Cinder, Neutron and network template on
        two nodegroups.

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 3 controller nodes
            4. Add 2 compute + cinder nodes
            5. Upload 'two_nodegroups' network template
            6. Deploy cluster
            7. Run health checks (OSTF)
            8. Check L3 network configuration on slaves
            9. Check that services are listening on their networks only

        Duration 120m
        Snapshot two_nodegroups_network_templates
        """
        if not MULTIPLE_NETWORKS:
            raise SkipTest()

        self.env.revert_snapshot('ready_with_5_slaves')

        # TODO(akostrikov) This should be refactored.
        admin_net = self.env.d_env.admin_net
        admin_net2 = self.env.d_env.admin_net2
        def get_network(x):
            return self.env.d_env.get_network(name=x).ip_network
        networks = ['.'.join(get_network(n).split('.')[0:-1])
                    for n in [admin_net, admin_net2]]
        nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
                           self.fuel_web.client.list_nodes()]
        assert_equal(set(networks), set(nodes_addresses),
                     'Only one admin network is used for discovering slaves:'
                     ' "{0}"'.format(set(nodes_addresses)))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            }
        )
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-05': [['controller'], nodegroup1],
                'slave-03': [['controller'], nodegroup1],
                'slave-02': [['compute', 'cinder'], nodegroup2],
                'slave-04': [['compute', 'cinder'], nodegroup2],
            }
        )

        network_template = get_network_template('two_nodegroups')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id,
            network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={nodegroup1: '10.200.0.0/16', nodegroup2: '10.210.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.check_ipconfig_for_template(cluster_id,
                                         network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        # TODO(akostrikov) ostf may fail, need further investigation.
        ostf_tmpl_set = ['smoke', 'sanity', 'ha', 'tests_platform']
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=ostf_tmpl_set)

        self.check_ipconfig_for_template(cluster_id,
                                         network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot('two_nodegroups_network_templates')
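# Standalone sketch of the subnet arithmetic implied by the ip_nets and
# ip_prefixlen arguments used above: each custom network group gets its own
# /24 carved out of the per-nodegroup /16. This is not the framework's
# generate_networks_for_template() implementation, just an illustration.
from ipaddress import ip_network


def carve_subnets(supernet=u'10.200.0.0/16', new_prefix=24, count=3):
    """Return the first `count` subnets of `supernet` with `new_prefix`."""
    subnets = ip_network(supernet).subnets(new_prefix=new_prefix)
    return [str(subnet) for subnet, _ in zip(subnets, range(count))]

# carve_subnets() -> ['10.200.0.0/24', '10.200.1.0/24', '10.200.2.0/24']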
    def baremetal_deploy_virt_nodes_on_one_compute(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        all virtual nodes on the same compute

        Scenario:
            1. Create a cluster
            2. Assign compute and virt roles to the slave node
            3. Upload configuration for three VMs
            4. Spawn the VMs and wait until they are available for allocation
            5. Assign controller role to the VMs
            6. Deploy the cluster
            7. Run OSTF

        Duration: 180m
        """
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt'],
            })

        self.show_step(3)
        node = self.fuel_web.get_nailgun_node_by_name("slave-01")
        self.fuel_web.client.create_vm_nodes(
            node['id'],
            [
                {"id": 1, "mem": 4, "cpu": 2, "vda_size": "100G"},
                {"id": 2, "mem": 4, "cpu": 2, "vda_size": "100G"},
                {"id": 3, "mem": 4, "cpu": 2, "vda_size": "100G"},
            ])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 4,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for available nodes, "
                          "current nodes: \n{0}" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']}
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
    def two_nodegroups_network_templates(self):
        """Deploy HA environment with Cinder, Neutron and network template on
        two nodegroups.

        Scenario:
            1. Revert snapshot with ready master node
            2. Bootstrap 3 slaves from default nodegroup
            3. Create cluster with Neutron VXLAN and custom nodegroups
            4. Bootstrap 2 slaves nodes from custom nodegroup
            5. Add 3 controller nodes from default nodegroup
            6. Add 2 compute+cinder nodes from custom nodegroup
            7. Upload 'two_nodegroups' network template
            8. Verify networks
            9. Deploy cluster
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only

        Duration 120m
        Snapshot two_nodegroups_network_templates
        """

        asserts.assert_true(MULTIPLE_NETWORKS, "MULTIPLE_NETWORKS variable"
                                               " wasn't exported")
        self.show_step(1, initialize=True)
        self.env.revert_snapshot('ready')
        self.show_step(2)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[0:3])
        self.show_step(3)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            }
        )

        self.show_step(4)
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

        self.show_step(5)
        self.show_step(6)
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-02': [['controller'], nodegroup1],
                'slave-03': [['controller'], nodegroup1],
                'slave-04': [['compute', 'cinder'], nodegroup2],
                'slave-05': [['compute', 'cinder'], nodegroup2],
            }
        )
        network_template = utils.get_network_template('two_nodegroups')
        self.show_step(7)
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id,
            network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={nodegroup1: '10.200.0.0/16', nodegroup2: '10.210.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.show_step(11)
        self.check_ipconfig_for_template(cluster_id,
                                         network_template,
                                         networks)
        self.show_step(12)
        self.check_services_networks(cluster_id, network_template)

        # TODO(akostrikov) ostf may fail, need further investigation.
        ostf_tmpl_set = ['smoke', 'sanity', 'ha', 'tests_platform']
        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=ostf_tmpl_set)

        self.env.make_snapshot('two_nodegroups_network_templates')
    def upgrade_net_tmpl_backup(self):
        """Deploy HA environment with Ceph, Neutron and network template

        Scenario:
            1. Revert snapshot with 9 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 3 controller + ceph nodes
            4. Add 2 compute + ceph nodes
            5. Upload 'upgrades' network template
            6. Create custom network groups based
               on template endpoint assignments
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only
            13. Install fuel-octane package
            14. Create backups for upgrade procedure

        Duration 180m
        Snapshot upgrade_net_tmpl_backup
        """
        self.check_run(self.source_snapshot_name)

        intermediate_snapshot = "prepare_upgrade_tmpl_before_backup"
        if not self.env.d_env.has_snapshot(intermediate_snapshot):
            self.show_step(1)
            self.env.revert_snapshot("ready_with_9_slaves")
            self.show_step(2)
            cluster_settings = {
                'volumes_ceph': True, 'images_ceph': True,
                'volumes_lvm': False, 'ephemeral_ceph': True,
                'objects_ceph': True,
                'net_provider': 'neutron',
                'net_segment_type':
                    settings.NEUTRON_SEGMENT['vlan']}
            cluster_settings.update(self.cluster_creds)

            cluster_id = self.fuel_web.create_cluster(
                name=self.__class__.__name__,
                settings=cluster_settings)

            self.show_step(3)
            self.show_step(4)
            self.fuel_web.update_nodes(
                cluster_id,
                {'slave-01': ['controller'],
                 'slave-02': ['controller'],
                 'slave-03': ['controller'],
                 'slave-04': ['ceph-osd'],
                 'slave-05': ['ceph-osd'],
                 'slave-06': ['ceph-osd'],
                 'slave-07': ['compute'],
                 'slave-08': ['compute']},
                update_interfaces=False)

            self.show_step(5)
            network_template = get_network_template("upgrades")
            self.fuel_web.client.upload_network_template(
                cluster_id=cluster_id, network_template=network_template)
            self.show_step(6)
            # pylint: disable=redefined-variable-type
            if settings.UPGRADE_FUEL_FROM == "7.0":
                network = '10.200.0.0/16'
            else:
                network = {'default': '10.200.0.0/16'}
            # pylint: enable=redefined-variable-type
            networks = self.generate_networks_for_template(
                network_template, network, '24')
            existing_networks = self.fuel_web.client.get_network_groups()
            networks = self.create_custom_networks(networks, existing_networks)

            logger.debug('Networks: {0}'.format(
                self.fuel_web.client.get_network_groups()))

            self.show_step(7)
            self.fuel_web.verify_network(cluster_id)

            self.show_step(8)
            self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

            self.show_step(9)
            self.fuel_web.verify_network(cluster_id)
            self.show_step(10)
            # Live migration test could fail
            # https://bugs.launchpad.net/fuel/+bug/1471172
            # https://bugs.launchpad.net/fuel/+bug/1604749
            self.check_ostf(cluster_id=cluster_id,
                            test_sets=['smoke', 'sanity', 'ha'],
                            ignore_known_issues=True)
            self.show_step(11)
            self.check_ipconfig_for_template(cluster_id, network_template,
                                             networks)

            self.show_step(12)
            self.check_services_networks(cluster_id, network_template)

            self.env.make_snapshot(intermediate_snapshot)

        # revert_snapshot will do nothing if there is no snapshot
        self.env.revert_snapshot(intermediate_snapshot)

        self.show_step(13)
        self.show_step(14)
        self.do_backup(self.backup_path, self.local_path,
                       self.repos_backup_path, self.repos_local_path)
        self.env.make_snapshot(self.source_snapshot_name, is_make=True)
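# Small illustration (assumption, not framework code) of normalizing the two
# argument shapes used above: for an upgrade from release 7.0 a bare CIDR
# string is passed to generate_networks_for_template(), while newer releases
# pass a dict keyed by nodegroup name.
def normalize_ip_nets(ip_nets, nodegroup='default'):
    """Return the dict form of `ip_nets` regardless of which shape was given."""
    if isinstance(ip_nets, dict):
        return ip_nets
    return {nodegroup: ip_nets}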
    def baremetal_deploy_virt_nodes_on_different_computes(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        each virtual node on a separate compute

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to three slave nodes
            3. Upload VM configuration for one VM to each slave node
            4. Apply network template for the env and spawn the VMs
            5. Assign controller role to VMs
            6. Deploy cluster
            7. Run OSTF
            8. Mark 'mysql' partition to be preserved on one of controllers
            9. Reinstall the controller
            10. Verify that the reinstalled controller joined the Galera
                cluster and synced its state
            11. Run OSTF
            12. Gracefully reboot one controller using "reboot" command
                and wait till it comes up
            13. Run OSTF
            14. Forcefully reboot one controller using "reboot -f" command
                and wait till it comes up
            15. Run OSTF
            16. Gracefully reboot one compute using "reboot" command
                and wait till compute and controller come up
            17. Run OSTF
            18. Forcefully reboot one compute using "reboot -f" command
                and wait till compute and controller come up
            19. Run OSTF

        Duration: 360m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt'],
                'slave-02': ['compute', 'virt'],
                'slave-03': ['compute', 'virt']
            })

        self.show_step(3)
        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            self.fuel_web.client.create_vm_nodes(
                node['id'],
                [{
                    "id": 1,
                    "mem": 2,
                    "cpu": 2,
                    "vda_size": "100G"
                }])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf_ha")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 6,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting 2 available nodes, "
                          "current nodes: \n{0}" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']
        }
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        virt_nodes = [n for n in self.fuel_web.client.list_nodes()
                      if n['name'].startswith('vslave')]
        ctrl = virt_nodes[0]
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            preserve_partition(admin, ctrl['id'], "mysql")

        self.show_step(9)
        task = self.fuel_web.client.provision_nodes(
            cluster_id, [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)
        task = self.fuel_web.client.deploy_nodes(
            cluster_id, [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)

        self.show_step(10)
        cmd = "mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE 'wsrep%';\""
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            err_msg = ("Galera isn't ready on {0} node".format(
                ctrl['hostname']))
            wait(
                lambda: admin.execute_through_host(
                    ctrl['ip'], cmd, auth=self.ssh_auth)['exit_code'] == 0,
                timeout=10 * 60, timeout_msg=err_msg)

            cmd = ("mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE "
                   "'wsrep_local_state_comment';\"")
            err_msg = ("The reinstalled node {0} is not synced with the "
                       "Galera cluster".format(ctrl['hostname']))
            wait(
                # pylint: disable=no-member
                lambda: admin.execute_through_host(
                    ctrl['ip'], cmd,
                    auth=self.ssh_auth)['stdout'][0].split()[1] == "Synced",
                # pylint: enable=no-member
                timeout=10 * 60,
                timeout_msg=err_msg)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(12)
        self.show_step(13)
        self.show_step(14)
        self.show_step(15)
        cmds = {"reboot": "gracefully", "reboot -f >/dev/null &": "forcefully"}
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(
                        virt_nodes[1]['ip'], cmd, auth=self.ssh_auth,
                        timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} controller"
                    "node".format(cmds[cmd], virt_nodes[1]['name']))
            self.wait_for_slave(virt_nodes[1])

            self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(16)
        self.show_step(17)
        self.show_step(18)
        self.show_step(19)
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(
                        compute['ip'], cmd, auth=self.ssh_auth,
                        timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} compute"
                    "node".format(cmds[cmd], compute['name']))
            self.wait_for_slave(compute)
            for vm in virt_nodes:
                self.wait_for_slave(vm)

            self.fuel_web.run_ostf(cluster_id=cluster_id)
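# Illustration of the output parsing used in the Galera sync check above:
# `mysql -sse "SHOW STATUS LIKE 'wsrep_local_state_comment';"` prints a single
# tab-separated "variable<TAB>value" row, so splitting on whitespace and taking
# the second field yields the state name. The sample row below is an assumed
# value for demonstration only.
sample_row = "wsrep_local_state_comment\tSynced"
assert sample_row.split()[1] == "Synced"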
    def add_nodes_net_tmpl(self):
        """Add nodes to operational environment with network template

        Scenario:
            1. Revert snapshot with deployed environment
            2. Bootstrap 2 more slave nodes
            3. Add 1 controller + cinder and 1 compute + cinder nodes
            4. Upload 'cinder_add_nodes' network template with broken
               network mapping for new nodes
            5. Run network verification and check that it fails
            6. Upload 'cinder' network template
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only

        Duration 60m
        Snapshot add_nodes_net_tmpl
        """

        self.env.revert_snapshot("deploy_cinder_net_tmpl")

        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:5])

        cluster_id = self.fuel_web.get_last_created_cluster()

        self.fuel_web.update_nodes(cluster_id, {
            'slave-04': ['controller', 'cinder'],
            'slave-05': ['compute', 'cinder'],
        },
                                   update_interfaces=False)

        network_template = get_network_template('cinder_add_nodes')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        self.fuel_web.verify_network(cluster_id, success=False)

        network_template = get_network_template('cinder')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke', 'sanity', 'ha', 'tests_platform'])
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)

        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot("add_nodes_net_tmpl")
    def deploy_cinder_net_tmpl(self):
        """Deploy HA environment with Cinder, Neutron and network template

        Scenario:
            1. Revert snapshot with 3 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 1 controller + cinder nodes
            4. Add 2 compute + cinder nodes
            5. Upload 'cinder' network template
            6. Create custom network groups based
               on template endpoint assignments
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)
            11. Check L3 network configuration on slaves
            12. Check that services are listening on their networks only

        Duration 180m
        Snapshot deploy_cinder_net_tmpl
        """

        self.env.revert_snapshot("ready_with_3_slaves")

        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  mode=DEPLOYMENT_MODE_HA,
                                                  settings={
                                                      'tenant': 'netTemplate',
                                                      'user': '******',
                                                      'password':
                                                      '******',
                                                  })

        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller', 'cinder'],
            'slave-02': ['compute', 'cinder'],
            'slave-03': ['compute', 'cinder'],
        },
                                   update_interfaces=False)

        network_template = get_network_template('cinder')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.fuel_web.verify_network(cluster_id)

        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke', 'sanity', 'ha', 'tests_platform'])
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)

        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot("deploy_cinder_net_tmpl",
                               is_make=self.is_make_snapshot())
    def two_nodegroups_network_templates(self):
        """Deploy HA environment with Cinder, Neutron and network template on
        two nodegroups.

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 3 controller nodes
            4. Add 2 compute + cinder nodes
            5. Upload 'two_nodegroups' network template
            6. Deploy cluster
            7. Run health checks (OSTF)
            8. Check L3 network configuration on slaves
            9. Check that services are listening on their networks only

        Duration 120m
        Snapshot two_nodegroups_network_templates
        """
        def get_network(x):
            return self.env.d_env.get_network(name=x).ip_network

        if not MULTIPLE_NETWORKS:
            raise SkipTest()

        self.env.revert_snapshot('ready_with_5_slaves')

        # TODO(akostrikov) This should be refactored.
        admin_net = self.env.d_env.admin_net
        admin_net2 = self.env.d_env.admin_net2

        networks = ['.'.join(get_network(n).split('.')[0:-1])
                    for n in [admin_net, admin_net2]]
        nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
                           self.fuel_web.client.list_nodes()]
        assert_equal(set(networks), set(nodes_addresses),
                     'Only one admin network is used for discovering slaves:'
                     ' "{0}"'.format(set(nodes_addresses)))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT['tun'],
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            }
        )
        nodegroup1 = NODEGROUPS[0]['name']
        nodegroup2 = NODEGROUPS[1]['name']
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': [['controller'], nodegroup1],
                'slave-05': [['controller'], nodegroup1],
                'slave-03': [['controller'], nodegroup1],
                'slave-02': [['compute', 'cinder'], nodegroup2],
                'slave-04': [['compute', 'cinder'], nodegroup2],
            }
        )

        network_template = get_network_template('two_nodegroups')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id,
            network_template=network_template)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={nodegroup1: '10.200.0.0/16', nodegroup2: '10.210.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.check_ipconfig_for_template(cluster_id,
                                         network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        # TODO(akostrikov) ostf may fail, need further investigation.
        ostf_tmpl_set = ['smoke', 'sanity', 'ha', 'tests_platform']
        self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=ostf_tmpl_set)

        self.check_ipconfig_for_template(cluster_id,
                                         network_template,
                                         networks)
        self.check_services_networks(cluster_id, network_template)

        self.env.make_snapshot('two_nodegroups_network_templates')
    def network_config_consistency_on_reboot(self):
        """Deploy HA environment with Cinder, Neutron and network template

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Create cluster (HA) with Neutron VLAN
            3. Add 3 controller and 1 compute + cinder nodes
            4. Upload 'default_ovs' network template
            5. Create custom network groups based
               on template endpoint assignments
            6. Run network verification
            7. Deploy cluster and run basic health checks
            8. Run network verification
            9. Check L3 network configuration on slaves
            10. Check that services are listening on their networks only
            11. Reboot a node
            12. Run network verification
            13. Check L3 network configuration on slaves
            14. Check that services are listening on their networks only
            15. Run OSTF

        Duration 180m
        Snapshot network_config_consistency_on_reboot
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            }
        )

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute', 'cinder'],
            },
            update_interfaces=False
        )

        self.show_step(4)
        network_template = get_network_template('default_ovs')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        self.show_step(5)
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.check_ipconfig_for_template(
            cluster_id, network_template, networks)
        self.show_step(10)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(11)
        self.fuel_web.warm_restart_nodes([self.env.d_env.nodes().slaves[0]])
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(13)
        self.check_ipconfig_for_template(
            cluster_id, network_template, networks)
        self.show_step(14)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke', 'sanity', 'ha'])

        self.env.make_snapshot("network_config_consistency_on_reboot",
                               is_make=self.is_make_snapshot())
    def deploy_env_with_public_api(self):
        """Deploy environment with enabled DMZ network for API.

        Scenario:
            1. Revert snapshot with ready master node
            2. Create new environment
            3. Run network verification
            4. Deploy the environment
            5. Run network verification
            6. Run OSTF
            7. Reboot cluster nodes
            8. Run OSTF
            9. Create environment snapshot deploy_env_with_public_api

        Duration 120m
        Snapshot deploy_env_with_public_api
        """

        asserts.assert_true(settings.ENABLE_DMZ,
                            "ENABLE_DMZ variable wasn't exported")
        self.check_run('deploy_env_with_public_api')

        self.show_step(1)
        self.env.revert_snapshot('ready_with_5_slaves')

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
        )

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder'],
            },
            update_interfaces=False
        )

        network_template = utils.get_network_template('public_api')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        net = self.fuel_web.get_network_pool('os-api')
        nodegroup = self.fuel_web.get_nodegroup(cluster_id)
        os_api_template = {
            "group_id": nodegroup['id'],
            "name": 'os-api',
            "cidr": net['network'],
            "gateway": net['gateway'],
            "meta": {
                'notation': 'cidr',
                'render_type': None,
                'map_priority': 2,
                'configurable': True,
                'use_gateway': True,
                'name': 'os-api',
                'cidr': net['network'],
                'vlan_start': None,
                'vips': ['haproxy']
            }
        }
        self.fuel_web.client.add_network_group(os_api_template)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(3)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(4)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(7)
        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        self.fuel_web.warm_restart_nodes(
            self.fuel_web.get_devops_nodes_by_nailgun_nodes(nodes))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=cluster_id,
            roles=['controller']
        )[0]
        controller_devops = \
            self.fuel_web.get_devops_node_by_nailgun_node(controller)

        # Wait until MySQL Galera is UP on some controller
        self.fuel_web.wait_mysql_galera_is_up([controller_devops.name])

        # Wait until Cinder services are UP on a controller
        self.fuel_web.wait_cinder_is_up([controller_devops.name])

        wait_pass(
            lambda: self.fuel_web.run_ostf(cluster_id,
                                           test_sets=['sanity', 'smoke']),
            interval=10,
            timeout=12 * 60
        )

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(9)
        self.env.make_snapshot('deploy_env_with_public_api', is_make=True)
    def baremetal_deploy_virt_nodes_on_one_compute(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        all virtual nodes on the same compute

        Scenario:
            1. Create a cluster
            2. Assign compute and virt roles to the slave node
            3. Upload configuration for three VMs
            4. Spawn the VMs and wait until they are available for allocation
            5. Assign controller role to the VMs
            6. Deploy the cluster
            7. Run OSTF

        Duration: 180m
        """
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['compute', 'virt'],
        })

        self.show_step(3)
        node = self.fuel_web.get_nailgun_node_by_name("slave-01")
        self.fuel_web.client.create_vm_nodes(node['id'], [
            {
                "id": 1,
                "mem": 4,
                "cpu": 2,
                "vda_size": "100G"
            },
            {
                "id": 2,
                "mem": 4,
                "cpu": 2,
                "vda_size": "100G"
            },
            {
                "id": 3,
                "mem": 4,
                "cpu": 2,
                "vda_size": "100G"
            },
        ])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 4,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for available nodes, "
                          "current nodes: \n{0}" + '\n'.join([
                              'Name: {0}, status: {1}, online: {2}'.format(
                                  i['name'], i['status'], i['online'])
                              for i in self.fuel_web.client.list_nodes()
                          ])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']
        }
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
    def baremetal_deploy_virt_nodes_on_different_computes(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        each virtual node on a separate compute

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to three slave nodes
            3. Upload VM configuration for one VM to each slave node
            4. Apply network template for the env and spawn the VMs
            5. Assign controller role to VMs
            6. Deploy cluster
            7. Run OSTF
            8. Mark 'mysql' partition to be preserved on one of controllers
            9. Reinstall the controller
            10. Verify that the reinstalled controller joined the Galera
                cluster and synced its state
            11. Run OSTF
            12. Gracefully reboot one controller using "reboot" command
                and wait till it comes up
            13. Run OSTF
            14. Forcefully reboot one controller using "reboot -f" command
                and wait till it comes up
            15. Run OSTF
            16. Gracefully reboot one compute using "reboot" command
                and wait till compute and controller come up
            17. Run OSTF
            18. Forcefully reboot one compute using "reboot -f" command
                and wait till compute and controller come up
            19. Run OSTF

        Duration: 360m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['compute', 'virt'],
                'slave-02': ['compute', 'virt'],
                'slave-03': ['compute', 'virt']
            })

        self.show_step(3)
        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            self.fuel_web.client.create_vm_nodes(node['id'],
                                                 [{
                                                     "id": 1,
                                                     "mem": 2,
                                                     "cpu": 2,
                                                     "vda_size": "100G"
                                                 }])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf_ha")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 6,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting 2 available nodes, "
                          "current nodes: \n{0}" + '\n'.join([
                              'Name: {0}, status: {1}, online: {2}'.format(
                                  i['name'], i['status'], i['online'])
                              for i in self.fuel_web.client.list_nodes()
                          ])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']
        }
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        virt_nodes = [
            n for n in self.fuel_web.client.list_nodes()
            if n['name'].startswith('vslave')
        ]
        ctrl = virt_nodes[0]
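        # Mark the 'mysql' partition on the chosen virtual controller as
        # preserved, so the Galera data should survive the reinstall below.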
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            preserve_partition(admin, ctrl['id'], "mysql")

        self.show_step(9)
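        # Reinstall the controller by re-running provisioning and deployment
        # for that single node.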
        task = self.fuel_web.client.provision_nodes(cluster_id,
                                                    [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)
        task = self.fuel_web.client.deploy_nodes(cluster_id, [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)

        self.show_step(10)
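        # First wait until MySQL on the reinstalled controller answers wsrep
        # status queries, then check that wsrep_local_state_comment reports
        # "Synced", i.e. the node has rejoined the Galera cluster.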
        cmd = "mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE 'wsrep%';\""
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            err_msg = ("Galera isn't ready on {0} node".format(
                ctrl['hostname']))
            wait(lambda: admin.execute_through_host(
                ctrl['ip'], cmd, auth=self.ssh_auth)['exit_code'] == 0,
                 timeout=10 * 60,
                 timeout_msg=err_msg)

            cmd = ("mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE "
                   "'wsrep_local_state_comment';\"")
            err_msg = ("The reinstalled node {0} is not synced with the "
                       "Galera cluster".format(ctrl['hostname']))
            wait(
                # pylint: disable=no-member
                lambda: admin.execute_through_host(
                    ctrl['ip'], cmd, auth=self.ssh_auth)['stdout'][0].split()[
                        1] == "Synced",
                # pylint: enable=no-member
                timeout=10 * 60,
                timeout_msg=err_msg)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(12)
        self.show_step(13)
        self.show_step(14)
        self.show_step(15)
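        # Map reboot command -> description; the forced reboot is backgrounded
        # with its output discarded so the SSH call can return cleanly.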
        cmds = {"reboot": "gracefully", "reboot -f >/dev/null &": "forcefully"}
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(virt_nodes[1]['ip'],
                                               cmd,
                                               auth=self.ssh_auth,
                                               timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} controller"
                    "node".format(cmds[cmd], virt_nodes[1]['name']))
            self.wait_for_slave(virt_nodes[1])

            self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(16)
        self.show_step(17)
        self.show_step(18)
        self.show_step(19)
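        # Repeat the reboot checks for a baremetal compute; rebooting it also
        # takes down the VM it hosts, so wait for all virtual controllers to
        # come back as well.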
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(compute['ip'],
                                               cmd,
                                               auth=self.ssh_auth,
                                               timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} compute"
                    "node".format(cmds[cmd], compute['name']))
            self.wait_for_slave(compute)
            for vm in virt_nodes:
                self.wait_for_slave(vm)

            self.fuel_web.run_ostf(cluster_id=cluster_id)
    def baremetal_deploy_cluster_with_virt_node(self):
        """Baremetal deployment of cluster with one virtual node

        Scenario:
            1. Create a cluster
            2. Assign compute and virt roles to the slave node
            3. Upload configuration for one VM
            4. Apply network template for the env and spawn the VM
            5. Assign controller role to the VM
            6. Deploy the environment
            7. Run OSTF
            8. Reset the environment
            9. Redeploy cluster
            10. Run OSTF

        Duration: 240m
        """

        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['compute', 'virt']})
        self.show_step(3)
        node = self.fuel_web.get_nailgun_node_by_name("slave-01")
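        # Size the VM from the host's capacity, leaving 2 units of memory and
        # CPU (presumably GiB and cores) for the baremetal host itself.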
        self.fuel_web.client.create_vm_nodes(
            node['id'], [{
                "id": 1,
                "mem": self.get_slave_total_mem(node['ip']) - 2,
                "cpu": self.get_slave_total_cpu(node['ip']) - 2,
                "vda_size": "100G"
            }])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 2,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for available nodes, "
                          "current nodes: \n{0}" + '\n'.join([
                              'Name: {0}, status: {1}, online: {2}'.format(
                                  i['name'], i['status'], i['online'])
                              for i in self.fuel_web.client.list_nodes()
                          ])))

        self.show_step(5)
        virt_nodes = {'vslave-01': ['controller']}
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
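        # Reset the environment and wait for every node, baremetal and
        # virtual, to come back online before redeploying.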
        self.fuel_web.stop_reset_env_wait(cluster_id)
        for node in self.fuel_web.client.list_nodes():
            self.wait_for_slave(node)

        self.show_step(9)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
    def deploy_cluster_without_management_net(self):
        """Deploy HA environment network template: no dedicate management nwk

        Scenario:
            1. Revert snapshot with 3 slaves
            2. Create cluster (HA) with Neutron VLAN
            3. Add 1 controller + cinder nodes
            4. Add 2 compute + cinder nodes
            5. Upload network template
            6. Delete 'management' network-group
            7. Run network verification
            8. Deploy cluster
            9. Run network verification
            10. Run health checks (OSTF)

        Duration 180m
        Snapshot deploy_cluster_without_management_net
        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
        )

        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'cinder'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': ['compute', 'cinder'],
            },
            update_interfaces=False
        )

        self.show_step(5)
        template = 'default_no_mgmt_nwk'
        logger.info('using template: {!s}'.format(template))
        network_template = get_network_template(template)
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        self.show_step(6)
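        # The 'default_no_mgmt_nwk' template is expected to move management
        # endpoints onto other networks, so the stock 'management'
        # network-group can be deleted via the Nailgun API.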
        mgmt_net = [
            grp for grp in self.fuel_web.client.get_network_groups()
            if grp['name'] == 'management'].pop()

        assert_true(
            self.fuel_web.client.del_network_group(mgmt_net['id']).status_code
            in {200, 204},
            'Network group delete failed'
        )

        assert_true(
            mgmt_net not in self.fuel_web.client.get_network_groups(),
            'Network group has not been deleted'
        )

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['smoke', 'sanity', 'ha', 'tests_platform'])

        self.env.make_snapshot('deploy_cluster_without_management_net')
    def network_config_consistency_on_reboot(self):
        """Deploy HA environment with Cinder, Neutron and network template

        Scenario:
            1. Revert snapshot with 5 slaves
            2. Create cluster (HA) with Neutron VLAN
            3. Add 3 controller and 1 compute + cinder nodes
            4. Upload 'default_ovs' network template
            5. Create custom network groups based on template endpoint
               assignments
            6. Run network verification
            7. Deploy cluster and run basic health checks
            8. Run network verification
            9. Check L3 network configuration on slaves
            10. Check that services are listening on their networks only
            11. Reboot a node
            12. Run network verification
            13. Check L3 network configuration on slaves
            14. Check that services are listening on their networks only
            15. Run OSTF

        Duration 180m
        Snapshot network_config_consistency_on_reboot
        """

        self.show_step(1, initialize=True)
        self.env.revert_snapshot("ready_with_5_slaves")

        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': NEUTRON_SEGMENT_TYPE,
                'tenant': 'netTemplate',
                'user': '******',
                'password': '******',
            })

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute', 'cinder'],
            },
            update_interfaces=False)

        self.show_step(4)
        network_template = get_network_template('default_ovs')
        self.fuel_web.client.upload_network_template(
            cluster_id=cluster_id, network_template=network_template)

        self.show_step(5)
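        # Carve /24 subnets for the template endpoints out of 10.200.0.0/16
        # and create the matching custom network groups (presumably skipping
        # any that already exist).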
        networks = self.generate_networks_for_template(
            template=network_template,
            ip_nets={'default': '10.200.0.0/16'},
            ip_prefixlen='24')
        existing_networks = self.fuel_web.client.get_network_groups()
        networks = self.create_custom_networks(networks, existing_networks)

        logger.debug('Networks: {0}'.format(
            self.fuel_web.client.get_network_groups()))

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)
        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.show_step(10)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(11)
        self.fuel_web.warm_restart_nodes([self.env.d_env.nodes().slaves[0]])
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(13)
        self.check_ipconfig_for_template(cluster_id, network_template,
                                         networks)
        self.show_step(14)
        self.check_services_networks(cluster_id, network_template)

        self.show_step(15)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['smoke', 'sanity', 'ha'])

        self.env.make_snapshot("network_config_consistency_on_reboot",
                               is_make=self.is_make_snapshot())
    def upgrade_net_tmpl_backup(self):
        """Deploy HA environment with Ceph, Neutron and network template

        Scenario:
            1. Revert snapshot with 9 slaves
            2. Create cluster (HA) with Neutron VLAN/VXLAN/GRE
            3. Add 3 controller
            4. Add 3 ceph osd
            5. Add 2 compute
            6. Upload 'upgrades' network template
            7. Create custom network groups based on template endpoint
               assignments
            8. Run network verification
            9. Deploy cluster
            10. Run network verification
            11. Run health checks (OSTF)
            12. Check L3 network configuration on slaves
            13. Check that services are listening on their networks only
            14. Install fuel-octane package
            15. Create backups for upgrade procedure

        Duration 180m
        Snapshot upgrade_net_tmpl_backup
        """
        self.check_run(self.source_snapshot_name)

        intermediate_snapshot = "prepare_upgrade_tmpl_before_backup"
        if not self.env.d_env.has_snapshot(intermediate_snapshot):
            self.show_step(1)
            self.env.revert_snapshot("ready_with_9_slaves")
            self.show_step(2)
            cluster_settings = {
                'volumes_ceph': True,
                'images_ceph': True,
                'volumes_lvm': False,
                'ephemeral_ceph': True,
                'objects_ceph': True,
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            }
            cluster_settings.update(self.cluster_creds)

            cluster_id = self.fuel_web.create_cluster(
                name=self.__class__.__name__, settings=cluster_settings)

            self.show_step(3)
            self.show_step(4)
            self.show_step(5)
            self.fuel_web.update_nodes(
                cluster_id,
                {
                    'slave-01': ['controller'],
                    'slave-02': ['controller'],
                    'slave-03': ['controller'],
                    'slave-04': ['ceph-osd'],
                    'slave-05': ['ceph-osd'],
                    'slave-06': ['ceph-osd'],
                    'slave-07': ['compute'],
                    'slave-08': ['compute']
                },
                update_interfaces=False)

            self.show_step(6)
            network_template = get_network_template("upgrades")
            self.fuel_web.client.upload_network_template(
                cluster_id=cluster_id, network_template=network_template)
            self.show_step(7)
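            # generate_networks_for_template apparently takes a plain CIDR
            # string on 7.0 and a dict keyed by node group on newer releases,
            # hence the two argument forms below.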
            # pylint: disable=redefined-variable-type
            if settings.UPGRADE_FUEL_FROM == "7.0":
                network = '10.200.0.0/16'
            else:
                network = {'default': '10.200.0.0/16'}
            # pylint: enable=redefined-variable-type
            networks = self.generate_networks_for_template(
                network_template, network, '24')
            existing_networks = self.fuel_web.client.get_network_groups()
            networks = self.create_custom_networks(networks, existing_networks)

            logger.debug('Networks: {0}'.format(
                self.fuel_web.client.get_network_groups()))

            self.show_step(8)
            self.fuel_web.verify_network(cluster_id)

            self.show_step(9)
            self.fuel_web.deploy_cluster_wait(cluster_id, timeout=180 * 60)

            self.show_step(10)
            self.fuel_web.verify_network(cluster_id)
            self.show_step(11)
            # Live migration test could fail
            # https://bugs.launchpad.net/fuel/+bug/1471172
            # https://bugs.launchpad.net/fuel/+bug/1604749
            self.check_ostf(cluster_id=cluster_id,
                            test_sets=['smoke', 'sanity', 'ha'],
                            ignore_known_issues=True)
            self.show_step(12)
            self.check_ipconfig_for_template(cluster_id, network_template,
                                             networks)

            self.show_step(13)
            self.check_services_networks(cluster_id, network_template)

            self.env.make_snapshot(intermediate_snapshot)

        # revert_snapshot will do nothing if there is no snapshot
        self.env.revert_snapshot(intermediate_snapshot)

        self.show_step(14)
        self.show_step(15)
        self.do_backup(self.backup_path, self.local_path,
                       self.repos_backup_path, self.repos_local_path)
        self.env.make_snapshot(self.source_snapshot_name, is_make=True)