    def spawn_three_vms_across_three_virt_nodes(self):
        """Spawn three vm nodes across three slave nodes

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to three slave nodes
            3. Upload VM configuration for one VM to each slave node
            4. Spawn VMs
            5. Wait till VMs become available for allocation

        Duration: 60m
        """

        self.env.revert_snapshot("ready_with_3_slaves")

        checkers.enable_feature_group(self.env, "advanced")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['tun']
            })

        asserts.assert_true(settings.HARDWARE['slave_node_memory'] >= 1024,
                            "Wrong SLAVE_NODE_MEMORY value: {0}. "
                            "Please allocate at least 1024 MB.".
                            format(settings.HARDWARE['slave_node_memory']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt'],
                'slave-02': ['compute', 'virt'],
                'slave-03': ['compute', 'virt']
            })

        hw_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in hw_nodes:
            self.fuel_web.client.create_vm_nodes(
                node['id'],
                [
                    {
                        "id": 1,
                        "mem": 1,
                        "cpu": 1
                    }
                ])

        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 6,
             timeout=60 * 120,
             timeout_msg=("Timeout waiting for 6 available nodes, "
                          "current nodes:\n" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.env.make_snapshot("spawn_three_vms_across_three_virt_nodes")
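
# Note: the timeout_msg above is assembled when wait() is first called, so the
# node list it reports reflects the state before waiting began, not the state
# at the moment of the timeout. The same summary string is rebuilt inline in
# several tests below; a minimal shared helper (a sketch, not fuel-qa API; it
# only assumes list_nodes() returns dicts with 'name', 'status' and 'online'):
def nodes_summary(client):
    """Return one line per nailgun node, for use in timeout messages."""
    return '\n'.join(
        'Name: {0}, status: {1}, online: {2}'.format(
            n['name'], n['status'], n['online'])
        for n in client.list_nodes())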
Example #3
    def spawn_two_vms_on_one_virt_node(self):
        """Spawn two vm nodes on one slave node

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to slave node
            3. Upload configuration for two VMs
            4. Spawn VMs
            5. Wait till VMs become available for allocation

        Duration: 60m
        """

        self.env.revert_snapshot("ready_with_1_slaves")

        checkers.enable_feature_group(self.env, "advanced")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['tun']
            })

        asserts.assert_true(
            settings.HARDWARE['slave_node_memory'] >= 2048,
            "Wrong SLAVE_NODE_MEMORY value: {0}. "
            "Please allocate at least 2048 MB.".format(
                settings.HARDWARE['slave_node_memory']))

        self.fuel_web.update_nodes(cluster_id,
                                   {'slave-01': ['compute', 'virt']})

        node_id = self.fuel_web.get_nailgun_node_by_name("slave-01")['id']

        self.fuel_web.client.create_vm_nodes(node_id, [{
            "id": 1,
            "mem": 1,
            "cpu": 1
        }, {
            "id": 2,
            "mem": 1,
            "cpu": 1
        }])

        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 3,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for 3 available nodes, "
                          "current nodes:\n" + '\n'.join([
                              'Name: {0}, status: {1}, online: {2}'.format(
                                  i['name'], i['status'], i['online'])
                              for i in self.fuel_web.client.list_nodes()
                          ])))

        self.env.make_snapshot("spawn_two_vms_on_one_virt_node")
Example #4
    def spawn_one_vm_on_one_virt_node(self):
        """Spawn one vm node on one slave node

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to slave node
            3. Upload configuration for one VM
            4. Spawn VM
            5. Wait till the VM becomes available for allocation

        Duration: 60m
        """

        self.env.revert_snapshot("ready_with_1_slaves")

        checkers.enable_feature_group(self.env, 'advanced')
        self.env.docker_actions.restart_container("nailgun")

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['tun']
            })

        asserts.assert_true(settings.HARDWARE['slave_node_memory'] >= 1024,
                            "Wrong SLAVE_NODE_MEMORY value: {0}. "
                            "Please allocate at least 1024 MB.".
                            format(settings.HARDWARE['slave_node_memory']))

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt']
            })

        node_id = self.fuel_web.get_nailgun_node_by_name("slave-01")['id']

        self.fuel_web.client.create_vm_nodes(
            node_id,
            [{
                "id": 1,
                "mem": 1,
                "cpu": 1
            }])

        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 2,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for 2 available nodes, "
                          "current nodes:\n" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))
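
# The SLAVE_NODE_MEMORY guard recurs across these tests with different
# thresholds (1024 MB here, 2048 MB when two VMs share one host). A shared
# sketch of it (an assumption, not fuel-qa API; it relies only on
# settings.HARDWARE['slave_node_memory'] being an integer number of MB):
def require_slave_memory(min_mb):
    """Fail fast if the configured slave memory cannot host the planned VMs."""
    actual = settings.HARDWARE['slave_node_memory']
    asserts.assert_true(
        actual >= min_mb,
        "Wrong SLAVE_NODE_MEMORY value: {0}. "
        "Please allocate at least {1} MB.".format(actual, min_mb))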
Example #5
    def check_can_not_enable_dpdk_on_non_dedicated_iface(self):
        """Check can not enable DPDK on non-dedicated interface

        Scenario:
            1. Create new environment with VLAN segmentation for Neutron
            2. Set KVM as Hypervisor
            3. Add controller and compute nodes
            4. Add private and storage networks to the interface
               and try to enable DPDK mode
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        enable_feature_group(self.env, 'experimental')
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": "vlan"
            }
        )

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })

        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'], role_status='pending_roles')[0]

        self.show_step(4)
        assigned_networks = {
            settings.iface_alias('eth0'): ['fuelweb_admin'],
            settings.iface_alias('eth1'): ['public'],
            settings.iface_alias('eth2'): ['management'],
            settings.iface_alias('eth3'): ['private', 'storage'],
            settings.iface_alias('eth4'): []
        }
        self.fuel_web.update_node_networks(compute['id'],
                                           interfaces_dict=assigned_networks)
        assert_raises(
            exceptions.BadRequest,
            self.fuel_web.enable_dpdk, compute['id'],
            force_enable=True)
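
# assert_raises above expects nailgun to reject DPDK on an interface that also
# carries the storage network. Spelled out, the same negative check reads
# (a sketch; exceptions.BadRequest is whatever this client raises on HTTP 400):
#
#     try:
#         self.fuel_web.enable_dpdk(compute['id'], force_enable=True)
#     except exceptions.BadRequest:
#         pass  # expected: the private network must be dedicated to DPDK
#     else:
#         raise AssertionError("DPDK got enabled on a non-dedicated iface")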
Example #6
    def baremetal_deploy_virt_nodes_on_one_compute(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        all virtual nodes on the same compute

        Scenario:
            1. Create a cluster
            2. Assign compute and virt roles to the slave node
            3. Upload configuration for three VMs
            4. Spawn the VMs and wait until they are available for allocation
            5. Assign controller role to the VMs
            6. Deploy the cluster
            7. Run OSTF

        Duration: 180m
        """
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt'],
            })

        self.show_step(3)
        node = self.fuel_web.get_nailgun_node_by_name("slave-01")
        self.fuel_web.client.create_vm_nodes(
            node['id'],
            [
                {"id": 1, "mem": 4, "cpu": 2, "vda_size": "100G"},
                {"id": 2, "mem": 4, "cpu": 2, "vda_size": "100G"},
                {"id": 3, "mem": 4, "cpu": 2, "vda_size": "100G"},
            ])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 4,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for 4 available nodes, "
                          "current nodes:\n" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']}
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
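
# The three VM descriptors above repeat one shape. A small builder sketch
# (hypothetical, not fuel-qa API; judging by the values used in these tests,
# "mem" is in GB, "cpu" is a vCPU count and "vda_size" is the system disk):
def vm_config(vm_id, mem_gb=4, cpus=2, disk="100G"):
    """Build one VM descriptor for client.create_vm_nodes()."""
    return {"id": vm_id, "mem": mem_gb, "cpu": cpus, "vda_size": disk}

# e.g. create_vm_nodes(node['id'], [vm_config(i) for i in range(1, 4)])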
Example #7
    def baremetal_deploy_virt_nodes_on_different_computes(self):
        """Baremetal deployment of a cluster with virtual nodes in HA mode;
        each virtual node on a separate compute

        Scenario:
            1. Create cluster
            2. Assign compute and virt roles to three slave nodes
            3. Upload VM configuration for one VM to each slave node
            4. Apply network template for the env and spawn the VMs
            5. Assign controller role to VMs
            6. Deploy cluster
            7. Run OSTF
            8. Mark 'mysql' partition to be preserved on one of controllers
            9. Reinstall the controller
            10. Verify that the reinstalled controller joined the Galera
                cluster and synced its state
            11. Run OSTF
            12. Gracefully reboot one controller using "reboot" command
                and wait till it comes up
            13. Run OSTF
            14. Forcefully reboot one controller using "reboot -f" command
                and wait till it comes up
            15. Run OSTF
            16. Gracefully reboot one compute using "reboot" command
                and wait till compute and controller come up
            17. Run OSTF
            18. Forcefully reboot one compute using "reboot -f" command
                and wait till compute and controller come up
            19. Run OSTF

        Duration: 360m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt'],
                'slave-02': ['compute', 'virt'],
                'slave-03': ['compute', 'virt']
            })

        self.show_step(3)
        for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
            self.fuel_web.client.create_vm_nodes(
                node['id'],
                [{
                    "id": 1,
                    "mem": 2,
                    "cpu": 2,
                    "vda_size": "100G"
                }])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf_ha")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 6,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for 6 available nodes, "
                          "current nodes:\n" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.show_step(5)
        virt_nodes = {
            'vslave-01': ['controller'],
            'vslave-02': ['controller'],
            'vslave-03': ['controller']
        }
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        virt_nodes = [n for n in self.fuel_web.client.list_nodes()
                      if n['name'].startswith('vslave')]
        ctrl = virt_nodes[0]
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            preserve_partition(admin, ctrl['id'], "mysql")

        self.show_step(9)
        task = self.fuel_web.client.provision_nodes(
            cluster_id, [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)
        task = self.fuel_web.client.deploy_nodes(
            cluster_id, [str(ctrl['id'])])
        self.fuel_web.assert_task_success(task)

        self.show_step(10)
        cmd = "mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE 'wsrep%';\""
        with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as admin:
            err_msg = ("Galera isn't ready on {0} node".format(
                ctrl['hostname']))
            wait(
                lambda: admin.execute_through_host(
                    ctrl['ip'], cmd, auth=self.ssh_auth)['exit_code'] == 0,
                timeout=10 * 60, timeout_msg=err_msg)

            cmd = ("mysql --connect_timeout=5 -sse \"SHOW STATUS LIKE "
                   "'wsrep_local_state_comment';\"")
            err_msg = ("The reinstalled node {0} is not synced with the "
                       "Galera cluster".format(ctrl['hostname']))
            wait(
                # pylint: disable=no-member
                lambda: admin.execute_through_host(
                    ctrl['ip'], cmd,
                    auth=self.ssh_auth)['stdout'][0].split()[1] == "Synced",
                # pylint: enable=no-member
                timeout=10 * 60,
                timeout_msg=err_msg)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(12)
        self.show_step(13)
        self.show_step(14)
        self.show_step(15)
        cmds = {"reboot": "gracefully", "reboot -f >/dev/null &": "forcefully"}
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(
                        virt_nodes[1]['ip'], cmd, auth=self.ssh_auth,
                        timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} controller"
                    "node".format(cmds[cmd], virt_nodes[1]['name']))
            self.wait_for_slave(virt_nodes[1])

            self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(16)
        self.show_step(17)
        self.show_step(18)
        self.show_step(19)
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        for cmd in cmds:
            with self.ssh_manager.get_remote(self.ssh_manager.admin_ip) as \
                    admin:
                asserts.assert_true(
                    admin.execute_through_host(
                        compute['ip'], cmd, auth=self.ssh_auth,
                        timeout=60)['exit_code'] == 0,
                    "Failed to {0} reboot {1} compute"
                    "node".format(cmds[cmd], compute['name']))
            self.wait_for_slave(compute)
            for vm in virt_nodes:
                self.wait_for_slave(vm)

            self.fuel_web.run_ostf(cluster_id=cluster_id)
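
# Step 10 polls Galera over SSH in two stages: first until mysql answers at
# all, then until wsrep_local_state_comment reports "Synced". The second probe
# as a standalone sketch (assuming run_cmd(cmd) returns the command's stdout):
def galera_synced(run_cmd):
    """Return True once the node reports a synced Galera state."""
    out = run_cmd("mysql --connect_timeout=5 -sse "
                  "\"SHOW STATUS LIKE 'wsrep_local_state_comment';\"")
    fields = out.split()  # -sse prints: wsrep_local_state_comment<TAB>Synced
    return len(fields) > 1 and fields[1] == "Synced"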
Example #8
    def baremetal_deploy_cluster_with_virt_node(self):
        """Baremetal deployment of cluster with one virtual node

        Scenario:
            1. Create a cluster
            2. Assign compute and virt roles to the slave node
            3. Upload configuration for one VM
            4. Apply network template for the env and spawn the VM
            5. Assign controller role to the VM
            6. Deploy the environment
            7. Run OSTF
            8. Reset the environment
            9. Redeploy cluster
            10. Run OSTF

        Duration: 240m
        """

        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        checkers.enable_feature_group(self.env, "advanced")
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                'net_provider': 'neutron',
                'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
            })

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['compute', 'virt']
            })
        self.show_step(3)
        node = self.fuel_web.get_nailgun_node_by_name("slave-01")
        self.fuel_web.client.create_vm_nodes(
            node['id'],
            [
                {
                    "id": 1,
                    "mem": self.get_slave_total_mem(node['ip']) - 2,
                    "cpu": self.get_slave_total_cpu(node['ip']) - 2,
                    "vda_size": "100G"
                }
            ])

        self.show_step(4)
        self.update_virt_vm_template()
        net_template = get_network_template("baremetal_rf")
        self.fuel_web.client.upload_network_template(cluster_id, net_template)
        self.fuel_web.spawn_vms_wait(cluster_id)
        wait(lambda: len(self.fuel_web.client.list_nodes()) == 2,
             timeout=60 * 60,
             timeout_msg=("Timeout waiting for 2 available nodes, "
                          "current nodes:\n" + '\n'.join(
                              ['Name: {0}, status: {1}, online: {2}'.
                               format(i['name'], i['status'], i['online'])
                               for i in self.fuel_web.client.list_nodes()])))

        self.show_step(5)
        virt_nodes = {'vslave-01': ['controller']}
        self.update_virtual_nodes(cluster_id, virt_nodes)

        self.show_step(6)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(7)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(8)
        self.fuel_web.stop_reset_env_wait(cluster_id)
        for node in self.fuel_web.client.list_nodes():
            self.wait_for_slave(node)

        self.show_step(9)
        self.deploy_cluster_wait(cluster_id)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)
Example #9
    def deploy_cluster_with_dpdk_vlan(self):
        """Deploy cluster with DPDK with VLAN segmentation

        Scenario:
            1. Create new environment with VLAN segmentation for Neutron
            2. Set KVM as Hypervisor
            3. Add controller and compute nodes
            4. Configure private network in DPDK mode
            5. Configure HugePages for compute nodes
            6. Run network verification
            7. Deploy environment
            8. Run network verification
            9. Run OSTF
            10. Reboot compute
            11. Run OSTF
            12. Check option "firewall_driver" in config files
            13. Run instance on compute with DPDK and check its availability
                via floating IP

        Snapshot: deploy_cluster_with_dpdk_vlan

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        enable_feature_group(self.env, 'experimental')
        self.show_step(2)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": "vlan"
            }
        )

        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['cinder']
            })

        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'], role_status='pending_roles')[0]

        self.show_step(4)
        self.fuel_web.enable_dpdk(compute['id'])

        self.show_step(5)
        self.fuel_web.setup_hugepages(
            compute['id'], hp_2mb=256, hp_dpdk_mb=1024)

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(9)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(10)
        # reboot compute
        self.fuel_web.warm_restart_nodes(
            [self.fuel_web.get_devops_node_by_nailgun_node(compute)])

        # Wait until OpenStack services are UP
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(11)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(12)
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        check_firewall_driver(compute['ip'], compute['roles'][0], 'noop')

        self.show_step(13)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        self.check_dpdk_instance_connectivity(os_conn, cluster_id)

        self.env.make_snapshot("deploy_cluster_with_dpdk_vlan")
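
# setup_hugepages(hp_2mb=256, hp_dpdk_mb=1024) above reserves two pools on the
# compute: 256 pages of 2 MB for guests plus 1024 MB for the DPDK poll-mode
# driver. A quick sanity check of the cost (a sketch; the parameter semantics
# are inferred from the names, not confirmed by fuel-qa documentation):
def hugepage_budget_mb(hp_2mb, hp_dpdk_mb):
    """Total RAM in MB withheld from regular allocation by hugepages."""
    return hp_2mb * 2 + hp_dpdk_mb

assert hugepage_budget_mb(256, 1024) == 1536  # 256 * 2 MB + 1024 MB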
Example #10
    def deploy_cluster_with_dpdk_bond(self):
        """Deploy cluster with DPDK, active-backup bonding and Neutron VLAN

        Scenario:
            1. Create cluster with VLAN for Neutron and KVM
            2. Add 1 node with controller role
            3. Add 2 nodes with compute and cinder roles
            4. Setup bonding for all interfaces: 1 for admin, 1 for private
               and 1 for public/storage/management networks
            5. Enable DPDK for bond with private network on all computes
            6. Configure HugePages for compute nodes
            7. Run network verification
            8. Deploy the cluster
            9. Run network verification
            10. Run OSTF
            11. Run instance on compute with DPDK and check its availability
                via floating IP

        Duration 90m
        Snapshot deploy_cluster_with_dpdk_bond
        """

        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        enable_feature_group(self.env, 'experimental')
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings={
                "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': ['compute', 'cinder']
            },
            update_interfaces=False
        )

        self.show_step(4)
        nailgun_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        for node in nailgun_nodes:
            self.fuel_web.update_node_networks(
                node['id'], interfaces_dict=deepcopy(self.INTERFACES),
                raw_data=deepcopy(self.bond_config)
            )

        computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id,
            roles=['compute'],
            role_status='pending_roles')

        self.show_step(5)
        for node in computes:
            self.fuel_web.enable_dpdk(node['id'])

        self.show_step(6)
        for node in computes:
            self.fuel_web.setup_hugepages(
                node['id'], hp_2mb=256, hp_dpdk_mb=1024)

        self.show_step(7)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(8)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(11)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        self.check_dpdk_instance_connectivity(os_conn, cluster_id)

        self.env.make_snapshot("deploy_cluster_with_dpdk_bond")
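
# self.bond_config and self.INTERFACES above are class attributes defined
# elsewhere in the suite. For orientation only, nailgun bond definitions
# passed via raw_data are lists of dicts along these lines (a hypothetical
# minimal shape, not copied from this test class):
EXAMPLE_BOND_CONFIG = [{
    'name': 'bond0',
    'type': 'bond',
    'mode': 'active-backup',
    'slaves': [{'name': 'eth3'}, {'name': 'eth4'}],
    'assigned_networks': [],  # network-to-bond mapping comes from INTERFACES
}]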
Example #14
    def deploy_ovs_firewall_and_dpdk_vxlan(self):
        """Deploy non-HA cluster with VXLAN, OVS firewall driver and DPDK

        Scenario:
            1. Create new environment with VXLAN segmentation for Neutron
            2. Add controller and compute nodes
            3. Enable OVS firewall driver for neutron security groups
            4. Configure private network in DPDK mode
            5. Configure HugePages for compute nodes
            6. Run network verification
            7. Deploy environment
            8. Run OSTF
            9. Check option "firewall_driver" in config files
            10. Boot instance with custom security group

        Snapshot: deploy_ovs_firewall_and_dpdk_vxlan

        """
        self.check_run("deploy_ovs_firewall_and_dpdk_vxlan")
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1)
        enable_feature_group(self.env, 'experimental')
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": "tun"
            })

        self.show_step(2)
        self.fuel_web.update_nodes(cluster_id, {
            'slave-01': ['controller'],
            'slave-02': ['compute']
        })

        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'], role_status='pending_roles')[0]

        self.show_step(3)
        self.fuel_web.set_ovs_firewall_driver(cluster_id)

        self.show_step(4)
        self.fuel_web.enable_dpdk(compute['id'])

        self.show_step(5)
        self.fuel_web.setup_hugepages(compute['id'],
                                      hp_2mb=256,
                                      hp_dpdk_mb=1024)

        self.show_step(6)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(7)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(8)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(9)
        nodes = self.fuel_web.client.list_cluster_nodes(cluster_id=cluster_id)
        for node in nodes:
            check_firewall_driver(node['ip'], node['roles'][0], 'openvswitch')

        self.show_step(10)
        compute = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['compute'])[0]
        self.check_ovs_firewall_functionality(cluster_id,
                                              compute['ip'],
                                              dpdk=True)
        self.env.make_snapshot("deploy_ovs_firewall_and_dpdk_vxlan",
                               is_make=True)
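
# check_firewall_driver above inspects the neutron agent configuration on each
# node over SSH. Against an already-fetched config file, the same assertion
# could be written locally (a sketch; the real helper's implementation and the
# exact file it reads are not shown here):
from six.moves import configparser

def assert_firewall_driver(ini_path, expected):
    """Assert [securitygroup] firewall_driver in a neutron agent ini file."""
    cfg = configparser.ConfigParser()
    cfg.read(ini_path)
    actual = cfg.get('securitygroup', 'firewall_driver')
    assert actual == expected, "firewall_driver={0}, expected {1}".format(
        actual, expected)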
Example #15
    def deploy_toolchain_with_reduced_footprint(self):
        """Deploy a cluster with the LMA Toolchain plugins using the Reduced
        Footprint feature (aka virt nodes).

        Scenario:
            1. Enable the advanced features.
            2. Upload the LMA Toolchain plugins to the master node
            3. Install the plugins
            4. Create the cluster
            5. Add 1 node with virt role
            6. Spawn 1 virtual machine on the virt node
            7. Add 1 node with controller role
            8. Add 1 node with compute and cinder roles
            9. Assign the StackLight roles to the virtual machine
            10. Deploy the cluster
            11. Check that LMA Toolchain plugins are running
            12. Run OSTF

        Duration 60m
        Snapshot deploy_toolchain_with_reduced_footprint
        """
        self.check_run("deploy_toolchain_with_reduced_footprint")

        self.env.revert_snapshot("ready_with_3_slaves")

        fuel_web = self.helpers.fuel_web
        nailgun_client = self.helpers.nailgun_client
        checkers.enable_feature_group(self.env, "advanced")

        self.prepare_plugins()

        self.helpers.create_cluster(
            name="deploy_toolchain_with_reduced_footprint",
            settings={
                "net_provider": "neutron",
                "net_segment_type": fuelweb_settings.NEUTRON_SEGMENT["tun"]
            }
        )

        self.activate_plugins()

        self.helpers.add_nodes_to_cluster({
            "slave-03": ["virt"],
        }, redeploy=False)

        initial_nodes = nailgun_client.list_nodes()
        virt_node = None
        for node in initial_nodes:
            if "virt" in node["pending_roles"]:
                virt_node = node
                break

        asserts.assert_is_not_none(virt_node,
                                   "Couldn't find any node with the virt role")
        vm_ram = 2
        asserts.assert_true(
            virt_node["meta"]["memory"]["total"] > vm_ram * (1024 ** 3),
            "Not enough RAM on node {0}, at least {1} GB required".format(
                virt_node["name"], vm_ram))

        nailgun_client.create_vm_nodes(
            virt_node["id"],
            [{"id": 1, "mem": vm_ram, "cpu": 1, "vda_size": "120G"}])

        logger.info(
            "Spawning 1 virtual machine on node {}".format(virt_node["id"]))
        fuel_web.spawn_vms_wait(self.helpers.cluster_id)

        logger.info("Waiting for the virtual manchine to be up...")
        wait(lambda: len(nailgun_client.list_nodes()) == 4,
             timeout=10 * 60,
             timeout_msg=("Timeout waiting for 4 nodes to be ready, "
                          "current nodes:{0}\n".format('\n'.join(
                              ['id: {0}, name: {1}, online: {2}'.
                               format(i["id"], i['name'], i['online'])
                               for i in nailgun_client.list_nodes()]))))
        vm_node = None
        for node in nailgun_client.list_nodes():
            if node["id"] not in [x["id"] for x in initial_nodes]:
                vm_node = node
                break
        asserts.assert_is_not_none(vm_node,
                                   "Couldn't find the virtual machine node")

        logger.info(
            "Assigning StackLight roles to node {}".format(vm_node["id"]))
        nailgun_client.update_nodes([{
            "cluster_id": self.helpers.cluster_id,
            "id": vm_node["id"],
            "pending_roles": settings.stacklight_roles,
            "pending_addition": True
        }])
        # The mapping between the hypervisor's NICs and the virtual machine's
        # NICs is defined on the Fuel node in
        # /etc/puppet/modules/osnailyfacter/templates/vm_libvirt.erb. In
        # practice, only the management and storage interfaces need to be
        # swapped. Note that the interface names on the virtual machines are
        # the legacy ones (ethX instead of enpXsY)
        fuel_web.update_node_networks(
            vm_node["id"],
            {
                "eth0": ["fuelweb_admin", "private"],
                "eth1": ["public"],
                "eth2": ["storage"],
                "eth3": ["management"],
                "eth4": []
            })
        self.helpers.deploy_cluster({
            "slave-01": ["controller"],
            "slave-02": ["compute", "cinder"],
        })
        # The 'hiera' and post-deployment tasks have to be re-executed
        # "manually" for the virt node
        self.helpers.run_tasks([virt_node], tasks=['hiera'],
                               start="post_deployment_start", timeout=20 * 60)

        self.check_plugins_online()
        self.helpers.run_ostf()

        self.env.make_snapshot("deploy_toolchain_with_reduced_footprint",
                               is_make=True)
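
# The spawned VM above is identified by diffing node ids against the initial
# snapshot. The same selection as a reusable sketch (over the same nailgun
# node dicts; not fuel-qa API):
def new_nodes(before, after):
    """Return nodes present in `after` but absent from `before`, by id."""
    seen = {n["id"] for n in before}
    return [n for n in after if n["id"] not in seen]

# e.g. vm_node = new_nodes(initial_nodes, nailgun_client.list_nodes())[0]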