Example No. 1
    def check_example_plugin(self):
        """Check if service ran on controller"""

        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=self.cluster_id,
            roles=['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for node in d_ctrls:
            logger.info("Check plugin service on node {0}".format(node.name))
            with self.fuel_web.get_ssh_for_node(node.name) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))
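The pattern above (pgrep exit code, exactly one matching process, then an HTTP probe with curl) can be reproduced outside the test framework. A minimal standalone sketch, assuming only that the plugin service is named fuel-simple-service and listens on localhost:8234 as in the example; it is illustrative and not part of fuel-qa:

# Standalone sketch of the same health check (Python 3.7+, standard library only).
# Assumes the service name and port shown in the example above.
import subprocess

def check_simple_service(pattern='fuel-simple-service', port=8234):
    # pgrep exits with 0 when at least one process matches the pattern
    pgrep = subprocess.run(['pgrep', '-f', pattern],
                           capture_output=True, text=True)
    assert pgrep.returncode == 0, 'service process not found'
    # exactly one matching process is expected
    assert len(pgrep.stdout.splitlines()) == 1, 'unexpected number of processes'
    # the service must answer on its HTTP port
    curl = subprocess.run(['curl', '-s', 'localhost:{0}'.format(port)],
                          capture_output=True, text=True)
    assert curl.returncode == 0, 'curl failed: {0}'.format(curl.stderr)

if __name__ == '__main__':
    check_simple_service()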
Example No. 2
    def check_stopping_resources(self):
        """Check stopping pacemaker resources"""

        logger.info(
            "Waiting {} seconds for changing pacemaker status of {}".format(
                self.pacemaker_restart_timeout, self.primary_controller_fqdn))
        time.sleep(self.pacemaker_restart_timeout)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            def checking_health_disk_attribute():
                logger.info("Checking for '#health_disk' attribute")
                cibadmin_status_xml = run_on_remote_get_results(
                    remote, 'cibadmin --query --scope status')['stdout_str']
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return '#health_disk' in pcs_attribs[
                    self.primary_controller_fqdn]

            def checking_for_red_in_health_disk_attribute():
                logger.info(
                    "Checking for '#health_disk' attribute have 'red' value")
                cibadmin_status_xml = run_on_remote_get_results(
                    remote, 'cibadmin --query --scope status')['stdout_str']
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return pcs_attribs[
                    self.primary_controller_fqdn]['#health_disk'] == 'red'

            def check_stopping_resources():
                logger.info("Checking for 'running_resources "
                            "attribute have '0' value")

                primary_ctrl = \
                    self.primary_controller.get_ip_address_by_network_name(
                        'admin')
                pcs_status = parse_pcs_status_xml(primary_ctrl)

                pcs_attribs = get_pcs_nodes(pcs_status)
                return pcs_attribs[
                    self.primary_controller_fqdn]['resources_running'] == '0'

            wait(checking_health_disk_attribute,
                 "Attribute #health_disk wasn't appeared "
                 "in attributes on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout),
                 timeout=self.pcs_check_timeout)

            wait(checking_for_red_in_health_disk_attribute,
                 "Attribute #health_disk doesn't have 'red' value "
                 "on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout),
                 timeout=self.pcs_check_timeout)

            wait(check_stopping_resources,
                 "Attribute 'running_resources' doesn't have '0' value "
                 "on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout),
                 timeout=self.pcs_check_timeout)
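The wait(...) calls above poll a predicate until it returns a truthy value or a timeout expires. A minimal sketch of that polling pattern, not the actual helper used by fuel-qa (whose signature differs):

# Illustrative polling helper; the real wait() in the test framework is different.
import time

def poll(predicate, timeout, interval=5, timeout_msg='condition not met'):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    raise AssertionError('{0} after {1} seconds'.format(timeout_msg, timeout))

# usage: poll(checking_health_disk_attribute, timeout=300)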
Example No. 3
    def check_starting_resources(self):
        """Check starting pacemaker resources"""

        logger.info(
            "Waiting {} seconds for changing pacemaker status of {}".format(
                self.pacemaker_restart_timeout,
                self.primary_controller_fqdn))
        time.sleep(self.pacemaker_restart_timeout)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            def checking_health_disk_attribute_is_not_present():
                logger.info(
                    "Checking for '#health_disk' attribute "
                    "is not present on node {}".format(
                        self.primary_controller_fqdn))
                cibadmin_status_xml = remote.check_call(
                    'cibadmin --query --scope status').stdout_str
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return '#health_disk' not in pcs_attribs[
                    self.primary_controller_fqdn]

            wait(checking_health_disk_attribute_is_not_present,
                 timeout=self.pcs_check_timeout,
                 timeout_msg="Attribute #health_disk was appeared "
                             "in attributes on node {} in {} seconds".format(
                                 self.primary_controller_fqdn,
                                 self.pcs_check_timeout))

            self.fuel_web.assert_ha_services_ready(self.cluster_id)
Example No. 4
    def check_batch_instance_creation(self):
        """Create several instance simultaneously."""
        count = 10
        vm_name = 'vcenter_vm'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        os_conn.create_server(name=vm_name,
                              image=image,
                              availability_zone=self.vcenter_az,
                              net_id=net['id'],
                              security_groups=[sg],
                              min_count=count,
                              timeout=210)

        for i in range(1, count + 1):
            vm = os_conn.get_server_by_name('{name}-{index}'.format(
                name=vm_name, index=i))
            logger.info("Check state for {} instance".format(vm.name))
            helpers.wait(
                lambda: os_conn.get_instance_detail(vm).status == "ACTIVE",
                timeout=180,
                timeout_msg="Instance state is not active")

        for i in range(1, count + 1):
            vm = os_conn.get_server_by_name('{name}-{index}'.format(
                name=vm_name, index=i))
            os_conn.delete_instance(vm)
            os_conn.verify_srv_deleted(vm)
Example No. 5
    def check_nova_conf(self):
        """Verify nova-compute vmware configuration."""
        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        data = []
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            target_node = nova['target_node']['current']['id']
            conf_dict = self.get_nova_conf_dict(az, nova)
            if target_node == 'controllers':
                conf_path = '/etc/nova/nova-compute.d/vmware-vcenter_{0}.' \
                            'conf'.format(nova['service_name'])
                for node in ctrl_nodes:
                    params = (node['hostname'], node['ip'], conf_path,
                              conf_dict)
                    data.append(params)
            else:
                conf_path = '/etc/nova/nova-compute.conf'
                for node in nodes:
                    if node['hostname'] == target_node:
                        params = (node['hostname'], node['ip'], conf_path,
                                  conf_dict)
                        data.append(params)

        for hostname, ip, conf_path, conf_dict in data:
            logger.info("Check nova conf of {0}".format(hostname))
            self.check_config(ip, conf_path, conf_dict)
Example No. 6
    def check_vmware_service_actions(self):
        """Disable vmware host (cluster) and check instance creation on
        enabled cluster."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        services = os_conn.get_nova_service_list()
        vmware_services = []
        for service in services:
            if service.binary == 'nova-compute' and \
               service.zone == self.vcenter_az:
                vmware_services.append(service)
                os_conn.disable_nova_service(service)

        image = os_conn.get_image(self.vmware_image)
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)

        for service in vmware_services:
            logger.info("Check {}".format(service.host))
            os_conn.enable_nova_service(service)
            vm = os_conn.create_server(image=image,
                                       timeout=210,
                                       availability_zone=self.vcenter_az,
                                       net_id=net['id'],
                                       security_groups=[sg])
            vm_host = getattr(vm, 'OS-EXT-SRV-ATTR:host')
            assert_true(
                service.host == vm_host, 'Instance was launched on a'
                ' disabled vmware cluster')
            os_conn.delete_instance(vm)
            os_conn.verify_srv_deleted(vm)
            os_conn.disable_nova_service(service)
Example No. 7
 def enable_plugins(self):
     """Enable plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.enable_plugin()
         logger.info("{} plugin has been enabled.".format(plugin_name))
Example No. 8
 def upload_plugins(self):
     """Upload plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.upload_plugin()
         logger.info("{} plugin has been uploaded.".format(plugin_name))
Example No. 9
    def check_up_vips(self):
        """Ensure that VIPs are moved to another controller."""
        vip_contr = self._get_controller_with_vip()

        assert_true(vip_contr and vip_contr != self.vip_contr,
                    'VIPs have not been moved to another controller')
        logger.info('VIPs have been moved to another controller')
Example No. 10
    def check_starting_resources(self):
        """Check starting pacemaker resources"""

        logger.info(
            "Waiting {} seconds for changing pacemaker status of {}".format(
                self.pacemaker_restart_timeout, self.primary_controller_fqdn))
        time.sleep(self.pacemaker_restart_timeout)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            def checking_health_disk_attribute_is_not_present():
                logger.info("Checking for '#health_disk' attribute "
                            "is not present on node {}".format(
                                self.primary_controller_fqdn))
                cibadmin_status_xml = remote.check_call(
                    'cibadmin --query --scope status').stdout_str
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return '#health_disk' not in pcs_attribs[
                    self.primary_controller_fqdn]

            wait(checking_health_disk_attribute_is_not_present,
                 timeout=self.pcs_check_timeout,
                 timeout_msg="Attribute #health_disk was appeared "
                 "in attributes on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout))

            self.fuel_web.assert_ha_services_ready(self.cluster_id)
Example No. 11
    def check_nova_conf(self):
        """Verify nova-compute vmware configuration."""
        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        data = []
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            target_node = nova['target_node']['current']['id']
            conf_dict = self.get_nova_conf_dict(az, nova)
            if target_node == 'controllers':
                conf_path = '/etc/nova/nova-compute.d/vmware-vcenter_{0}.' \
                            'conf'.format(nova['service_name'])
                for node in ctrl_nodes:
                    params = (node['hostname'], node['ip'], conf_path,
                              conf_dict)
                    data.append(params)
            else:
                conf_path = '/etc/nova/nova-compute.conf'
                for node in nodes:
                    if node['hostname'] == target_node:
                        params = (node['hostname'], node['ip'], conf_path,
                                  conf_dict)
                        data.append(params)

        for hostname, ip, conf_path, conf_dict in data:
            logger.info("Check nova conf of {0}".format(hostname))
            self.check_config(ip, conf_path, conf_dict)
Example No. 12
    def check_vm_connect(self):
        """Ensure connectivity between VMs."""
        if self.vip_contr:
            primary_ctrl_name = self._get_controller_with_vip()
        else:
            primary_ctrl_name = self.fuel_web.get_nailgun_primary_node(
                self.env.d_env.nodes().slaves[0]).name

        private_ips = {}
        floating_ips = {}

        for srv in self.vms_to_ping:
            floating = self.os_conn.assign_floating_ip(srv)
            floating_ips[srv] = floating.ip
            logger.info("Floating address {0} was associated with instance "
                        "{1}".format(floating_ips[srv], srv.name))

            private_ips[srv] = self.os_conn.get_nova_instance_ip(
                srv, net_name=self.net_name)

        for vm in itertools.combinations(self.vms_to_ping, 2):
            logger.info('Try to ping from {src} ({src_vm}) to {dst} '
                        '({dst_vm})'.format(src=floating_ips[vm[0]],
                                            dst=private_ips[vm[1]],
                                            src_vm=vm[0].name,
                                            dst_vm=vm[1].name))

            assert_true(
                self.ping_from_instance(floating_ips[vm[0]],
                                        private_ips[vm[1]], primary_ctrl_name),
                'Ping between VMs failed')
Example No. 13
 def check_stopping_resources():
     logger.info("Checking for 'running_resources "
                 "attribute have '0' value")
     pcs_status = parse_pcs_status_xml(remote)
     pcs_attribs = get_pcs_nodes(pcs_status)
     return pcs_attribs[
         self.primary_controller_fqdn]['resources_running'] == '0'
Example No. 14
    def check_stopping_resources(self):
        """Check stopping pacemaker resources"""

        logger.info(
            "Waiting {} seconds for changing pacemaker status of {}".format(
                self.pacemaker_restart_timeout, self.primary_controller_fqdn))
        time.sleep(self.pacemaker_restart_timeout)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            def checking_health_disk_attribute():
                logger.info("Checking for '#health_disk' attribute")
                cibadmin_status_xml = remote.check_call(
                    'cibadmin --query --scope status').stdout_str
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return '#health_disk' in pcs_attribs[
                    self.primary_controller_fqdn]

            def checking_for_red_in_health_disk_attribute():
                logger.info(
                    "Checking for '#health_disk' attribute have 'red' value")
                cibadmin_status_xml = remote.check_call(
                    'cibadmin --query --scope status').stdout_str
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return pcs_attribs[
                    self.primary_controller_fqdn]['#health_disk'] == 'red'

            def check_stopping_resources():
                logger.info("Checking for 'running_resources "
                            "attribute have '0' value")

                nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
                    self.primary_controller)
                pcs_status = parse_pcs_status_xml(nail_node['ip'])

                pcs_attribs = get_pcs_nodes(pcs_status)
                return pcs_attribs[
                    self.primary_controller_fqdn]['resources_running'] == '0'

            wait(checking_health_disk_attribute,
                 timeout=self.pcs_check_timeout,
                 timeout_msg="Attribute #health_disk wasn't appeared "
                 "in attributes on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout))

            wait(checking_for_red_in_health_disk_attribute,
                 timeout=self.pcs_check_timeout,
                 timeout_msg="Attribute #health_disk doesn't have 'red' value "
                 "on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout))

            wait(check_stopping_resources,
                 timeout=self.pcs_check_timeout,
                 timeout_msg="Attribute 'running_resources' "
                 "doesn't have '0' value "
                 "on node {} in {} seconds".format(
                     self.primary_controller_fqdn, self.pcs_check_timeout))
Example No. 15
 def enable_plugins(self):
     """Enable plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.enable_plugin()
         logger.info("{} plugin has been enabled.".format(plugin_name))
Example No. 16
 def install_plugins(self):
     """Install plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.install_plugin()
         logger.info("{} plugin has been installed.".format(plugin_name))
Example No. 17
    def check_example_plugin(self):
        """Check if service ran on controller"""

        cmd_curl = 'curl localhost:8234'
        cmd = 'pgrep -f fuel-simple-service'

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id=self.cluster_id,
            roles=['controller'])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for node in d_ctrls:
            logger.info("Check plugin service on node {0}".format(node.name))
            with self.fuel_web.get_ssh_for_node(node.name) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(0, res_pgrep['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_pgrep['stderr'], node))
                assert_equal(1, len(res_pgrep['stdout']),
                             'Failed with error {0} on the '
                             'node {1}'.format(res_pgrep['stderr'], node))
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(0, res_curl['exit_code'],
                             'Failed with error {0} '
                             'on node {1}'.format(res_curl['stderr'], node))
Example No. 18
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[
                    self.primary_controller_fqdn]['root_free']

            logger.info("Free space in root on primary controller - {}".format(
                controller_space_on_root))

            controller_space_to_filled = str(
                int(controller_space_on_root) - self.rabbit_disk_free_limit -
                1)

            logger.info("Need to fill space on root - {}".format(
                controller_space_to_filled))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
Example No. 19
    def check_batch_instance_creation(self):
        """Create several instance simultaneously."""
        count = 10
        vm_name = 'vcenter_vm'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        os_conn.create_server(name=vm_name, image=image,
                              availability_zone=self.vcenter_az,
                              net_id=net['id'], security_groups=[sg],
                              min_count=count, timeout=210)

        for i in range(1, count + 1):
            vm = os_conn.get_server_by_name('{name}-{index}'.format(
                name=vm_name, index=i))
            logger.info("Check state for {} instance".format(vm.name))
            helpers.wait(
                lambda: os_conn.get_instance_detail(vm).status == "ACTIVE",
                timeout=180, timeout_msg="Instance state is not active"
            )

        for i in range(1, count + 1):
            vm = os_conn.get_server_by_name('{name}-{index}'.format(
                name=vm_name, index=i))
            os_conn.delete_instance(vm)
            os_conn.verify_srv_deleted(vm)
Example No. 20
 def upload_plugins(self):
     """Upload plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.upload_plugin()
         logger.info("{} plugin has been uploaded.".format(plugin_name))
Example No. 21
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        node = self.fuel_web.get_nailgun_node_by_name(
            self.primary_controller.name)
        pacemaker_attributes = self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='cibadmin --query --scope status'
        )['stdout_str']
        controller_space_on_root = get_pacemaker_nodes_attributes(
            pacemaker_attributes)[self.primary_controller_fqdn]['root_free']

        logger.info("Free space in root on primary controller - {}".format(
                    controller_space_on_root))

        controller_space_to_filled = str(
            int(controller_space_on_root) - self.rabbit_disk_free_limit - 1
        )

        logger.info("Need to fill space on root - {}".format(
            controller_space_to_filled))

        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='fallocate -l {}M /root/bigfile2 && sync'.format(
                controller_space_to_filled)
        )
        self.ssh_manager.execute_on_remote(
            ip=node['ip'],
            cmd='ls /root/bigfile2',
            assert_ec_equal=[0])
Example No. 22
 def install_plugins(self):
     """Install plugins for Fuel if it is required"""
     for plugin_name in self.required_plugins:
         self.plugin_name = plugin_name
         self.plugin_path = self.plugins_paths[plugin_name]
         self.install_plugin()
         logger.info("{} plugin has been installed.".format(plugin_name))
Example No. 23
    def check_up_vips(self):
        """Ensure that VIPs are moved to another controller."""
        vip_contr = self._get_controller_with_vip()

        assert_true(vip_contr and vip_contr != self.vip_contr,
                    'VIPs have not been moved to another controller')
        logger.info('VIPs have been moved to another controller')
Example No. 24
    def check_vm_connect(self):
        """Ensure connectivity between VMs."""
        if self.vip_contr:
            primary_ctrl_name = self._get_controller_with_vip()
        else:
            primary_ctrl_name = self.fuel_web.get_nailgun_primary_node(
                self.env.d_env.nodes().slaves[0]).name

        private_ips = {}
        floating_ips = {}

        for srv in self.vms_to_ping:
            floating = self.os_conn.assign_floating_ip(srv)
            floating_ips[srv] = floating.ip
            logger.info("Floating address {0} was associated with instance "
                        "{1}".format(floating_ips[srv], srv.name))

            private_ips[srv] = self.os_conn.get_nova_instance_ip(
                srv, net_name=self.net_name)

        for vm in itertools.combinations(self.vms_to_ping, 2):
            logger.info('Try to ping from {src} ({src_vm}) to {dst} '
                        '({dst_vm})'.format(src=floating_ips[vm[0]],
                                            dst=private_ips[vm[1]],
                                            src_vm=vm[0].name,
                                            dst_vm=vm[1].name))

            assert_true(self.ping_from_instance(floating_ips[vm[0]],
                                                private_ips[vm[1]],
                                                primary_ctrl_name),
                        'Ping between VMs failed')
Example No. 25
    def fill_root_below_rabbit_disk_free_limit(self):
        """Fill root more to below rabbit disk free limit"""

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            pacemaker_attributes = run_on_remote_get_results(
                remote, 'cibadmin --query --scope status')['stdout_str']

            controller_space_on_root = get_pacemaker_nodes_attributes(
                pacemaker_attributes)[self.primary_controller_fqdn][
                'root_free']

            logger.info(
                "Free space in root on primary controller - {}".format(
                    controller_space_on_root
                ))

            controller_space_to_filled = str(
                int(
                    controller_space_on_root
                ) - self.rabbit_disk_free_limit - 1)

            logger.info(
                "Need to fill space on root - {}".format(
                    controller_space_to_filled
                ))

            run_on_remote_get_results(
                remote, 'fallocate -l {}M /root/bigfile2'.format(
                    controller_space_to_filled))
            check_file_exists(remote, '/root/bigfile2')
Example No. 26
    def check_example_plugin(self):
        """Check if service ran on controller"""

        cmd_curl = "curl localhost:8234"
        cmd = "pgrep -f fuel-simple-service"

        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(cluster_id=self.cluster_id, roles=["controller"])
        d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)

        for node in d_ctrls:
            logger.info("Check plugin service on node {0}".format(node.name))
            with self.fuel_web.get_ssh_for_node(node.name) as remote:
                res_pgrep = remote.execute(cmd)
                assert_equal(
                    0, res_pgrep["exit_code"], "Failed with error {0} " "on node {1}".format(res_pgrep["stderr"], node)
                )
                assert_equal(
                    1,
                    len(res_pgrep["stdout"]),
                    "Failed with error {0} on the " "node {1}".format(res_pgrep["stderr"], node),
                )
                # curl to service
                res_curl = remote.execute(cmd_curl)
                assert_equal(
                    0, res_pgrep["exit_code"], "Failed with error {0} " "on node {1}".format(res_curl["stderr"], node)
                )
Example No. 27
    def check_vmware_service_actions(self):
        """Disable vmware host (cluster) and check instance creation on
        enabled cluster."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        services = os_conn.get_nova_service_list()
        vmware_services = []
        for service in services:
            if service.binary == 'nova-compute' and \
               service.zone == self.vcenter_az:
                vmware_services.append(service)
                os_conn.disable_nova_service(service)

        image = os_conn.get_image(self.vmware_image)
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)

        for service in vmware_services:
            logger.info("Check {}".format(service.host))
            os_conn.enable_nova_service(service)
            vm = os_conn.create_server(image=image, timeout=210,
                                       availability_zone=self.vcenter_az,
                                       net_id=net['id'], security_groups=[sg])
            vm_host = getattr(vm, 'OS-EXT-SRV-ATTR:host')
            assert_true(service.host == vm_host, 'Instance was launched on a'
                                                 ' disabled vmware cluster')
            os_conn.delete_instance(vm)
            os_conn.verify_srv_deleted(vm)
            os_conn.disable_nova_service(service)
Example No. 28
    def create_env(self):
        """Create Fuel Environment

        To configure the Environment, use the environment-config section
        of the config file

        Skip this action if a snapshot with the Environment name exists

        """
        self.check_run(self.env_config['name'])

        logger.info("Create env {}".format(
            self.env_config['name']))
        cluster_settings = {
            "murano": self.env_settings['components'].get('murano', False),
            "sahara": self.env_settings['components'].get('sahara', False),
            "ceilometer": self.env_settings['components'].get('ceilometer',
                                                              False),
            "ironic": self.env_settings['components'].get('ironic', False),
            "user": self.env_config.get("user", "admin"),
            "password": self.env_config.get("password", "admin"),
            "tenant": self.env_config.get("tenant", "admin"),
            "volumes_lvm": self.env_settings['storages'].get("volume-lvm",
                                                             False),
            "volumes_ceph": self.env_settings['storages'].get("volume-ceph",
                                                              False),
            "images_ceph": self.env_settings['storages'].get("image-ceph",
                                                             False),
            "ephemeral_ceph": self.env_settings['storages'].get(
                "ephemeral-ceph", False),
            "objects_ceph": self.env_settings['storages'].get("rados-ceph",
                                                              False),
            "osd_pool_size": str(self.env_settings['storages'].get(
                "replica-ceph", 2)),
            "net_provider": self.env_config['network'].get('provider',
                                                           'neutron'),
            "net_segment_type": self.env_config['network'].get('segment-type',
                                                               'vlan'),
            "assign_to_all_nodes": self.env_config['network'].get(
                'pubip-to-all',
                False),
            "neutron_l3_ha": self.env_config['network'].get(
                'neutron-l3-ha', False),
            "neutron_dvr": self.env_config['network'].get(
                'neutron-dvr', False),
            "neutron_l2_pop": self.env_config['network'].get(
                'neutron-l2-pop', False),
            "neutron_qos": self.env_config['network'].get(
                'neutron-qos', False),
        }

        self.cluster_id = self.fuel_web.create_cluster(
            name=self.env_config['name'],
            mode=settings.DEPLOYMENT_MODE,
            release_name=settings.OPENSTACK_RELEASE_UBUNTU
            if self.env_config['release'] == 'ubuntu'
            else settings.OPENSTACK_RELEASE,
            settings=cluster_settings)

        logger.info("Cluster created with ID:{}".format(self.cluster_id))
Example No. 29
    def create_env(self):
        """Create Fuel Environment

        To configure the Environment, use the environment-config section
        of the config file

        Skip this action if a snapshot with the Environment name exists

        """
        self.check_run(self.env_config['name'])

        logger.info("Create env {}".format(self.env_config['name']))
        cluster_settings = {
            "sahara":
            self.env_settings['components'].get('sahara', False),
            "ceilometer":
            self.env_settings['components'].get('ceilometer', False),
            "ironic":
            self.env_settings['components'].get('ironic', False),
            "user":
            self.env_config.get("user", "admin"),
            "password":
            self.env_config.get("password", "admin"),
            "tenant":
            self.env_config.get("tenant", "admin"),
            "volumes_lvm":
            self.env_settings['storages'].get("volume-lvm", False),
            "volumes_ceph":
            self.env_settings['storages'].get("volume-ceph", False),
            "images_ceph":
            self.env_settings['storages'].get("image-ceph", False),
            "ephemeral_ceph":
            self.env_settings['storages'].get("ephemeral-ceph", False),
            "objects_ceph":
            self.env_settings['storages'].get("rados-ceph", False),
            "osd_pool_size":
            str(self.env_settings['storages'].get("replica-ceph", 2)),
            "net_provider":
            self.env_config['network'].get('provider', 'neutron'),
            "net_segment_type":
            self.env_config['network'].get('segment-type', 'vlan'),
            "assign_to_all_nodes":
            self.env_config['network'].get('pubip-to-all', False),
            "neutron_l3_ha":
            self.env_config['network'].get('neutron-l3-ha', False),
            "neutron_dvr":
            self.env_config['network'].get('neutron-dvr', False),
            "neutron_l2_pop":
            self.env_config['network'].get('neutron-l2-pop', False)
        }

        self.cluster_id = self.fuel_web.create_cluster(
            name=self.env_config['name'],
            mode=settings.DEPLOYMENT_MODE,
            release_name=settings.OPENSTACK_RELEASE_UBUNTU
            if self.env_config['release'] == 'ubuntu' else
            settings.OPENSTACK_RELEASE,
            settings=cluster_settings)

        logger.info("Cluster created with ID:{}".format(self.cluster_id))
Example No. 30
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 31
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')['stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 32
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 33
 def check_stopping_resources():
     logger.info(
         "Checking for 'running_resources "
         "attribute have '0' value")
     pcs_status = parse_pcs_status_xml(remote)
     pcs_attribs = get_pcs_nodes(pcs_status)
     return pcs_attribs[self.primary_controller_fqdn][
         'resources_running'] == '0'
Example No. 34
 def checking_health_disk_attribute():
     logger.info("Checking for '#health_disk' attribute")
     cibadmin_status_xml = run_on_remote_get_results(
         remote, 'cibadmin --query --scope status')[
         'stdout_str']
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 35
 def fail_deploy_cluster(self):
     """Deploy environment (must fail)."""
     try:
         self.fuel_web.deploy_cluster_wait(self.cluster_id)
         failed = False
     except AssertionError:
         failed = True
     assert_true(failed, 'Deploy passed with incorrect parameters')
     logger.info('Deploy failed')
Example No. 36
 def _finish_case(self):
     """Finish test case"""
     case_time = time.time() - self._start_time
     minutes = int(round(case_time)) // 60
     seconds = int(round(case_time)) % 60
     name = getattr(self, "__doc__", self.__class__.__name__).splitlines()[0]
     finish_case = "[ FINISH {} CASE TOOK {} min {} sec ]".format(name, minutes, seconds)
     footer = "<<< {:=^142} >>>".format(finish_case)
     logger.info("\n{footer}\n".format(footer=footer))
Example No. 37
 def check_started_resources():
     logger.info("Checking for 'running_resources' attribute "
                 "have {} value on node {}".format(
                     self.slave_node_running_resources,
                     self.primary_controller_fqdn))
     pcs_status = parse_pcs_status_xml(remote)
     pcs_attribs = get_pcs_nodes(pcs_status)
     return pcs_attribs[self.primary_controller_fqdn][
         'resources_running'] == self.slave_node_running_resources
Example No. 38
 def fail_deploy_cluster(self):
     """Deploy environment (must fail)."""
     try:
         self.fuel_web.deploy_cluster_wait(self.cluster_id)
         failed = False
     except AssertionError:
         failed = True
     assert_true(failed, 'Deploy passed with incorrect parameters')
     logger.info('Deploy failed')
Example No. 39
 def _destroy_controller(self, devops_node_name):
     logger.info("Suspend {} node".format(devops_node_name))
     d_node = self.env.d_env.get_node(name=devops_node_name)
     d_node.suspend(False)
     self.ostf_tests_should_failed += 1
     self.os_service_should_failed += 1
     if d_node not in self.destroyed_devops_nodes:
         self.destroyed_devops_nodes.append(d_node)
     else:
         logger.warning("Try destroy already destroyed node")
Example No. 40
 def fail_ostf(self):
     """Run OSTF tests (must fail)."""
     try:
         self.fuel_web.run_ostf(self.cluster_id,
                                test_sets=['sanity', 'smoke', 'ha'])
         failed = False
     except AssertionError:
         failed = True
     assert_true(failed, 'OSTF passed with incorrect parameters')
     logger.info('OSTF failed')
Example No. 41
 def checking_health_disk_attribute_is_not_present():
     logger.info("Checking for '#health_disk' attribute "
                 "is not present on node {}".format(
                     self.primary_controller_fqdn))
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' not in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 42
 def _get_controller_with_vip(self):
     """Return name of controller with VIPs."""
     for node in self.env.d_env.nodes().slaves:
         ng_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
         if ng_node['online'] and 'controller' in ng_node['roles']:
             hosts_vip = self.fuel_web.get_pacemaker_resource_location(
                 ng_node['devops_name'], 'vip__management')
             logger.info('Now primary controller is '
                         '{}'.format(hosts_vip[0].name))
             return hosts_vip[0].name
Example No. 43
    def config_idc_glance(self):
        """Reconfigure vCenter settings with incorrect Glance Datacenter."""
        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value']['glance']['datacenter'] = '!@#$%^&*()'

        self.fuel_web.client.update_cluster_vmware_attributes(
            self.cluster_id, vmware_attr)
        logger.info("Glance datacenter settings have been updated")
Example No. 44
 def check_started_resources():
     logger.info(
         "Checking for 'running_resources' attribute "
         "have {} value on node {}".format(
             self.slave_node_running_resources,
             self.primary_controller_fqdn))
     pcs_status = parse_pcs_status_xml(remote)
     pcs_attribs = get_pcs_nodes(pcs_status)
     return pcs_attribs[self.primary_controller_fqdn][
         'resources_running'] == self.slave_node_running_resources
Example No. 45
    def check_haproxy(self):
        """HAProxy backend checking"""
        controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(self.cluster_id, ["controller"])

        for node in controller_nodes:
            remote = self.env.d_env.get_ssh_to_remote(node["ip"])
            logger.info("Check all HAProxy backends on {}".format(node["meta"]["system"]["fqdn"]))
            haproxy_status = checkers.check_haproxy_backend(remote)
            remote.clear()
            assert_equal(haproxy_status["exit_code"], 1, "HAProxy backends are DOWN. {0}".format(haproxy_status))
Example No. 46
 def _destroy_controller(self, devops_node_name):
     logger.info("Suspend {} node".format(devops_node_name))
     d_node = self.env.d_env.get_node(name=devops_node_name)
     d_node.suspend(False)
     self.ostf_tests_should_failed += 1
     self.os_service_should_failed += 1
     if d_node not in self.destroyed_devops_nodes:
         self.destroyed_devops_nodes.append(d_node)
     else:
         logger.warning("Try destroy already destroyed node")
Example No. 47
    def config_idc_glance(self):
        """Reconfigure vCenter settings with incorrect Glance Datacenter."""
        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value']['glance']['datacenter'] = '!@#$%^&*()'

        self.fuel_web.client.update_cluster_vmware_attributes(self.cluster_id,
                                                              vmware_attr)
        logger.info("Glance datacenter settings have been updated")
Example No. 48
 def _get_controller_with_vip(self):
     """Return name of controller with VIPs."""
     for node in self.env.d_env.nodes().slaves:
         ng_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
         if ng_node['online'] and 'controller' in ng_node['roles']:
             hosts_vip = self.fuel_web.get_pacemaker_resource_location(
                 ng_node['devops_name'], 'vip__management')
             logger.info('Now primary controller is '
                         '{}'.format(hosts_vip[0].name))
             return hosts_vip[0].name
Example No. 49
    def check_starting_resources(self):
        """Check starting pacemaker resources"""

        logger.info(
            "Waiting {} seconds for changing pacemaker status of {}".format(
                self.pacemaker_restart_timeout,
                self.primary_controller_fqdn))
        time.sleep(self.pacemaker_restart_timeout)

        with self.fuel_web.get_ssh_for_node(
                self.primary_controller.name) as remote:

            def checking_health_disk_attribute_is_not_present():
                logger.info(
                    "Checking for '#health_disk' attribute "
                    "is not present on node {}".format(
                        self.primary_controller_fqdn))
                cibadmin_status_xml = run_on_remote_get_results(
                    remote, 'cibadmin --query --scope status')[
                    'stdout_str']
                pcs_attribs = get_pacemaker_nodes_attributes(
                    cibadmin_status_xml)
                return '#health_disk' not in pcs_attribs[
                    self.primary_controller_fqdn]

            def check_started_resources():
                logger.info(
                    "Checking for 'running_resources' attribute "
                    "have {} value on node {}".format(
                        self.slave_node_running_resources,
                        self.primary_controller_fqdn))

                primary_ctrl = \
                    self.primary_controller.get_ip_address_by_network_name(
                        'admin')
                pcs_status = parse_pcs_status_xml(primary_ctrl)

                pcs_attribs = get_pcs_nodes(pcs_status)
                return pcs_attribs[self.primary_controller_fqdn][
                    'resources_running'] == self.slave_node_running_resources

            wait(checking_health_disk_attribute_is_not_present,
                 "Attribute #health_disk was appeared in attributes "
                 "on node {} in {} seconds".format(
                     self.primary_controller_fqdn,
                     self.pcs_check_timeout),
                 timeout=self.pcs_check_timeout)

            wait(check_started_resources,
                 "Attribute 'running_resources' doesn't have {} value "
                 "on node {} in {} seconds".format(
                     self.slave_node_running_resources,
                     self.primary_controller_fqdn,
                     self.pcs_check_timeout),
                 self.pcs_check_timeout)
Example No. 50
    def check_pacemaker_status(self):
        """Check controllers status in pacemaker"""
        n_ctrls = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ['controller'])
        d_ctrls = map(self.fuel_web.get_devops_node_by_nailgun_node, n_ctrls)
        online_d_ctrls = set(d_ctrls) - set(self.destroyed_devops_nodes)

        for node in online_d_ctrls:
            logger.info("Check pacemaker status on {}".format(node.name))
            self.fuel_web.assert_pacemaker(node.name, online_d_ctrls,
                                           self.destroyed_devops_nodes)
Example No. 51
 def _finish_case(self):
     """Finish test case"""
     case_time = time.time() - self._start_time
     minutes = int(round(case_time)) // 60
     seconds = int(round(case_time)) % 60
     name = getattr(self, "__doc__",
                    self.__class__.__name__).splitlines()[0]
     finish_case = "[ FINISH {} CASE TOOK {} min {} sec ]".format(
         name, minutes, seconds)
     footer = "<<< {:=^142} >>>".format(finish_case)
     logger.info("\n{footer}\n".format(footer=footer))
Example No. 52
    def create_and_attach_empty_volume(self):
        """Create and attach to instance empty volume."""
        mount_point = '/dev/sdb'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        vol = os_conn.create_volume(availability_zone=self.cinder_az)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   security_groups=[sg],
                                   net_id=net['id'],
                                   timeout=210)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        logger.info("Attaching volume via cli")
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}' \
              ''.format(srv_id=vm.id, volume_id=vol.id, mount=mount_point)
        logger.debug('CMD: {}'.format(cmd))
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

        helpers.wait(lambda: os_conn.get_volume_status(vol) == "in-use",
                     timeout=30,
                     timeout_msg="Volume doesn't reach 'in-use' state")

        vm.reboot()
        sleep(10)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
            res = remote.execute_through_host(hostname=floating_ip.ip,
                                              cmd=cmd,
                                              auth=cirros_auth)
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "Attached volume is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume(vol)
Example No. 53
    def config_idatastore(self):
        """Reconfigure vCenter settings with incorrect regex of Datastore."""
        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value']['availability_zones'][0]['nova_computes'][0][
            'datastore_regex'] = '!@#$%^&*()'

        self.fuel_web.client.update_cluster_vmware_attributes(
            self.cluster_id, vmware_attr)
        logger.info("Datastore regex settings have been updated")
Example No. 54
 def fail_ostf(self):
     """Run OSTF tests (must fail)."""
     try:
         self.fuel_web.run_ostf(
             self.cluster_id,
             test_sets=['sanity', 'smoke', 'ha'])
         failed = False
     except AssertionError:
         failed = True
     assert_true(failed, 'OSTF passed with incorrect parameters')
     logger.info('OSTF failed')
Example No. 55
            def check_stopping_resources():
                logger.info("Checking for 'running_resources "
                            "attribute have '0' value")

                nail_node = self.fuel_web.get_nailgun_node_by_devops_node(
                    self.primary_controller)
                pcs_status = parse_pcs_status_xml(nail_node['ip'])

                pcs_attribs = get_pcs_nodes(pcs_status)
                return pcs_attribs[
                    self.primary_controller_fqdn]['resources_running'] == '0'
Example No. 56
    def config_idatastore(self):
        """Reconfigure vCenter settings with incorrect regex of Datastore."""
        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value']['availability_zones'][0]['nova_computes'][0][
            'datastore_regex'] = '!@#$%^&*()'

        self.fuel_web.client.update_cluster_vmware_attributes(self.cluster_id,
                                                              vmware_attr)
        logger.info("Datastore regex settings have been updated")
Example No. 57
 def checking_health_disk_attribute_is_not_present():
     logger.info(
         "Checking for '#health_disk' attribute "
         "is not present on node {}".format(
             self.primary_controller_fqdn))
     cibadmin_status_xml = remote.check_call(
         'cibadmin --query --scope status').stdout_str
     pcs_attribs = get_pacemaker_nodes_attributes(
         cibadmin_status_xml)
     return '#health_disk' not in pcs_attribs[
         self.primary_controller_fqdn]
Example No. 58
    def create_and_attach_empty_volume(self):
        """Create and attach to instance empty volume."""
        mount_point = '/dev/sdb'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        vol = os_conn.create_volume(availability_zone=self.cinder_az)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   security_groups=[sg],
                                   net_id=net['id'],
                                   timeout=210)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        logger.info("Attaching volume via cli")
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}' \
              ''.format(srv_id=vm.id, volume_id=vol.id, mount=mount_point)
        logger.debug('CMD: {}'.format(cmd))
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

        helpers.wait(
            lambda: os_conn.get_volume_status(vol) == "in-use",
            timeout=30, timeout_msg="Volume doesn't reach 'in-use' state")

        vm.reboot()
        sleep(10)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd=cmd,
                auth=cirros_auth
            )
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "Attached volume is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume(vol)
Example No. 59
 def _apply_cluster_attributes(self, replacements):
     """Apply replacements to fuel attributes (settings)"""
     if len(replacements) == 0:
         return
     attrs = self.fuel_web.client.get_cluster_attributes(self.cluster_id)
     for glob, value in replacements.items():
         path = self._get_settings_path(attrs, glob)
         logger.info('Set `{path}` to `{value}`'.format(path=path,
                                                        value=value))
         dpath.util.set(attrs, path, value)
     self.fuel_web.client.update_cluster_attributes(self.cluster_id, attrs)
Example No. 60
    def check_haproxy(self):
        """HAProxy backend checking"""
        controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ['controller'])

        for node in controller_nodes:
            logger.info("Check all HAProxy backends on {}".format(
                node['meta']['system']['fqdn']))
            haproxy_status = checkers.check_haproxy_backend(node['ip'])
            assert_equal(
                haproxy_status['exit_code'], 1,
                "HAProxy backends are DOWN. {0}".format(haproxy_status))