Example #1
    def delete_environment(self):
        """Delete existing environment
        and verify nodes returns to unallocated state

        Scenario:
            1. Revert "simple flat" environment
            2. Delete environment
            3. Verify nodes return to the unallocated pool

        """
        self.env.revert_snapshot("deploy_simple_flat")

        cluster_id = self.fuel_web.get_last_created_cluster()
        self.fuel_web.client.delete_cluster(cluster_id)
        nailgun_nodes = self.fuel_web.client.list_nodes()
        nodes = [node for node in nailgun_nodes
                 if node["pending_deletion"] is True]
        assert_true(
            len(nodes) == 2, "Verify 2 nodes have pending deletion status"
        )
        wait(
            lambda:
            self.fuel_web.is_node_discovered(nodes[0]) and
            self.fuel_web.is_node_discovered(nodes[1]),
            timeout=10 * 60,
            interval=15
        )
Example #2
    def check_emc_cinder_config(cls, remote, path):
        command = 'cat {0}'.format(path)
        conf_data = ''.join(remote.execute(command)['stdout'])
        conf_data = cStringIO.StringIO(conf_data)
        cinder_conf = ConfigParser.ConfigParser()
        cinder_conf.readfp(conf_data)

        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'volume_driver'),
            'cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver')
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'storage_vnx_authentication_type'),
            'global')
        asserts.assert_false(
            cinder_conf.getboolean('DEFAULT',
                                   'destroy_empty_storage_group'))
        asserts.assert_true(
            cinder_conf.getboolean('DEFAULT',
                                   'initiator_auto_registration'))
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'attach_detach_batch_interval'), -1)
        asserts.assert_equal(
            cinder_conf.getint('DEFAULT', 'default_timeout'), 10)
        asserts.assert_equal(
            cinder_conf.get('DEFAULT', 'naviseccli_path'),
            '/opt/Navisphere/bin/naviseccli')

        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT',
                                                   'san_secondary_ip'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_login'))
        asserts.assert_true(cinder_conf.has_option('DEFAULT', 'san_password'))
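The snippet above targets Python 2 (cStringIO, ConfigParser, readfp). A minimal Python 3 sketch of the same fetch-and-parse step, assuming the same remote.execute() interface; the helper name is illustrative and not part of the original module:

import configparser

def read_remote_ini(remote, path):
    # Fetch the file over the existing remote.execute() interface
    conf_data = ''.join(remote.execute('cat {0}'.format(path))['stdout'])
    # configparser.read_string() replaces ConfigParser.readfp() on Python 3
    parsed = configparser.ConfigParser()
    parsed.read_string(conf_data)
    return parsed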
Example #3
    def ha_pacemaker_configuration(self):
        """Verify resources are configured

        Scenario:
            1. SSH to controller node
            2. Verify resources are configured
            3. Go to next controller

        Snapshot deploy_ha

        """
        self.env.revert_snapshot("deploy_ha")

        devops_ctrls = self.env.nodes().slaves[:3]
        for devops_node in devops_ctrls:
            config = self.fuel_web.get_pacemaker_config(devops_node.name)
            for n in devops_ctrls:
                fqdn = self.fuel_web.fqdn(n)
                assert_true(
                    'node {0}'.format(fqdn) in config,
                    'node {0} exists'.format(fqdn))
            assert_not_equal(
                re.search('primitive (openstack-)?heat-engine', config), None,
                'heat engine')
            assert_true('primitive p_haproxy' in config, 'haproxy')
            assert_true('primitive p_mysql' in config, 'mysql')
            assert_true(
                'primitive vip__management_old' in config, 'vip management')
            assert_true(
                'primitive vip__public_old' in config, 'vip public')
Example #4
    def do_backup(self,
                  backup_path, local_path,
                  repos_backup_path=None, repos_local_path=None):
        """ Wrapper for backup process of upgrading procedure"""
        # BOTH repos arguments should be passed at the same time
        # or BOTH should not be passed
        assert_equal(bool(repos_backup_path), bool(repos_local_path),
                     "Both repos arguments should be specified")
        self.install_octane()

        cmd = "mkdir -p {}".format(self.remote_dir_for_backups)
        run_on_remote(self.admin_remote, cmd)

        self.octane_action("backup", backup_path)
        logger.info("Downloading {}".format(backup_path))
        # pylint: disable=no-member
        self.admin_remote.download(backup_path, local_path)
        # pylint: enable=no-member
        assert_true(os.path.exists(local_path))

        if repos_backup_path:
            self.octane_action("repo-backup", repos_backup_path)
            logger.info("Downloading {}".format(repos_backup_path))
            # pylint: disable=no-member
            self.admin_remote.download(repos_backup_path, repos_local_path)
            # pylint: enable=no-member
            assert_true(os.path.exists(repos_local_path))
Example #5
    def cluster_deletion(self):
        """
        Scenario:
            1. Revert snapshot 'prepare_ha_neutron'
            2. Delete cluster via cli
            3. Check cluster absence in the list

        Duration 25m

        """
        self.env.revert_snapshot("prepare_ha_neutron")

        remote = self.env.d_env.get_admin_remote()
        cluster_id = self.fuel_web.get_last_created_cluster()
        assert_true(
            remote.execute('fuel --env {0} env delete'.format(cluster_id))
            ['exit_code'] == 0)
        try:
            wait(lambda:
                 remote.execute(
                     "fuel env |  awk '{print $1}' |  tail -n 1 | grep '^.$'")
                 ['exit_code'] == 1, timeout=60 * 6)
        except TimeoutError:
            raise TimeoutError(
                "cluster {0} was not deleted".format(cluster_id))
        assert_false(
            check_cluster_presence(cluster_id, self.env.postgres_actions),
            "cluster {0} is found".format(cluster_id))
Example #6
    def ha_haproxy_termination(self):
        """Terminate haproxy on all controllers one by one

        Scenario:
            1. Terminate haproxy
            2. Wait while it is being restarted
            3. Verify it is restarted
            4. Go to another controller
            5. Run OSTF

        Snapshot deploy_ha

        """
        self.env.revert_snapshot("deploy_ha")

        for devops_node in self.env.nodes().slaves[:3]:
            remote = self.fuel_web.get_ssh_for_node(devops_node.name)
            remote.check_call('kill -9 $(pidof haproxy)')

            haproxy_started = lambda: \
                len(remote.check_call(
                    'ps aux | grep "/usr/sbin/haproxy"')['stdout']) == 3
            wait(haproxy_started, timeout=20)
            assert_true(haproxy_started(), 'haproxy restarted')

        cluster_id = self.fuel_web.client.get_cluster_id(
            self.__class__.__name__)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'])
Example #7
    def update_nodes(self, cluster_id, nodes_dict,
                     pending_addition=True, pending_deletion=False):
        # update nodes in cluster
        nodes_data = []
        for node_name in nodes_dict:
            devops_node = self.environment.get_virtual_environment() \
                .node_by_name(node_name)

            wait(lambda: self.get_nailgun_node_by_devops_node(
                devops_node)["online"], timeout=60 * 2)
            node = self.get_nailgun_node_by_devops_node(devops_node)
            assert_true(node["online"],
                        "Node {} is not online".format(node["mac"]))

            node_data = {
                "cluster_id": cluster_id,
                "id": node["id"],
                "pending_addition": pending_addition,
                "pending_deletion": pending_deletion,
                "pending_roles": nodes_dict[node_name],
                "name": "{}_{}".format(node_name, "_".join(nodes_dict[node_name])),
            }
            nodes_data.append(node_data)

        # assume nodes are going to be updated for one cluster only
        cluster_id = nodes_data[-1]["cluster_id"]
        node_ids = [str(node_info["id"]) for node_info in nodes_data]
        self.client.update_nodes(nodes_data)

        nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
        cluster_node_ids = [str(_node["id"]) for _node in nailgun_nodes]
        assert_true(all(node_id in cluster_node_ids for node_id in node_ids))

        self.update_nodes_interfaces(cluster_id)

        return nailgun_nodes
Example #8
File: pxc.py Project: magictour/trove
    def test_instance_resize_flavor(self):
        """Tests the resize instance/flavor API."""

        if not getattr(self, 'instance', None):
            raise SkipTest(
                "Skipping this test since instance is not available.")

        flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
                                        'm1.medium')
        flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
        asserts.assert_true(flavors,
                            "Flavor '%s' not found!" % flavor_name)
        new_flavor = flavors[0]

        self.rd_client = create_dbaas_client(self.instance.user)
        self.rd_client.instances.resize_instance(self.instance.id,
                                                 new_flavor.id)

        asserts.assert_equal(202, self.rd_client.last_http_code)
        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal("RESIZE", test_instance.status)

        poll_until(lambda: self._find_status(self.rd_client,
                                             self.instance.id, "ACTIVE"),
                   sleep_time=SLEEP_TIME, time_out=TIMEOUT)

        test_instance = self.rd_client.instances.get(self.instance.id)
        asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
        self.report.log("Resized Flavor for Instance ID: %s to %s." % (
            self.instance.id, new_flavor.id))
Example #9
File: root.py Project: NeCTAR-RC/trove
 def test_root_initially_disabled_details(self):
     """Use instance details to test that root is disabled."""
     instance = self.dbaas.instances.get(instance_info.id)
     assert_true(hasattr(instance, 'rootEnabled'),
                 "Instance has no rootEnabled property.")
     assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.")
     assert_equal(self.root_enabled_timestamp, 'Never')
Example #10
 def result_is_active():
     instance = instance_info.dbaas.instances.get(slave_instance.id)
     if instance.status == "ACTIVE":
         return True
     else:
         assert_true(instance.status in ['BUILD', 'BACKUP'])
         return False
    def _assert_status_failure(result):
        """Checks if status==FAILED, plus asserts REST API is in sync.

        The argument is a tuple for the state in the database followed by
        the REST API status for the instance.

        If state is BUILDING this will assert that the REST API result is
        similar, or is FAILED (because the REST API is called after the
        call to the database the status might change in between).

        """
        if result[0].state == power_state.BUILDING:
            assert_true(
                result[1].status == dbaas_mapping[power_state.BUILDING] or
                result[1].status == dbaas_mapping[power_state.FAILED],
                "Result status from API should only be BUILDING or FAILED"
                " at this point but was %s" % result[1].status)
            return False
        else:
            # After building the only valid state is FAILED (because
            # we've destroyed the instance).
            assert_equal(result[0].state, power_state.FAILED)
            # Make sure the REST API agrees.
            assert_equal(result[1].status, dbaas_mapping[power_state.FAILED])
            return True
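A minimal sketch of how a predicate like result_is_active above is typically driven, assuming the poll_until helper with the sleep_time/time_out keywords shown in Example #8:

# Poll the REST API until the slave instance becomes ACTIVE; SLEEP_TIME and
# TIMEOUT are assumed to come from the test configuration, as in Example #8.
poll_until(result_is_active, sleep_time=SLEEP_TIME, time_out=TIMEOUT)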
Example #12
def disable_plugin_manila(cluster_id, fuel_web_client):
    """Disable Manila plugin on cluster."""
    assert_true(
        fuel_web_client.check_plugin_exists(cluster_id, plugin_name),
        "Plugin couldn't be enabled. Check plugin version.")
    options = {'metadata/enabled': False}
    fuel_web_client.update_plugin_data(cluster_id, plugin_name, options)
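For comparison, a minimal sketch of the enabling counterpart, assuming the same fuel_web_client helpers (check_plugin_exists, update_plugin_data) used above; this function is illustrative and not part of the original module:

def enable_plugin_manila(cluster_id, fuel_web_client):
    """Enable Manila plugin on cluster (illustrative sketch)."""
    assert_true(
        fuel_web_client.check_plugin_exists(cluster_id, plugin_name),
        "Plugin couldn't be enabled. Check plugin version.")
    options = {'metadata/enabled': True}
    fuel_web_client.update_plugin_data(cluster_id, plugin_name, options)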
Example #13
        def assert_download(sid):
            filename, audio = self.mm.download_song(sid)

            # TODO could use original filename to verify this
            # but, when manually checking, got modified title occasionally
            assert_true(filename.endswith('.mp3'))
            assert_is_not_none(audio)
Example #14
def instance_is_active(id):
    instance = instance_info.dbaas.instances.get(id)
    if instance.status == "ACTIVE":
        return True
    else:
        assert_true(instance.status in ['PROMOTE', 'EJECT', 'BUILD', 'BACKUP'])
        return False
    def ha_one_controller_neutron_node_deletion(self):
        """Remove compute from cluster in ha mode with neutron

        Scenario:
            1. Revert "deploy_ha_one_controller_neutron" environment
            2. Remove compute node
            3. Deploy changes
            4. Verify node returns to the unallocated pool

        Duration 8m

        """
        self.env.revert_snapshot("deploy_ha_one_controller_neutron")

        cluster_id = self.fuel_web.get_last_created_cluster()
        nailgun_nodes = self.fuel_web.update_nodes(
            cluster_id, {'slave-02': ['compute']}, False, True)
        task = self.fuel_web.deploy_cluster(cluster_id)
        self.fuel_web.assert_task_success(task)
        nodes = [
            node for node in nailgun_nodes if node["pending_deletion"] is True]
        assert_true(
            len(nodes) == 1, "Verify 1 node has pending deletion status"
        )
        self.fuel_web.wait_node_is_discovered(nodes[0])
Example #16
 def test_node_tags_del(self):
     """ Testing DELETE:api/2.0/nodes/:id/tags/:tagName """
     get_codes = []
     del_codes = []
     Api().nodes_get_all()
     rsp = self.__client.last_response
     nodes = loads(rsp.data)
     get_codes.append(rsp)
     for n in nodes:
         for t in self.__test_tags.get('tags'):
             Api().nodes_del_tag_by_id(identifier=n.get('id'), tag_name=t)
             rsp = self.__client.last_response
             del_codes.append(rsp)
         Api().nodes_get_by_id(identifier=n.get('id'))
         rsp = self.__client.last_response
         get_codes.append(rsp)
         updated_node = loads(rsp.data)
         for t in self.__test_tags.get('tags'):
             assert_true(t not in updated_node.get('tags'),
                         message="Tag " + t + " was not deleted")
     for c in get_codes:
         assert_equal(200, c.status, message=c.reason)
     for c in del_codes:
         assert_equal(204, c.status, message=c.reason)
       
     assert_raises(rest.ApiException, Api().nodes_del_tag_by_id,
                   'fooey', tag_name=['tag'])
Example #17
 def test_single_failure_is_presented(self):
     try:
         with Check() as check:
             check.equal(4, 6)
         fail("Expected an assertion!")
     except ASSERTION_ERROR as ae:
         assert_true("4 != 6" in str(ae), str(ae))
Example #18
File: client.py Project: NeCTAR-RC/trove
 def find_flavor_self_href(flavor):
     self_links = [link for link in flavor.links if link['rel'] == 'self']
     asserts.assert_true(len(self_links) > 0, "Flavor had no self href!")
     flavor_href = self_links[0]['href']
     asserts.assert_false(flavor_href is None,
                          "Flavor link self href missing.")
     return flavor_href
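A minimal usage sketch, assuming a dbaas client that exposes find_flavors_by_name as in Examples #8 and #25; the flavor name is a placeholder:

# Resolve the self href of a flavor looked up by name (illustrative only)
flavors = dbaas.find_flavors_by_name('m1.small')
flavor_href = find_flavor_self_href(flavors[0])
asserts.assert_true(flavor_href, "Expected a non-empty self href.")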
Example #19
 def test_root_now_enabled_details(self):
     """Use instance details to test that root is now enabled."""
     instance = self.dbaas.instances.get(instance_info.id)
     assert_true(hasattr(instance, "rootEnabled"), "Instance has no rootEnabled property.")
     assert_true(instance.rootEnabled, "Root SHOULD be enabled.")
     assert_not_equal(self.root_enabled_timestamp, "Never")
     self._verify_root_timestamp(instance_info.id)
    def dvs_install(self):
        """Check that plugin can be installed.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugin.
            3. Ensure that plugin is installed successfully using cli,
               run command 'fuel plugins'. Check name, version of plugin.

        Duration: 30 min

        """
        self.env.revert_snapshot("ready_with_1_slaves")

        self.show_step(1)
        self.show_step(2)
        plugin.install_dvs_plugin(
            self.env.d_env.get_admin_remote())

        cmd = 'fuel plugins list'

        output = list(self.env.d_env.get_admin_remote().execute(
            cmd)['stdout']).pop().split(' ')

        # check name
        assert_true(
            plugin.plugin_name in output,
            "Plugin  {} is not installed.".format(plugin.plugin_name)
        )
        # check version
        assert_true(
            plugin.DVS_PLUGIN_VERSION in output,
            "Plugin  {} is not installed.".format(plugin.plugin_name)
        )
        self.env.make_snapshot("dvs_install", is_make=True)
    def dvs_uninstall(self):
        """Check that plugin can be removed.

        Scenario:
            1. Revert to snapshot 'dvs_install'.
            2. Remove plugin.
            3. Verify that plugin is removed, run command 'fuel plugins'.


        Duration: 5 min

        """
        self.show_step(1)
        self.env.revert_snapshot("dvs_install")

        self.show_step(2)
        cmd = 'fuel plugins --remove {0}=={1}'.format(
            plugin.plugin_name, plugin.DVS_PLUGIN_VERSION)

        assert_true(
            self.env.d_env.get_admin_remote().execute(cmd)['exit_code'] == 0,
            'Cannot remove plugin.')

        self.show_step(3)
        cmd = 'fuel plugins list'
        output = list(self.env.d_env.get_admin_remote().execute(
            cmd)['stdout']).pop().split(' ')

        assert_true(
            plugin.plugin_name not in output,
            "Plugin is not removed {}".format(plugin.plugin_name)
        )
Example #22
    def assign_floating_ip(self, srv, use_neutron=False):
        if use_neutron:
            #   Find external net id for tenant
            nets = self.neutron.list_networks()['networks']
            err_msg = "Active external network not found in nets:{}"
            ext_net_ids = [
                net['id'] for net in nets
                if net['router:external'] and net['status'] == "ACTIVE"]
            asserts.assert_true(ext_net_ids, err_msg.format(nets))
            net_id = ext_net_ids[0]
            #   Find instance port
            ports = self.neutron.list_ports(device_id=srv.id)['ports']
            err_msg = "Not found active ports for instance:{}"
            asserts.assert_true(ports, err_msg.format(srv.id))
            port = ports[0]
            #   Create floating IP
            body = {'floatingip': {'floating_network_id': net_id,
                                   'port_id': port['id']}}
            flip = self.neutron.create_floatingip(body)
            #   Wait active state for port
            port_id = flip['floatingip']['port_id']
            state = lambda: self.neutron.show_port(port_id)['port']['status']
            helpers.wait(lambda: state() == "ACTIVE")
            return flip['floatingip']

        fl_ips_pool = self.nova.floating_ip_pools.list()
        if fl_ips_pool:
            floating_ip = self.nova.floating_ips.create(
                pool=fl_ips_pool[0].name)
            self.nova.servers.add_floating_ip(srv, floating_ip)
            return floating_ip
Example #23
    def do_sync_time(self, ntps=None):
        # 0. 'ntps' can be filled by __init__() or outside the class
        self.ntps = ntps or self.ntps
        if not self.ntps:
            raise ValueError("No servers were provided to synchronize "
                             "the time in self.ntps")

        # 1. Set actual time on all nodes via 'ntpdate'
        [ntp.set_actual_time() for ntp in self.ntps]
        assert_true(self.is_synchronized, "Time on nodes was not set:"
                    " \n{0}".format(self.report_not_synchronized()))

        # 2. Restart NTPD service
        [ntp.stop() for ntp in self.ntps]
        [ntp.start() for ntp in self.ntps]

        # 3. Wait for established peers
        [ntp.wait_peer() for ntp in self.ntps]
        assert_true(self.is_connected, "Time on nodes was not synchronized:"
                    " \n{0}".format(self.report_not_connected()))

        # 4. Report time on nodes
        for ntp in self.ntps:
            logger.info("Time on '{0}' = {1}".format(ntp.node_name,
                                                     ntp.date()[0].rstrip()))
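A style note: the side-effect list comprehensions in steps 1-3 above could equally be written as plain loops; an equivalent sketch for the restart step:

# Equivalent to the comprehension-based restart in step 2
for ntp in self.ntps:
    ntp.stop()
for ntp in self.ntps:
    ntp.start()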
Example #24
 def setup(self):
     self.set_up()
     if USE_IP:
         self.connection.connect()
         asserts.assert_true(self.connection.is_connected(),
                             "Should be able to connect before resize.")
     self.user_was_deleted = False
Example #25
 def obtain_flavor_ids(self):
     old_id = self.instance.flavor['id']
     self.expected_old_flavor_id = old_id
     res = instance_info.dbaas.find_flavor_and_self_href(old_id)
     self.expected_dbaas_flavor, _ = res
     if EPHEMERAL_SUPPORT:
         flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name',
                                         'eph.rd-smaller')
     else:
         flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
                                         'm1.small')
     flavors = self.dbaas.find_flavors_by_name(flavor_name)
     asserts.assert_equal(len(flavors), 1,
                          "Number of flavors with name '%s' "
                          "found was '%d'." % (flavor_name,
                                               len(flavors)))
     flavor = flavors[0]
     self.old_dbaas_flavor = instance_info.dbaas_flavor
     instance_info.dbaas_flavor = flavor
     asserts.assert_true(flavor is not None,
                         "Flavor '%s' not found!" % flavor_name)
     flavor_href = self.dbaas.find_flavor_self_href(flavor)
     asserts.assert_true(flavor_href is not None,
                         "Flavor href '%s' not found!" % flavor_name)
     self.expected_new_flavor_id = flavor.id
Example #26
    def check_rados_daemon(self):
        """Check the radosgw daemon is started"""
        def radosgw_started(remote):
            return remote.check_call('pkill -0 radosgw')['exit_code'] == 0

        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            assert_true(radosgw_started(remote),
                        'radosgw daemon is not started')
def check_grafana_dashboards(grafana_url):

    login_key_xpath = '/html/body/div/div[2]/div/div/div[2]/form/div[2]/button'

    with ui_driver(grafana_url, "Grafana", login_key_xpath) as driver:
        login_page = pages.LoginPage(driver)
        login_page.is_login_page()
        home_page = login_page.login("grafana", "grafanapass")
        home_page.is_main_page()
        dashboard_names = {
            "Apache", "Cinder", "Elasticsearch", "Glance", "HAProxy", "Heat",
            "Hypervisor", "InfluxDB", "Keystone", "LMA self-monitoring",
            "Memcached", "MySQL", "Neutron", "Nova", "RabbitMQ", "System"
        }
        dashboard_names = {
            panel_name.lower() for panel_name in dashboard_names}
        available_dashboards_names = {
            dashboard.text.lower() for dashboard in home_page.dashboards}
        msg = ("There is not enough panels in available panels, "
               "panels that are not presented: {}")
        # NOTE(rpromyshlennikov): should there be 'elasticsearch'
        # and 'influxdb' dashboards?
        asserts.assert_true(
            dashboard_names.issubset(available_dashboards_names),
            msg.format(dashboard_names - available_dashboards_names))
        for name in available_dashboards_names:
            dashboard_page = home_page.open_dashboard(name)
            dashboard_page.get_back_to_home()
 def restart_rabbit_again(self):
     """Now stop and start rabbit, ensuring the agent reconnects."""
     self.rabbit.stop()
     assert_false(self.rabbit.is_alive)
     self.rabbit.reset()
     self.rabbit.start()
     assert_true(self.rabbit.is_alive)
Example #29
    def deploy_cluster(self, cluster_settings):
        slaves_count = len(cluster_settings['nodes'])
        slaves = self.env.d_env.nodes().slaves[:slaves_count]
        for chunk in [slaves[x:x + 5] for x in range(0, slaves_count, 5)]:
            self.env.bootstrap_nodes(chunk)
        cluster_id = self.fuel_web.create_cluster(
            name=cluster_settings['name'],
            mode=settings.DEPLOYMENT_MODE,
            settings=cluster_settings['settings']
        )
        if cluster_settings.get('plugin'):
            plugin_name = cluster_settings['plugin']['name']
            assert_true(
                self.fuel_web.check_plugin_exists(cluster_id, plugin_name))
            self.fuel_web.update_plugin_data(
                cluster_id, plugin_name, cluster_settings['plugin']['data'])

        self.fuel_web.update_nodes(cluster_id, cluster_settings['nodes'])
        self.fuel_web.verify_network(cluster_id)

        # Code for debugging on hosts with low IO
        # for chunk in [slaves[x:x+5] for x in range(0, slaves_count, 5)]:
        #     ids = [self.fuel_web.get_nailgun_node_by_devops_node(x)['id']
        #            for x in chunk]
        #     self.fuel_web.client.provision_nodes(cluster_id, ids)
        #     wait(lambda: all(
        #         [self.fuel_web.get_nailgun_node_by_devops_node(node)['status'
        #          ] == 'provisioned' for node in chunk]),
        #          timeout=30 * 60,
        #          interval=60)

        self.fuel_web.deploy_cluster_wait(cluster_id)
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id)
Example #30
    def check_nodes_notifications(self):
        """Verify nailgun notifications for discovered nodes

        Scenario:
            1. Revert snapshot "ready_with_3_slaves"
            2. Verify hard drive sizes for discovered nodes in /api/nodes
            3. Verify hard drive sizes for discovered nodes in notifications

        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # assert /api/nodes
        disk_size = NODE_VOLUME_SIZE * 1024 ** 3
        nailgun_nodes = self.fuel_web.client.list_nodes()
        for node in nailgun_nodes:
            for disk in node['meta']['disks']:
                assert_equal(disk['size'], disk_size, 'Disk size')

        hdd_size = "{} TB HDD".format(float(disk_size * 3 / (10 ** 9)) / 1000)
        notifications = self.fuel_web.client.get_notifications()
        for node in nailgun_nodes:
            # assert /api/notifications
            for notification in notifications:
                discover = notification['topic'] == 'discover'
                current_node = notification['node_id'] == node['id']
                if current_node and discover:
                    assert_true(hdd_size in notification['message'])

            # assert disks
            disks = self.fuel_web.client.get_node_disks(node['id'])
            for disk in disks:
                assert_equal(disk['size'],
                             NODE_VOLUME_SIZE * 1024 - 500, 'Disk size')
Example #31
    def deploy_emc_ha(self):
        """Deploy cluster in ha mode with emc plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 3 nodes with controller role
            5. Add 2 nodes with compute role
            6. Deploy the cluster
            7. Run network verification
            8. Check plugin installation
            9. Run OSTF

        Duration 35m
        Snapshot deploy_ha_emc
        """
        self.env.revert_snapshot("ready_with_5_slaves")

        # copy plugin to the master node
        utils.upload_tarball(ip=self.ssh_manager.admin_ip,
                             tar_path=settings.EMC_PLUGIN_PATH,
                             tar_target='/var')

        # install plugin
        utils.install_plugin_check_code(ip=self.ssh_manager.admin_ip,
                                        plugin=os.path.basename(
                                            settings.EMC_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
        )

        attr = self.fuel_web.client.get_cluster_attributes(cluster_id)

        # check plugin installed and attributes have emc options

        for option in [
                "emc_sp_a_ip", "emc_sp_b_ip", "emc_username", "emc_password",
                "emc_pool_name"
        ]:
            asserts.assert_true(
                option in attr["editable"]["emc_vnx"],
                "{0} is not in cluster attributes: {1}".format(
                    option, str(attr["editable"]["emc_vnx"])))

        # disable LVM-based volumes

        attr["editable"]["storage"]["volumes_lvm"]["value"] = False

        # enable EMC plugin

        emc_options = attr["editable"]["emc_vnx"]
        emc_options["metadata"]["enabled"] = True
        emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
        emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
        emc_options["emc_username"]["value"] = settings.EMC_USERNAME
        emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
        emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME

        self.fuel_web.client.update_cluster_attributes(cluster_id, attr)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
            })
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # get remotes for all nodes

        controller_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-01', 'slave-02', 'slave-03']
        ]
        compute_nodes = [
            self.fuel_web.get_nailgun_node_by_name(node)
            for node in ['slave-04', 'slave-05']
        ]

        controller_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in controller_nodes
        ]
        compute_remotes = [
            self.env.d_env.get_ssh_to_remote(node['ip'])
            for node in compute_nodes
        ]

        # check cinder-volume settings

        for node in controller_nodes:
            self.check_emc_cinder_config(ip=node['ip'],
                                         path='/etc/cinder/cinder.conf')
            self.check_emc_management_package(ip=node['ip'])

        # check cinder-volume layout on controllers

        cinder_volume_ctrls = [
            self.check_service(controller, "cinder-volume")
            for controller in controller_remotes
        ]
        asserts.assert_equal(
            sum(cinder_volume_ctrls), 1,
            "Expected exactly one cinder-volume service on controllers")

        # check cinder-volume layout on computes

        cinder_volume_comps = [
            self.check_service(compute, "cinder-volume")
            for compute in compute_remotes
        ]
        # closing connections
        for remote in controller_remotes:
            remote.clear()
        for remote in compute_remotes:
            remote.clear()

        asserts.assert_equal(sum(cinder_volume_comps), 0,
                             "Cluster has active cinder-volume on compute")

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_ha_emc")
Example #32
def validate_fix_apply_step(apply_step, environment, slaves):
    verify_fix_apply_step(apply_step)
    slaves = [] if not slaves else slaves
    command = ''
    remotes_ips = set()
    devops_action = ''
    devops_nodes = set()
    nodes_ids = set()

    if apply_step['type'] == 'run_tasks':
        remotes_ips.add(environment.get_admin_node_ip())
        assert_true(
            'master' not in apply_step['target'],
            "Action type 'run_tasks' accepts only slaves (roles) "
            "as target value, but 'master' is specified!")

        for target in apply_step['target']:
            if target == 'slaves':
                nodes_ids.update(get_slaves_ids_by_role(slaves, role=None))
            else:
                role = target.split('_role')[0]
                nodes_ids.update(get_slaves_ids_by_role(slaves, role=role))
    else:
        for target in apply_step['target']:
            if target == 'master':
                remotes_ips.add(environment.get_admin_node_ip())
                devops_nodes.add(environment.d_env.nodes().admin)
            elif target == 'slaves':
                remotes_ips.update(get_slaves_ips_by_role(slaves, role=None))
                devops_nodes.update(
                    get_devops_slaves_by_role(environment, slaves))
            else:
                role = target.split('_role')[0]
                remotes_ips.update(get_slaves_ips_by_role(slaves, role))
                devops_nodes.update(
                    get_devops_slaves_by_role(environment, slaves, role=role))
    if apply_step['type'] in ('service_stop', 'service_start',
                              'service_restart'):
        assert_true(
            len(apply_step['service'] or '') > 0,
            "Step #{0} in apply patch scenario perform '{1}', but "
            "service isn't specified".format(apply_step['id'],
                                             apply_step['type']))
        action = apply_step['type'].split('service_')[1]
        command = ("find /etc/init.d/ -regex '/etc/init.d/{service}' -printf "
                   "'%f\n' -quit | xargs -i service {{}} {action}").format(
                       service=apply_step['service'], action=action)
    elif apply_step['type'] in ('server_down', 'server_up', 'server_reboot'):
        assert_true(
            'master' not in apply_step['target'],
            'Action type "{0}" doesn\'t accept "master" node as '
            'target! Use action "run_command" instead.'.format(
                apply_step['type']))
        devops_action = apply_step['type'].split('server_')[1]
    elif apply_step['type'] == 'upload_script':
        assert_true(
            len(apply_step['script'] or '') > 0,
            "Step #{0} in apply patch scenario perform '{1}', but "
            "script isn't specified".format(apply_step['id'],
                                            apply_step['type']))
        assert_true(
            len(apply_step['upload_path'] or '') > 0,
            "Step #{0} in apply patch scenario perform '{1}', but "
            "upload path isn't specified".format(apply_step['id'],
                                                 apply_step['type']))
        command = ('UPLOAD', apply_step['script'], apply_step['upload_path'])
    elif apply_step['type'] == 'run_tasks':
        assert_true(
            len(apply_step['tasks'] or '') > 0,
            "Step #{0} in apply patch scenario perform '{1}', but "
            "tasks aren't specified".format(apply_step['id'],
                                            apply_step['type']))
        tasks_timeout = apply_step.get('tasks_timeout', 60 * 30)
        command = ['RUN_TASKS', nodes_ids, apply_step['tasks'], tasks_timeout]
    else:
        assert_true(
            len(apply_step['command'] or '') > 0,
            "Step #{0} in apply patch scenario perform '{1}', but "
            "command isn't specified".format(apply_step['id'],
                                             apply_step['type']))
        command = apply_step['command']
    # remotes sessions .clear() placed in run_actions()
    remotes = [environment.d_env.get_ssh_to_remote(ip) for ip in remotes_ips] \
        if command else []
    devops_nodes = devops_nodes if devops_action else []
    return command, remotes, devops_action, devops_nodes
Example #33
    def test_dpdk_check_public_connectivity_from_instance(self):
        """Check network connectivity from instance via floating IP.

        Scenario:
            1. Create a non-default network with a subnet.
            2. Create Router_01, set gateway and add interface
               to external network.
            3. Get existing flavor with hpgs.
            4. Create a new security group (if it doesn't exist yet).
            5. Launch an instance using the default image and flavor with hpgs
               in the hpgs availability zone.
            6. Create a new floating IP.
            7. Assign the new floating IP to the instance.
            8. Check connectivity to the floating IP using ping command.
            9. Check that public IP 8.8.8.8 can be pinged from instance.
            10. Delete instance.

        Duration 5 min

        """
        az_name = 'hpgs'
        ping_command = "ping -c 5 8.8.8.8"

        logger.info('Create a non-default network with a subnet.')
        network = self.os_conn.create_network(
            network_name=self.net_name)['network']
        subnet = self.os_conn.create_subnet(subnet_name=self.net_name,
                                            network_id=network['id'],
                                            cidr=self.subnet_cidr,
                                            ip_version=4)

        logger.info("""Create Router_01, set gateway and add interface
               to external network.""")
        gateway = {
            "network_id": self.os_conn.get_network('admin_floating_net')['id'],
            "enable_snat": True
        }
        router_param = {
            'router': {
                'name': self.router_name,
                'external_gateway_info': gateway
            }
        }
        router = self.os_conn.neutron.create_router(
            body=router_param)['router']
        self.os_conn.add_router_interface(router_id=router["id"],
                                          subnet_id=subnet["id"])

        logger.info("Get existing flavor with hpgs.")
        flavor = [
            f for f in self.os_conn.nova.flavors.list() if az_name in f.name
        ][0]

        logger.info("""Launch an instance using the default image and flavor
            with hpgs in the hpgs availability zone.""")
        srv = self.os_conn.create_server_for_migration(
            neutron=True,
            availability_zone=az_name,
            label=self.net_name,
            flavor=flavor)

        logger.info("Create a new floating IP.")
        logger.info("Assign the new floating IP to the instance.")
        fip = self.os_conn.assign_floating_ip(srv).ip

        logger.info(
            "Check connectivity to the floating IP using ping command.")
        wait(lambda: tcp_ping(fip, 22),
             timeout=180,
             interval=5,
             timeout_msg="Node {0} is not accessible by SSH.".format(fip))

        logger.info(
            "Check that public IP 8.8.8.8 can be pinged from instance.")
        with SSH(fip) as remote:
            result = remote.execute(ping_command)
            assert_true(result['exit_code'] == 0, result['stderr'])

        logger.info("Delete instance.")
        self.os_conn.delete_instance(srv)
        wait(lambda: self.os_conn.is_srv_deleted(srv),
             timeout=200,
             timeout_msg="Instance was not deleted.")
Example #34
 def assert_true(cls, condition, message=None):
     asserts.assert_true(condition, message=message)
Example #35
    def test_pagination(self):
        users = []
        users.append({
            "name": "Jetson",
            "password": "******",
            "databases": [{
                "name": "Sprockets"
            }]
        })
        users.append({
            "name": "Jetson",
            "password": "******",
            "host": "127.0.0.1",
            "databases": [{
                "name": "Sprockets"
            }]
        })
        users.append({
            "name": "Spacely",
            "password": "******",
            "databases": [{
                "name": "Sprockets"
            }]
        })
        users.append({
            "name": "Spacely",
            "password": "******",
            "host": "127.0.0.1",
            "databases": [{
                "name": "Sprockets"
            }]
        })
        users.append({
            "name": "Uniblab",
            "password": "******",
            "databases": [{
                "name": "Sprockets"
            }]
        })
        users.append({
            "name": "Uniblab",
            "password": "******",
            "host": "192.168.0.10",
            "databases": [{
                "name": "Sprockets"
            }]
        })

        self.dbaas.users.create(instance_info.id, users)
        assert_equal(202, self.dbaas.last_http_code)
        if not FAKE:
            time.sleep(5)
        limit = 2
        users = self.dbaas.users.list(instance_info.id, limit=limit)
        assert_equal(200, self.dbaas.last_http_code)
        marker = users.next

        # Better get only as many as we asked for
        assert_true(len(users) <= limit)
        assert_true(users.next is not None)
        expected_marker = "%s@%s" % (users[-1].name, users[-1].host)
        expected_marker = urllib.quote(expected_marker)
        assert_equal(marker, expected_marker)
        marker = users.next

        # I better get new users if I use the marker I was handed.
        users = self.dbaas.users.list(instance_info.id,
                                      limit=limit,
                                      marker=marker)
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(marker not in [user.name for user in users])

        # Now fetch again with a larger limit.
        users = self.dbaas.users.list(instance_info.id)
        assert_equal(200, self.dbaas.last_http_code)
        assert_true(users.next is None)
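The six user definitions at the top of this test could also be built in a loop; an equivalent sketch using the same names, hosts and masked passwords:

# Same six users as above; a host of None means the "host" key is omitted,
# matching the literal definitions.
user_specs = [("Jetson", None), ("Jetson", "127.0.0.1"),
              ("Spacely", None), ("Spacely", "127.0.0.1"),
              ("Uniblab", None), ("Uniblab", "192.168.0.10")]
users = []
for name, host in user_specs:
    user = {"name": name, "password": "******",
            "databases": [{"name": "Sprockets"}]}
    if host:
        user["host"] = host
    users.append(user)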
Example #36
    def load_ceph_partitions_cold_reboot(self):
        """Load ceph-osd partitions on 30% ~start rally~ reboot nodes

        Scenario:
            1. Revert snapshot 'load_ceph_ha'
            2. Wait until MySQL Galera is UP on some controller
            3. Check Ceph status
            4. Run ostf
            5. Fill ceph partitions on all nodes up to 30%
            6. Check Ceph status
            7. Run RALLY
            8. Cold restart all nodes
            9. Wait for HA services ready
            10. Wait until MySQL Galera is UP on some controller
            11. Run ostf

        Duration 30m
        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("load_ceph_ha")

        self.show_step(2)
        self.fuel_web.wait_mysql_galera_is_up(['slave-01'])
        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(3)
        self.fuel_web.check_ceph_status(cluster_id)

        self.show_step(4)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.show_step(5)
        for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:
            with self.fuel_web.get_ssh_for_node(node) as remote:
                file_name = "test_data"
                file_dir = remote.execute(
                    'mount | grep -m 1 ceph')['stdout'][0].split()[2]
                file_path = os.path.join(file_dir, file_name)
                result = remote.execute(
                    'fallocate -l 30G {0}'.format(file_path))['exit_code']
                assert_equal(
                    result, 0, "The file {0} was not "
                    "allocated".format(file_name))

        self.show_step(6)
        self.fuel_web.check_ceph_status(cluster_id)

        self.show_step(7)
        assert_true(settings.PATCHING_RUN_RALLY,
                    'PATCHING_RUN_RALLY is not set to True')
        rally_benchmarks = {}
        for tag in set(settings.RALLY_TAGS):
            rally_benchmarks[tag] = RallyBenchmarkTest(
                container_repo=settings.RALLY_DOCKER_REPO,
                environment=self.env,
                cluster_id=cluster_id,
                test_type=tag)
            rally_benchmarks[tag].run(result=False)

        self.show_step(8)
        self.fuel_web.cold_restart_nodes(
            self.env.d_env.get_nodes(name__in=[
                'slave-01', 'slave-02', 'slave-03', 'slave-04', 'slave-05'
            ]))

        for tag in rally_benchmarks:
            task_id = rally_benchmarks[tag].current_task.uuid
            rally_benchmarks[tag].current_task.abort(task_id)

        self.show_step(9)
        self.fuel_web.assert_ha_services_ready(cluster_id)

        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(10)
        self.fuel_web.wait_mysql_galera_is_up(['slave-01'])

        try:
            self.fuel_web.run_single_ostf_test(
                cluster_id,
                test_sets=['smoke'],
                test_name=map_ostf.OSTF_TEST_MAPPING.get(
                    'Create volume and attach it to instance'))
        except AssertionError:
            logger.debug("Test failed from first probe,"
                         " we sleep 180 seconds and try one more time "
                         "and if it fails again - test will fail ")
            time.sleep(180)
            self.fuel_web.run_single_ostf_test(
                cluster_id,
                test_sets=['smoke'],
                test_name=map_ostf.OSTF_TEST_MAPPING.get(
                    'Create volume and attach it to instance'))
        self.show_step(11)
        # LB 1519018
        self.fuel_web.run_ostf(cluster_id=cluster_id)
        self.env.make_snapshot("load_ceph_partitions_cold_reboot")
Example #37
    def deploy_sahara_ha_one_controller_tun(self):
        """Deploy cluster in ha mode with 1 controller Sahara and Neutron VXLAN

        Scenario:
            1. Create a Fuel cluster. Set the option for Sahara installation
            2. Add 1 node with "controller" role
            3. Add 1 node with "compute" role
            4. Deploy the Fuel cluster
            5. Verify Sahara service on controller
            6. Run all sanity and smoke tests
            7. Register Vanilla2 image for Sahara
            8. Run platform Vanilla2 test for Sahara

        Duration 65m
        Snapshot: deploy_sahara_ha_one_controller_tun
        """

        self.env.revert_snapshot("ready_with_3_slaves")

        logger.debug('Create Fuel cluster for Sahara tests')
        data = {
            'sahara': True,
            'net_provider': 'neutron',
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'tenant': 'saharaSimple',
            'user': '******',
            'password': '******'
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id),
            data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)

        logger.debug('Verify Sahara service on controller')
        _ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
        # count = 1 + api_workers (from sahara.conf)
        checkers.verify_service(_ip, service_name='sahara-api', count=2)
        # count = 2 * 1 (hardcoded by deployment team)
        checkers.verify_service(_ip, service_name='sahara-engine', count=2)

        logger.debug('Check MD5 sum of Vanilla2 image')
        check_image = checkers.check_image(
            settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
            settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5,
            settings.SERVTEST_LOCAL_PATH)
        asserts.assert_true(check_image)

        logger.debug('Run all sanity and smoke tests')
        path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
        test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates',
                      'HDPTwoTemplatesTest.test_hdp_two_templates']
        self.fuel_web.run_ostf(
            cluster_id=self.fuel_web.get_last_created_cluster(),
            tests_must_be_passed=[path_to_tests + test_name
                                  for test_name in test_names]
        )

        logger.debug('Import Vanilla2 image for Sahara')

        with open('{0}/{1}'.format(
                settings.SERVTEST_LOCAL_PATH,
                settings.SERVTEST_SAHARA_VANILLA_2_IMAGE)) as data:
            os_conn.create_image(
                name=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_NAME,
                properties=settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_META,
                data=data,
                is_public=True,
                disk_format='qcow2',
                container_format='bare')

        path_to_tests = 'fuel_health.tests.tests_platform.test_sahara.'
        test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster']
        for test_name in test_names:
            logger.debug('Run platform test {0} for Sahara'.format(test_name))
            self.fuel_web.run_single_ostf_test(
                cluster_id=cluster_id, test_sets=['tests_platform'],
                test_name=path_to_tests + test_name, timeout=60 * 200)

        self.env.make_snapshot("deploy_sahara_ha_one_controller_tun")
Example #38
def _execute_query(host, user_name, password, query):
    print(host, user_name, password, query)
    with create_mysql_connection(host, user_name, password) as db:
        result = db.execute(query)
    assert_true(result is not None,
                "something went wrong in the sql connection")
    return result
Example #39
 def mc_list_shared_playlist_entries(self):
     entries = self.mc.get_shared_playlist_contents(TEST_PLAYLIST_SHARETOKEN)
     assert_true(len(entries) > 0)
Example #40
def _test_configuration_is_applied_to_instance(instance, configuration_id):
    if CONFIG.fake_mode:
        raise SkipTest("configuration from sql does not work in fake mode")
    instance_test = instance_info.dbaas.instances.get(instance.id)
    assert_equal(configuration_id, instance_test.configuration['id'])
    if configuration_id:
        testconfig_info = instance_info.dbaas.configurations.get(
            configuration_id)
    else:
        testconfig_info = instance_info.dbaas.instance.configuration(
            instance.id)
        testconfig_info['configuration']
    conf_instances = instance_info.dbaas.configurations.instances(
        configuration_id)
    config_instance_ids = [inst.id for inst in conf_instances]
    assert_true(instance_test.id in config_instance_ids)
    cfg_names = testconfig_info.values.keys()

    host = _get_address(instance.id)
    for user in instance.users:
        username = user['name']
        password = user['password']
        concat_variables = "','".join(cfg_names)
        query = ("show variables where Variable_name "
                 "in ('%s');" % concat_variables)
        actual_values = _execute_query(host, username, password, query)
    print("actual_values %s" % actual_values)
    print("testconfig_info.values %s" % testconfig_info.values)
    assert_true(len(actual_values) == len(cfg_names))

    # check the configs exist
    attrcheck = AttrCheck()
    allowed_attrs = [actual_key for actual_key, actual_value in actual_values]
    attrcheck.contains_allowed_attrs(
        testconfig_info.values, allowed_attrs,
        msg="Configurations parameters")

    def _get_parameter_type(name):
        instance_info.dbaas.configuration_parameters.get_parameter(
            instance_info.dbaas_datastore,
            instance_info.dbaas_datastore_version,
            name)
        resp, body = instance_info.dbaas.client.last_response
        print(resp)
        print(body)
        return json.loads(body)['type']

    # check the config values are correct
    for key, value in actual_values:
        key_type = _get_parameter_type(key)
        # mysql returns 'ON' and 'OFF' for True and False respectively
        if value == 'ON':
            converted_key_value = (str(key), 1)
        elif value == 'OFF':
            converted_key_value = (str(key), 0)
        else:
            if key_type == 'integer':
                value = int(value)
            converted_key_value = (str(key), value)
        print("converted_key_value: %s" % str(converted_key_value))
        assert_true(converted_key_value in testconfig_info.values.items())
Example #41
    def test_log_rotation_one_week_11MB(self):
        """Logrotate with logrotate.conf for 1 week old file with size 11MB

        Scenario:
            1. Revert snapshot "empty"
            2. Check free disk space and free inodes under /var/log
            3. Generate 1 week old 11MB size file
            4. Run logrotate 2 times
            5. Check free disk space and free inodes

        Duration 30m

        """
        self.show_step(1, initialize=True)
        self.env.revert_snapshot("empty")
        with self.env.d_env.get_admin_remote() as remote:

            # get data before logrotate
            self.show_step(2)
            free = self.check_free_space(remote, return_as_is=True)

            free_inodes, i_suff = self.check_free_inodes(remote)
            logger.debug('Free inodes before file '
                         'creation: {0}{1}'.format(free_inodes, i_suff))
            # create 1 week old empty file

            self.create_old_file(remote, name='/var/log/messages')
            self.show_step(3)
            self.generate_file(remote,
                               size='11M',
                               path='/var/log/',
                               name='messages')

            free2 = self.check_free_space(remote, return_as_is=True)
            assert_true(
                free2 < free, 'File was not created. Free space '
                'before creation {0}, '
                'free space after '
                'creation {1}'.format(free, free2))
            self.show_step(4)
            self.execute_logrotate_cmd(remote)

            free3 = self.check_free_space(remote, return_as_is=True)
            logger.debug('Free space after first'
                         ' rotation {0}'.format(free3))
            res = self.execute_logrotate_cmd(remote, exit_code=1)

            # Expect exit code 1 here: on the second run some already
            # rotated logs are skipped, which makes logrotate return 1
            assert_equal(1, res['exit_code'])
            assert_equal(
                False, 'error' in res['stderr'],
                'Second run of logrotate failed'
                ' with {0}'.format(res['stderr']))
            self.show_step(5)
            free4 = self.check_free_space(remote, return_as_is=True)
            free_inodes4, i_suff4 = self.check_free_inodes(remote)
            logger.info('Free inodes after log rotation:'
                        ' {0}{1}'.format(free_inodes4, i_suff4))

            assert_true(
                free4 > free2, 'Logs were not rotated. '
                'Rotate was executed 2 times. '
                'Free space after file creation: {0}, '
                'after rotation: {1}, free space before rotation: '
                '{2}'.format(free2, free4, free))

            assert_equal(
                (free_inodes, i_suff), (free_inodes4, i_suff4),
                'Unexpected free inodes count. Before log rotation: {0}{1},'
                ' after log rotation: {2}{3}'.format(free_inodes, i_suff,
                                                     free_inodes4, i_suff4))
        self.env.make_snapshot("test_logrotate_one_week_11MB")
Example #42
    def test_configurations_get(self):
        # test that the instance shows up on the assigned configuration
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(configuration_info.id, result.id)
        assert_equal(configuration_info.name, result.name)
        assert_equal(configuration_info.description, result.description)

        # check the result field types
        with TypeCheck("configuration", result) as check:
            check.has_field("id", six.string_types)
            check.has_field("name", six.string_types)
            check.has_field("description", six.string_types)
            check.has_field("values", dict)
            check.has_field("created", six.string_types)
            check.has_field("updated", six.string_types)
            check.has_field("instance_count", int)

        print(result.values)

        # check for valid timestamps
        assert_true(_is_valid_timestamp(result.created))
        assert_true(_is_valid_timestamp(result.updated))

        # check that created and updated timestamps differ, since
        # test_appending_to_existing_configuration should have changed the
        # updated timestamp
        if not CONFIG.fake_mode:
            assert_not_equal(result.created, result.updated)

        assert_equal(result.instance_count, 1)

        with CollectionCheck("configuration_values", result.values) as check:
            # check each item has the correct type according to the rules
            for (item_key, item_val) in result.values.items():
                print("item_key: %s" % item_key)
                print("item_val: %s" % item_val)
                dbaas = instance_info.dbaas
                param = dbaas.configuration_parameters.get_parameter(
                    instance_info.dbaas_datastore,
                    instance_info.dbaas_datastore_version,
                    item_key)
                if param.type == 'integer':
                    check.has_element(item_key, int)
                if param.type == 'string':
                    check.has_element(item_key, six.string_types)
                if param.type == 'boolean':
                    check.has_element(item_key, bool)

        # Test to make sure that another user is not able to GET this config
        reqs = Requirements(is_admin=False)
        test_auth_user = instance_info.user.auth_user
        other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])
        other_user_tenant_id = other_user.tenant_id
        client_tenant_id = instance_info.user.tenant_id
        if other_user_tenant_id == client_tenant_id:
            other_user = CONFIG.users.find_user(
                reqs, black_list=[instance_info.user.auth_user,
                                  other_user])
        print(other_user)
        print(other_user.__dict__)
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.configurations.get,
                      configuration_info.id)
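# _is_valid_timestamp is defined elsewhere in the test module. One plausible
# implementation, assuming the API returns ISO 8601 strings such as
# "2016-01-01T12:00:00" (the format is an assumption, not confirmed here):
from datetime import datetime


def _is_valid_timestamp(value, fmt="%Y-%m-%dT%H:%M:%S"):
    """Return True if value parses as a timestamp in the given format."""
    try:
        datetime.strptime(value, fmt)
    except (TypeError, ValueError):
        return False
    return True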
Example #43
    def deploy_murano_ha_with_gre(self):
        """Deploy cluster in ha mode with Murano and Neutron GRE

        Scenario:
            1. Create cluster. Set install Murano option
            2. Add 3 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Verify Murano services
            6. Run OSTF
            7. Register Murano image
            8. Run OSTF Murano platform tests

        Snapshot: deploy_murano_ha_with_gre

        """
        self.env.revert_snapshot("ready_with_5_slaves")

        LOGGER.debug('Check MD5 of image')
        check_image = checkers.check_image(
            settings.SERVTEST_MURANO_IMAGE,
            settings.SERVTEST_MURANO_IMAGE_MD5,
            settings.SERVTEST_LOCAL_PATH)
        asserts.assert_true(check_image, "Image verification failed")

        data = {
            'murano': True,
            'net_provider': 'neutron',
            'net_segment_type': 'gre',
            'tenant': 'muranoHA',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings=data)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        cluster_vip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(
            cluster_vip, data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=13, networks_count=2, timeout=300)
        for slave in ["slave-01", "slave-02", "slave-03"]:
            checkers.verify_service(
                self.env.get_ssh_to_remote_by_name(slave),
                service_name='murano-api')

        common_func = Common(cluster_vip, data['user'], data['password'],
                             data['tenant'])

        LOGGER.debug('Run sanity and functional Murano OSTF tests')
        self.fuel_web.run_single_ostf_test(
            cluster_id=self.fuel_web.get_last_created_cluster(),
            test_sets=['sanity'],
            test_name=('fuel_health.tests.sanity.test_sanity_murano.'
                       'MuranoSanityTests.test_create_and_delete_service')
        )

        LOGGER.debug('Import Murano image')
        common_func.image_import(
            settings.SERVTEST_LOCAL_PATH,
            settings.SERVTEST_MURANO_IMAGE,
            settings.SERVTEST_MURANO_IMAGE_NAME,
            settings.SERVTEST_MURANO_IMAGE_META)

        LOGGER.debug('Boot instance with Murano image')

        image_name = settings.SERVTEST_MURANO_IMAGE_NAME
        srv = common_func.create_instance(flavor_name='test_murano_flavor',
                                          ram=2048, vcpus=1, disk=20,
                                          server_name='murano_instance',
                                          image_name=image_name,
                                          neutron_network=True)

        wait(lambda: common_func.get_instance_detail(srv).status == 'ACTIVE',
             timeout=60 * 60)

        common_func.delete_instance(srv)

        LOGGER.debug('Run OSTF platform tests')

        test_class_main = ('fuel_health.tests.platform_tests'
                           '.test_murano_linux.MuranoDeployLinuxServicesTests')
        tests_names = ['test_deploy_apache_service', ]

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main,
                                                 test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(
                cluster_id=cluster_id, test_sets=['platform_tests'],
                test_name=test_name, timeout=60 * 36)

        self.env.make_snapshot("deploy_murano_ha_with_gre")
Example #44
    def deploy_influxdb_grafana_plugin(self):
        """Deploy a cluster with the InfluxDB-Grafana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with influxdb_grafana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_influxdb_grafana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        # copy plugin to the master node and install it
        with self.env.d_env.get_admin_remote() as remote:
            checkers.upload_tarball(
                remote, INFLUXDB_GRAFANA_PLUGIN_PATH, '/var')
            checkers.install_plugin_check_code(
                remote, plugin=os.path.basename(INFLUXDB_GRAFANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT_TYPE,
            }
        )

        plugin_name = 'influxdb_grafana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_influxdb_grafana',
            'influxdb_rootpass/value': 'lmapass',
            'influxdb_userpass/value': 'lmapass',
            'grafana_userpass/value': 'lmapass',
        }

        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            "Plugin couldn't be enabled. Check plugin version. Test aborted")

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['influxdb_grafana']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)

        influxdb_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        influxdb_server_ip = influxdb_server.get('ip')
        assert_is_not_none(influxdb_server_ip,
                           "Failed to get the IP of InfluxDB server")

        logger.debug("Check that InfluxDB is ready")

        influxdb_url = "http://{0}:8086/query?db=lma&u={1}&p={2}&" + \
            "q=show+measurements"
        r = requests.get(influxdb_url.format(
            influxdb_server_ip, 'lma', options['influxdb_userpass/value']))
        msg = "InfluxDB responded with {}, expected 200".format(r.status_code)
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the Grafana server is running")

        r = requests.get(
            "http://{0}:{1}@{2}:8000/api/org".format(
                'grafana', options['grafana_userpass/value'],
                influxdb_server_ip))
        msg = "Grafana server responded with {}, expected 200".format(
            r.status_code)
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_influxdb_grafana_plugin")
    def dvs_vcenter_bind_port(self):
        """Check abilities to bind port on DVS to VM, disable/enable this port.

        Scenario:
            1. Revert snapshot to dvs_vcenter_destructive_setup
            2. Create private networks net01 with sunet.
            3. Launch instances VM_1 and VM_2 in the net01
               with image TestVM and flavor m1.micro in nova az.
            4. Launch instances VM_3 and VM_4 in the net01
               with image TestVM-VMDK and flavor m1.micro in nova az.
            4. Bind sub_net port of instances.
            5. Check instances are not available.
            6. Enable sub_net port of all instances.
            7. Verify that instances should communicate between each other.
               Send icmp ping between instances.


        Duration: 1,5 hours

        """
        self.env.revert_snapshot("dvs_vcenter_systest_setup")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create new network
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        # create security group with rules for ssh and ping
        security_group = os_conn.create_sec_group_for_ssh()

        logger.info("Create non default network with subnet.")
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = os_conn.create_network(
            network_name=self.net_data[0].keys()[0])['network']

        subnet = os_conn.create_subnet(
            subnet_name=network['name'],
            network_id=network['id'],
            cidr=self.net_data[0][self.net_data[0].keys()[0]])

        logger.info("Check that network are created.")
        assert_true(
            os_conn.get_network(network['name'])['id'] == network['id'])

        logger.info("Add net_1 to default router")
        router = os_conn.get_router(os_conn.get_network(self.ext_net_name))
        os_conn.add_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])

        #  Launch instance VM_1 and VM_2
        instances = openstack.create_instances(
            os_conn=os_conn,
            nics=[{
                'net-id': network['id']
            }],
            vm_count=1,
            security_groups=[security_group.name])
        openstack.verify_instance_state(os_conn)

        ports = os_conn.neutron.list_ports()['ports']
        floating_ip = openstack.create_and_assign_floating_ips(
            os_conn, instances)
        instance_ports = []
        for instance in instances:
            instance_addr = os_conn.get_nova_instance_ip(
                instance, net_name=network['name'])
            for port in ports:
                port_addr = port['fixed_ips'][0]['ip_address']
                if instance_addr == port_addr:
                    instance_ports.append(port)
        for port in instance_ports:
            os_conn.neutron.update_port(port['id'],
                                        {'port': {
                                            'admin_state_up': False
                                        }})

        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        with self.fuel_web.get_ssh_for_node(controller.name) as ssh_controller:
            # Verify that there is no connection to the instances
            try:
                openstack.check_connection_vms(os_conn,
                                               floating_ip,
                                               remote=ssh_controller,
                                               command='pingv4')
            except Exception as e:
                logger.info(str(e))

            # Enable sub_net ports of instances
            for port in instance_ports:
                os_conn.neutron.update_port(port['id'],
                                            {'port': {
                                                'admin_state_up': True
                                            }})

                instance.reboot()
                wait(lambda: os_conn.get_instance_detail(instance).status ==
                     "ACTIVE",
                     timeout=300)

            time.sleep(60)  # give the instance time after reboot to get an IP

            # Verify that the instances can communicate with each other:
            # send icmp ping between instances
            openstack.check_connection_vms(os_conn,
                                           floating_ip,
                                           remote=ssh_controller,
                                           command='pingv4')
Example #46
    def deploy_sahara_simple_gre(self):
        """Deploy cluster in simple mode with Sahara and Neutron GRE

        Scenario:
            1. Create cluster. Set install Sahara option
            2. Add 1 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Verify Sahara services
            6. Run OSTF
            7. Register Sahara image
            8. Run OSTF platform Sahara test only

        Snapshot: deploy_sahara_simple_gre

        """
        if settings.OPENSTACK_RELEASE == settings.OPENSTACK_RELEASE_REDHAT:
            raise SkipTest()

        LOGGER.debug('Check MD5 of image')
        check_image = checkers.check_image(
            settings.SERVTEST_SAHARA_IMAGE,
            settings.SERVTEST_SAHARA_IMAGE_MD5,
            settings.SERVTEST_LOCAL_PATH)
        asserts.assert_true(check_image)

        self.env.revert_snapshot("ready_with_3_slaves")
        LOGGER.debug('Create cluster for sahara tests')
        data = {
            'sahara': True,
            'net_provider': 'neutron',
            'net_segment_type': 'gre',
            'tenant': 'saharaSimple',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=data
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute']
            }
        )

        self.fuel_web.deploy_cluster_wait(cluster_id)
        controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
        os_conn = os_actions.OpenStackActions(
            controller['ip'], data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=5, networks_count=2, timeout=300)

        checkers.verify_service(
            self.env.get_ssh_to_remote_by_name("slave-01"),
            service_name='sahara-all')

        common_func = Common(controller['ip'], data['user'], data['password'],
                             data['tenant'])

        test_classes = ['fuel_health.tests.sanity.test_sanity_sahara.'
                        'SanitySaharaTests.test_sanity_sahara']
        self.fuel_web.run_ostf(
            cluster_id=self.fuel_web.get_last_created_cluster(),
            tests_must_be_passed=test_classes
        )

        LOGGER.debug('Import image')
        common_func.image_import(
            settings.SERVTEST_LOCAL_PATH,
            settings.SERVTEST_SAHARA_IMAGE,
            settings.SERVTEST_SAHARA_IMAGE_NAME,
            settings.SERVTEST_SAHARA_IMAGE_META)

        common_func.goodbye_security()

        LOGGER.debug('Run OSTF Sahara platform test')

        self.fuel_web.run_single_ostf_test(
            cluster_id=cluster_id, test_sets=['platform_tests'],
            test_name=('fuel_health.tests.platform_tests.'
                       'test_sahara.PlatformSaharaTests.'
                       'test_platform_sahara'), timeout=60 * 200)

        self.env.make_snapshot("deploy_sahara_simple_gre")
    def mongo_mysql_partition_preservation(self):
        """Verify partition preservation of Ceilometer and mysql data.

        Scenario:
            1. Revert the snapshot
            2. Create a ceilometer alarm
            3. Mark 'mongo' and 'mysql' partitions to be
               preserved on one of controllers
            4. Reinstall the controller
            5. Verify that the alarm is present after the node reinstallation
            6. Verify IST has been received for the reinstalled controller
            7. Run network verification
            8. Run OSTF

        Duration: 110m

        """
        self.env.revert_snapshot("node_reinstallation_env")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create a ceilometer alarm
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            alarm_name = "test_alarm"
            res = remote.execute("source openrc; "
                                 "ceilometer alarm-threshold-create "
                                 "--name {0} "
                                 "-m {1} "
                                 "--threshold {2}".format(
                                     alarm_name, "cpu_util", "80.0"))
            assert_equal(0, res['exit_code'],
                         "Creating alarm via ceilometer CLI failed.")
            initial_alarms = remote.execute(
                "source openrc; ceilometer alarm-list")

        mongo_nailgun = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['mongo'])[0]

        # Mark 'mongo' and 'mysql' partitions to be preserved
        with self.env.d_env.get_admin_remote() as remote:
            PartitionPreservation._preserve_partition(remote,
                                                      mongo_nailgun['id'],
                                                      "mongo")
            PartitionPreservation._preserve_partition(remote,
                                                      mongo_nailgun['id'],
                                                      "mysql")

        NodeReinstallationEnv._reinstall_nodes(self.fuel_web, cluster_id,
                                               [str(mongo_nailgun['id'])])

        with self.fuel_web.get_ssh_for_nailgun_node(mongo_nailgun) as remote:
            alarms = remote.execute("source openrc; ceilometer alarm-list")
            assert_equal(
                initial_alarms['stdout'], alarms['stdout'],
                "{0} alarm is not available in mongo after reinstallation "
                "of the controllers".format(alarm_name))

            log_path = "/var/log/mysql/error.log"
            output = remote.execute(
                'grep "IST received" {0} | grep -v grep &>/dev/null '
                '&& echo "OK" || echo "FAIL"'.format(log_path))

        assert_true(
            'OK' in output['stdout'][0],
            "IST was not received after the {0} node "
            "reinstallation.".format(mongo_nailgun['hostname']))

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])
    def extended_tests_reset_vcenter(self, openstack_ip):
        """Common verification of dvs_reboot_vcenter* test cases.

        :param openstack_ip: type string, openstack ip
        """
        admin = os_actions.OpenStackActions(openstack_ip, SERVTEST_USERNAME,
                                            SERVTEST_PASSWORD, SERVTEST_TENANT)

        # create security group with rules for ssh and ping
        security_group = admin.create_sec_group_for_ssh()

        default_sg = [
            sg
            for sg in admin.neutron.list_security_groups()['security_groups']
            if sg['tenant_id'] == admin.get_tenant(SERVTEST_TENANT).id
            if sg['name'] == 'default'
        ][0]

        network = admin.nova.networks.find(label=self.inter_net_name)

        # create access point server
        access_point, access_point_ip = openstack.create_access_point(
            os_conn=admin,
            nics=[{
                'net-id': network.id
            }],
            security_groups=[security_group.name, default_sg['name']])

        self.show_step(13)
        self.show_step(14)
        instances = openstack.create_instances(
            os_conn=admin,
            nics=[{
                'net-id': network.id
            }],
            vm_count=1,
            security_groups=[default_sg['name']])
        openstack.verify_instance_state(admin)

        # Get private ips of instances
        ips = []
        for instance in instances:
            ips.append(
                admin.get_nova_instance_ip(instance,
                                           net_name=self.inter_net_name))
        time.sleep(30)
        self.show_step(15)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(ping_result['exit_code'] == 0,
                        "Ping isn't available from {0} to {1}".format(ip, ip))

        self.show_step(16)
        vcenter_name = [
            name for name in self.WORKSTATION_NODES if 'vcenter' in name
        ].pop()
        node = vmrun.Vmrun(self.host_type,
                           self.path_to_vmx_file.format(vcenter_name),
                           host_name=self.host_name,
                           username=self.WORKSTATION_USERNAME,
                           password=self.WORKSTATION_PASSWORD)
        node.reset()

        self.show_step(17)
        wait(lambda: not icmp_ping(self.VCENTER_IP),
             interval=1,
             timeout=10,
             timeout_msg='vCenter is still available.')

        self.show_step(18)
        wait(lambda: icmp_ping(self.VCENTER_IP),
             interval=5,
             timeout=120,
             timeout_msg='vCenter is not available.')

        self.show_step(20)
        for ip in ips:
            ping_result = openstack.remote_execute_command(
                access_point_ip, ip, "ping -c 5 {}".format(ip))
            assert_true(ping_result['exit_code'] == 0,
                        "Ping isn't available from {0} to {1}".format(ip, ip))
Example #49
    def deploy_heat_ha(self):
        """Deploy Heat cluster in HA mode

        Scenario:
            1. Create cluster
            2. Add 3 node with controller role
            3. Add 1 node with compute role
            4. Deploy the cluster
            5. Verify heat services
            6. Run OSTF
            7. Register heat image
            8. Run OSTF platform tests

        Snapshot: deploy_heat_ha

        """

        self.env.revert_snapshot("ready_with_5_slaves")

        LOGGER.debug('Check MD5 of image')
        check_image = checkers.check_image(
            settings.SERVTEST_HEAT_IMAGE,
            settings.SERVTEST_HEAT_IMAGE_MD5,
            settings.SERVTEST_LOCAL_PATH)
        asserts.assert_true(check_image, "Image verification failed")

        data = {
            'net_provider': 'neutron',
            'net_segment_type': 'gre',
            'tenant': 'heatSimple',
            'user': '******',
            'password': '******'
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings=data)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)
        cluster_vip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(
            cluster_vip, data['user'], data['password'], data['tenant'])
        self.fuel_web.assert_cluster_ready(
            os_conn, smiles_count=13, networks_count=2, timeout=300)

        for slave in ["slave-01", "slave-02", "slave-03"]:
            checkers.verify_service(
                self.env.get_ssh_to_remote_by_name(slave),
                service_name='heat-api', count=3)

        common_func = Common(cluster_vip,
                             data['user'],
                             data['password'],
                             data['tenant'])

        LOGGER.debug('Import Heat image')
        common_func.image_import(
            settings.SERVTEST_LOCAL_PATH,
            settings.SERVTEST_HEAT_IMAGE,
            settings.SERVTEST_HEAT_IMAGE_NAME,
            settings.SERVTEST_HEAT_IMAGE_META)

        LOGGER.debug('Run Heat OSTF platform tests')

        test_class_main = ('fuel_health.tests.platform_tests.'
                           'test_heat.'
                           'HeatSmokeTests')
        tests_names = ['test_actions',
                       'test_rollback']

        test_classes = []

        for test_name in tests_names:
            test_classes.append('{0}.{1}'.format(test_class_main,
                                                 test_name))

        for test_name in test_classes:
            self.fuel_web.run_single_ostf_test(
                cluster_id=cluster_id, test_sets=['platform_tests'],
                test_name=test_name, timeout=60 * 60)

        self.env.make_snapshot("deploy_heat_ha")
    def compute_stop_reinstallation(self):
        """Verify stop reinstallation of compute.

        Scenario:
            1. Revert the snapshot
            2. Create an OS volume and OS instance
            3. Mark 'cinder' and 'vm' partitions to be preserved
            4. Stop reinstallation process of compute
            5. Start the reinstallation process again
            6. Run network verification
            7. Run OSTF
            8. Verify that the volume is present and has 'available' status
               after the node reinstallation
            9. Verify that the VM is available and pingable
               after the node reinstallation

        Duration: 115m

        """
        self.env.revert_snapshot("node_reinstallation_env")

        cluster_id = self.fuel_web.get_last_created_cluster()

        # Create an OS volume
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))

        volume = os_conn.create_volume()

        # Create an OS instance
        cmp_host = os_conn.get_hypervisors()[0]

        net_label = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']

        vm = os_conn.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(cmp_host.hypervisor_hostname),
            label=net_label)
        vm_floating_ip = os_conn.assign_floating_ip(vm)
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            timeout=120)

        cmp_nailgun = self.fuel_web.get_nailgun_node_by_fqdn(
            cmp_host.hypervisor_hostname)

        # Mark 'cinder' and 'vm' partitions to be preserved
        with self.env.d_env.get_admin_remote() as remote:
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'],
                                                      "cinder")
            PartitionPreservation._preserve_partition(remote,
                                                      cmp_nailgun['id'], "vm")

        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
        devops_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
            slave_nodes)

        logger.info('Stop reinstallation process')
        self._stop_reinstallation(self.fuel_web, cluster_id,
                                  [str(cmp_nailgun['id'])], devops_nodes)

        self.fuel_web.run_network_verify(cluster_id)
        logger.info('Start the reinstallation process again')
        NodeReinstallationEnv._reinstall_nodes(self.fuel_web, cluster_id,
                                               [str(cmp_nailgun['id'])])

        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])

        # Verify that the created volume is still available
        try:
            volume = os_conn.cinder.volumes.get(volume.id)
        except NotFound:
            raise AssertionError(
                "{0} volume is not available after its {1} hosting node "
                "reinstallation".format(volume.id, cmp_nailgun['fqdn']))
        expected_status = "available"
        assert_equal(
            expected_status, volume.status,
            "{0} volume status is {1} after its {2} hosting node "
            "reinstallation. Expected status is {3}.".format(
                volume.id, volume.status, cmp_nailgun['fqdn'],
                expected_status))

        # Verify that the VM is still available
        try:
            os_conn.verify_instance_status(vm, 'ACTIVE')
        except AssertionError:
            raise AssertionError(
                "{0} VM is not available after its {1} hosting node "
                "reinstallation".format(vm.name, cmp_host.hypervisor_hostname))
        assert_true(
            devops_helpers.tcp_ping(vm_floating_ip.ip, 22),
            "{0} VM is not accessible via its {1} floating "
            "ip".format(vm.name, vm_floating_ip.ip))
Example #51
    def cli_node_deletion_check(self):
        """Destroy node and remove it from Nailgun using Fuel CLI

        Scenario:
            1. Revert snapshot 'cli_selected_nodes_deploy'
            2. Check 'slave-03' is present
            3. Destroy 'slave-03'
            4. Wait until 'slave-03' becomes offline
            5. Delete offline 'slave-03' from db
            6. Check presence of 'slave-03'

        Duration 30m

        """
        self.env.revert_snapshot("cli_selected_nodes_deploy")

        with self.env.d_env.get_admin_remote() as remote:
            node_id = self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[2])['id']

            assert_true(check_cobbler_node_exists(remote, node_id),
                        "node-{0} is not found".format(node_id))
        self.env.d_env.nodes().slaves[2].destroy()
        try:
            wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
                self.env.d_env.nodes().slaves[2])['online'],
                 timeout=60 * 6)
        except TimeoutError:
            raise TimeoutError(
                "node-{0} did not become offline after "
                "destroy".format(node_id))
        with self.env.d_env.get_admin_remote() as remote:
            res = remote.execute(
                'fuel node --node-id {0} --delete-from-db'.format(node_id))
        assert_true(
            res['exit_code'] == 0, "Offline node-{0} was not"
            "deleted from database".format(node_id))

        with self.env.d_env.get_admin_remote() as remote:
            try:
                wait(lambda: not remote.execute(
                    "fuel node | awk '{{print $1}}' | grep -w '{0}'".format(
                        node_id))['exit_code'] == 0,
                     timeout=60 * 4)
            except TimeoutError:
                raise TimeoutError(
                    "After deletion node-{0} is found in fuel list".format(
                        node_id))

        with self.env.d_env.get_admin_remote() as remote:
            is_cobbler_node_exists = check_cobbler_node_exists(remote, node_id)

        assert_false(
            is_cobbler_node_exists,
            "After deletion node-{0} is found in cobbler list".format(node_id))

        with self.env.d_env.get_admin_remote() as remote:
            cluster_id = ''.join(
                remote.execute("fuel env | tail -n 1 | awk {'print $1'}")
                ['stdout']).rstrip()

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'],
                               should_fail=1)
Example #52
    def deploy_ha_neutron(self):
        """Deploy cluster in HA mode, Neutron with GRE segmentation

        Scenario:
            1. Create cluster. HA, Neutron with GRE segmentation
            2. Add 3 nodes with controller roles
            3. Add 2 nodes with compute roles
            4. Add 1 node with cinder role
            5. Deploy the cluster
            6. Destroy controller with running l3 agent
            7. Wait for the controller to become OFFLINE in the Fuel UI
            8. Run instance connectivity OSTF tests

        Snapshot deploy_ha_neutron

        """
        self.env.revert_snapshot("ready")
        self.env.bootstrap_nodes(self.env.nodes().slaves[:6])

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE_HA,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": 'gre'
            }
        )
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['cinder']
            }
        )
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Look for controller with l3 agent
        ret = self.fuel_web.get_pacemaker_status(
            self.env.nodes().slaves[0].name)
        logger.debug('pacemaker state before fail is {0}'.format(ret))
        fqdn = re.search(
            r'p_neutron-l3-agent\s+\(ocf::mirantis:neutron-agent-l3\):\s+'
            r'Started (node-\d+)', ret).group(1)
        logger.debug('fqdn before fail is {0}'.format(fqdn))
        devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
            fqdn, self.env.nodes().slaves)
        logger.debug('devops node name before fail is {0}'.format(
            devops_node.name))
        # Destroy it and wait for OFFLINE status at fuel UI
        devops_node.destroy()
        # sleep max(op monitor interval)
        time.sleep(60 * 2)
        wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
            devops_node)['online'])

        remains_online_nodes = \
            [node for node in self.env.nodes().slaves[0:3]
             if self.fuel_web.get_nailgun_node_by_devops_node(node)['online']]
        logger.debug('Online nodes are {0}'.format(
            [node.name for node in remains_online_nodes]))
        # Look for controller with l3 agent one more time
        ret = self.fuel_web.get_pacemaker_status(remains_online_nodes[0].name)
        fqdn = re.search(
            r'p_neutron-l3-agent\s+\(ocf::mirantis:neutron-agent-l3\):\s+'
            r'Started (node-\d+)', ret).group(1)

        logger.debug('fqdn with l3 after fail is {0}'.format(fqdn))

        devops_node = self.fuel_web.find_devops_node_by_nailgun_fqdn(
            fqdn, self.env.nodes().slaves)

        logger.debug('Devops node with recovered l3 is {0}'.format(
            devops_node.name))

        asserts.assert_true(
            devops_node.name in [node.name for node in remains_online_nodes])

        cluster_id = self.fuel_web.client.get_cluster_id(
            self.__class__.__name__)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'],
            should_fail=1,
            failed_test_name=['Check that required services are running'])
Example #53
def assert_contains(exception_message, substrings):
    for substring in substrings:
        assert_true(substring in exception_message,
                    message="'%s' not in '%s'" %
                    (substring, exception_message))
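# A hypothetical usage example of assert_contains; the divide() function is
# invented purely for illustration.
def divide(a, b):
    if b == 0:
        raise ValueError("division by zero is not allowed")
    return a / b


try:
    divide(1, 0)
except ValueError as exc:
    # every listed substring must appear in the exception message
    assert_contains(str(exc), ["division", "zero"])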
Example #54
    def separate_rabbit_service(self):
        """Deploy cluster with 3 separate rabbit roles

        Scenario:
            1. Create cluster
            2. Add 3 nodes with controller role
            3. Add 3 nodes with rabbit role
            4. Add 1 compute and cinder
            5. Verify networks
            6. Deploy the cluster
            7. Verify networks
            8. Run OSTF

        Duration 120m
        Snapshot separate_rabbit_service
        """
        self.check_run("separate_rabbit_service")
        checkers.check_plugin_path_env(
            var_name='SEPARATE_SERVICE_RABBIT_PLUGIN_PATH',
            plugin_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH
        )

        self.env.revert_snapshot("ready_with_9_slaves")

        # copy plugins to the master node

        utils.upload_tarball(
            ip=self.ssh_manager.admin_ip,
            tar_path=settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH,
            tar_target="/var")

        # install plugins

        utils.install_plugin_check_code(
            ip=self.ssh_manager.admin_ip,
            plugin=os.path.basename(
                settings.SEPARATE_SERVICE_RABBIT_PLUGIN_PATH))

        data = {
            'tenant': 'separaterabbit',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['vlan'],
        }

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings=data)

        plugin_name = 'detach-rabbitmq'
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        assert_true(
            self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
            msg)
        options = {'metadata/enabled': True}
        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['standalone-rabbitmq'],
                'slave-05': ['standalone-rabbitmq'],
                'slave-06': ['standalone-rabbitmq'],
                'slave-07': ['compute'],
                'slave-08': ['cinder']
            }
        )

        self.fuel_web.verify_network(cluster_id)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.fuel_web.verify_network(cluster_id)

        self.fuel_web.run_ostf(
            cluster_id=cluster_id)

        self.env.make_snapshot("separate_rabbit_service", is_make=True)
Example #55
    def ceph_rados_gw(self):
        """Deploy ceph HA with RadosGW for objects

        Scenario:
            1. Create cluster with Neutron
            2. Add 3 nodes with controller role
            3. Add 3 nodes with compute and ceph-osd role
            4. Deploy the cluster
            5. Check ceph status
            6. Run OSTF tests
            7. Check the radosgw daemon is started

        Duration 90m
        Snapshot ceph_rados_gw

        """
        def radosgw_started(remote):
            return remote.check_call('pkill -0 radosgw')['exit_code'] == 0

        self.env.revert_snapshot("ready")
        self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[:6])

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                'volumes_lvm': False,
                'volumes_ceph': True,
                'images_ceph': True,
                'objects_ceph': True,
                'tenant': 'rados',
                'user': '******',
                'password': '******'
            })
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd'],
                'slave-06': ['compute', 'ceph-osd']
            })
        self.fuel_web.verify_network(cluster_id)
        # Deploy cluster
        self.fuel_web.deploy_cluster_wait(cluster_id)

        # Network verification
        self.fuel_web.verify_network(cluster_id)

        # HAProxy backend checking
        controller_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['controller'])

        for node in controller_nodes:
            remote = self.env.d_env.get_ssh_to_remote(node['ip'])
            logger.info("Check all HAProxy backends on {}".format(
                node['meta']['system']['fqdn']))
            haproxy_status = checkers.check_haproxy_backend(remote)
            remote.clear()
            assert_equal(
                haproxy_status['exit_code'], 1,
                "HAProxy backends are DOWN. {0}".format(haproxy_status))

        self.fuel_web.check_ceph_status(cluster_id)

        # Run ostf
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        # Check the radosgw daemon is started
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            assert_true(radosgw_started(remote),
                        'radosgw daemon is not started')

        self.env.make_snapshot("ceph_rados_gw")
Example #56
    def deploy_elasticsearch_kibana_plugin(self):
        """Deploy a cluster with the Elasticsearch-Kibana plugin

        Scenario:
            1. Upload plugin to the master node
            2. Install plugin
            3. Create cluster
            4. Add 1 node with controller role
            5. Add 1 node with compute role
            6. Add 1 node with elasticsearch_kibana role
            7. Deploy the cluster
            8. Check that plugin is working
            9. Run OSTF

        Duration 60m
        Snapshot deploy_elasticsearch_kibana_plugin
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        with self.env.d_env.get_admin_remote() as remote:
            # copy plugin to the master node
            checkers.upload_tarball(remote, ELASTICSEARCH_KIBANA_PLUGIN_PATH,
                                    '/var')

            # install plugin
            checkers.install_plugin_check_code(
                remote,
                plugin=os.path.basename(ELASTICSEARCH_KIBANA_PLUGIN_PATH))

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
        )

        plugin_name = 'elasticsearch_kibana'
        options = {
            'metadata/enabled': True,
            'node_name/value': 'slave-03_elasticsearch_kibana'
        }
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"

        assert_true(self.fuel_web.check_plugin_exists(cluster_id, plugin_name),
                    msg)

        self.fuel_web.update_plugin_data(cluster_id, plugin_name, options)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['elasticsearch_kibana']
            })

        self.fuel_web.deploy_cluster_wait(cluster_id)

        es_server = self.fuel_web.get_nailgun_node_by_name('slave-03')
        es_server_ip = es_server.get('ip')
        assert_is_not_none(es_server_ip,
                           "Failed to get the IP of Elasticsearch server")

        logger.debug("Check that Elasticsearch is ready")

        r = requests.get("http://{}:9200/".format(es_server_ip))
        msg = "Elasticsearch responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        logger.debug("Check that the HTTP server is running")

        r = requests.get("http://{}/".format(es_server_ip))
        msg = "HTTP server responded with {}".format(r.status_code)
        msg += ", expected 200"
        assert_equal(r.status_code, 200, msg)

        self.fuel_web.run_ostf(cluster_id=cluster_id)

        self.env.make_snapshot("deploy_elasticsearch_kibana_plugin")
    def check_plugin_v4_is_installed(self):
        plugin_name = 'fuel_plugin_example_v4_hotpluggable'
        msg = "Plugin couldn't be enabled. Check plugin version."
        assert_true(
            self.fuel_web.check_plugin_exists(self.cluster_id, plugin_name),
            msg)
Example #58
    def migrate_vm_backed_with_ceph(self):
        """Check VM backed with ceph migration in ha mode with 1 controller

        Scenario:
            1. Create cluster
            2. Add 1 node with controller and ceph OSD roles
            3. Add 2 nodes with compute and ceph OSD roles
            4. Deploy the cluster
            5. Check ceph status
            6. Run OSTF
            7. Create a new VM, assign floating ip
            8. Migrate VM
            9. Check cluster and server state after migration
            10. Terminate VM
            11. Check that DHCP lease is not offered for MAC of deleted VM
            12. Create a new VM for migration, assign floating ip
            13. Create a volume and attach it to the VM
            14. Create filesystem on the new volume and mount it to the VM
            15. Migrate VM
            16. Mount the volume after migration
            17. Check cluster and server state after migration
            18. Terminate VM

        Duration 35m
        Snapshot vm_backed_with_ceph_live_migration
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.show_step(1, initialize=True)

        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=settings.DEPLOYMENT_MODE,
            settings={
                'volumes_ceph': True,
                'images_ceph': True,
                'ephemeral_ceph': True,
                'volumes_lvm': False,
            })

        self.show_step(2)
        self.show_step(3)

        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute', 'ceph-osd']
            })
        creds = ("cirros", "test")

        self.show_step(4)

        # Cluster deploy
        self.fuel_web.deploy_cluster_wait(cluster_id)

        def _check():
            # Run the volume test (retried below in the hope that it passes)
            test_path = map_ostf.OSTF_TEST_MAPPING.get(
                'Create volume and attach it to instance')
            logger.debug('Start to run test {0}'.format(test_path))
            self.fuel_web.run_single_ostf_test(cluster_id,
                                               test_sets=['smoke'],
                                               test_name=test_path)

        self.show_step(5)
        try:
            _check()
        except AssertionError as err:
            logger.debug(err)
            logger.debug("Test failed on the first probe; sleep 60 seconds "
                         "and try once more. If it fails again, "
                         "the test fails.")
            time.sleep(60)
            _check()

        self.show_step(6)

        # Run ostf
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(7)

        # Create new server
        os = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']

        logger.info("Create new server")
        srv = os.create_server_for_migration(
            neutron=True,
            scenario='./fuelweb_test/helpers/instance_initial_scenario',
            label=net_name)
        logger.info("Srv is currently in status: %s" % srv.status)

        # Prepare to DHCP leases checks
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        srv_instance_ip = os.get_nova_instance_ip(srv, net_name=net_name)
        srv_host_name = self.fuel_web.find_devops_node_by_nailgun_fqdn(
            os.get_srv_hypervisor_name(srv),
            self.env.d_env.nodes().slaves[:3]).name
        net_id = os.get_network(net_name)['id']
        ports = os.get_neutron_dhcp_ports(net_id)
        dhcp_server_ip = ports[0]['fixed_ips'][0]['ip_address']
        with self.fuel_web.get_ssh_for_node(srv_host_name) as srv_remote_node:
            srv_instance_mac = os.get_instance_mac(srv_remote_node, srv)

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host %s" % srv_host)

        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            md5before = os.get_md5sum("/home/test_file", remote,
                                      floating_ip.ip, creds)

        self.show_step(8)

        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=200)
        logger.info("Check cluster and server state after migration")

        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            md5after = os.get_md5sum("/home/test_file", remote, floating_ip.ip,
                                     creds)

        assert_true(
            md5after in md5before, "Md5 checksums don't match. "
            "Before migration md5 was: {bef}. "
            "Now it is: {aft}.".format(bef=md5before, aft=md5after))

        self.show_step(9)

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            res = os.execute_through_host(
                remote, floating_ip.ip,
                "ping -q -c3 -w10 {0} | grep 'received' |"
                " grep -v '0 packets received'".format(
                    settings.PUBLIC_TEST_IP), creds)
        logger.info("Ping {0} result on vm is: {1}".format(
            settings.PUBLIC_TEST_IP, res['stdout']))

        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info("Server is now on host %s" % os.get_srv_host_name(new_srv))

        self.show_step(10)

        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        assert_true(os.verify_srv_deleted(new_srv),
                    "Server was not deleted")

        self.show_step(11)
        # Check if the dhcp lease for instance still remains
        # on the previous compute node. Related Bug: #1391010
        with self.fuel_web.get_ssh_for_node('slave-01') as remote:
            dhcp_port_tag = ovs_get_tag_by_port(remote, ports[0]['id'])
            assert_false(
                checkers.check_neutron_dhcp_lease(remote, srv_instance_ip,
                                                  srv_instance_mac,
                                                  dhcp_server_ip,
                                                  dhcp_port_tag),
                "Instance has been deleted, but it's DHCP lease "
                "for IP:{0} with MAC:{1} still offers by Neutron DHCP"
                " agent.".format(srv_instance_ip, srv_instance_mac))
        self.show_step(12)
        # Create a new server
        logger.info("Create a new server for migration with volume")
        srv = os.create_server_for_migration(
            neutron=True,
            scenario='./fuelweb_test/helpers/instance_initial_scenario',
            label=net_name)
        logger.info("Srv is currently in status: %s" % srv.status)

        logger.info("Assigning floating ip to server")
        floating_ip = os.assign_floating_ip(srv)
        srv_host = os.get_srv_host_name(srv)
        logger.info("Server is on host %s" % srv_host)

        self.show_step(13)
        logger.info("Create volume")
        vol = os.create_volume()
        logger.info("Attach volume to server")
        os.attach_volume(vol, srv)

        self.show_step(14)
        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)
        logger.info("Create filesystem and mount volume")

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            os.execute_through_host(remote, floating_ip.ip,
                                    'sudo sh /home/mount_volume.sh', creds)

            os.execute_through_host(remote, floating_ip.ip,
                                    'sudo touch /mnt/file-on-volume', creds)

        self.show_step(15)
        logger.info("Get available computes")
        avail_hosts = os.get_hosts_for_migr(srv_host)

        logger.info("Migrating server")
        new_srv = os.migrate_server(srv, avail_hosts[0], timeout=120)

        logger.info("Check cluster and server state after migration")
        wait(lambda: tcp_ping(floating_ip.ip, 22), timeout=120)

        self.show_step(16)
        logger.info("Mount volume after migration")
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            out = os.execute_through_host(remote, floating_ip.ip,
                                          'sudo mount /dev/vdb /mnt', creds)

        logger.info("out of mounting volume is: %s" % out['stdout'])

        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            out = os.execute_through_host(remote, floating_ip.ip,
                                          "sudo ls /mnt", creds)
        assert_true("file-on-volume" in out['stdout'],
                    "File is absent in /mnt")

        self.show_step(17)
        logger.info("Check Ceph health is ok after migration")
        self.fuel_web.check_ceph_status(cluster_id)

        logger.info("Server is now on host %s" % os.get_srv_host_name(new_srv))

        self.show_step(18)
        logger.info("Terminate migrated server")
        os.delete_instance(new_srv)
        assert_true(os.verify_srv_deleted(new_srv),
                    "Server was not deleted")

        self.env.make_snapshot("vm_backed_with_ceph_live_migration")
Example #59
    def contrail_ceilometer_metrics(self):
        """Check that ceilometer collects contrail metrics.

        Scenario:
            1. Setup systest_setup.
            2. Create 2 instances in the default network.
            3. Send icmp packets from one instance to another.
            4. Check contrail ceilometer metrics:
                *ip.floating.receive.bytes
                *ip.floating.receive.packets
                *ip.floating.transmit.bytes
                *ip.floating.transmit.packets


        Duration 120 min

        """
        # constants
        ceilometer_metrics = [
            'ip.floating.receive.bytes', 'ip.floating.receive.packets',
            'ip.floating.transmit.bytes', 'ip.floating.transmit.packets'
        ]
        command = """source openrc; ceilometer sample-list -m {0}
         -q 'resource_id={1}'"""
        metric_type = 'cumulative'
        time_to_update_metrics = 60 * 10
        message = "Ceilometer doesn't collect metric {0}."

        self.show_step(1)
        self.env.revert_snapshot('systest_setup')
        self.show_step(2)
        cluster_id = self.fuel_web.get_last_created_cluster()
        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        srv_1 = os_conn.create_server_for_migration(neutron=True,
                                                    label='admin_internal_net')
        fip_1 = os_conn.assign_floating_ip(srv_1)

        srv_2 = os_conn.create_server_for_migration(neutron=True,
                                                    label='admin_internal_net')
        fip_2 = os_conn.assign_floating_ip(srv_2)

        for fip in [fip_1.ip, fip_2.ip]:
            wait(lambda: tcp_ping(fip, 22),
                 timeout=60,
                 interval=5,
                 timeout_msg="Node {0} is not accessible by SSH.".format(fip))

        self.show_step(3)
        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[1])
        self.ping_instance_from_instance(os_conn, controller.name,
                                         {fip_1.ip: [fip_2.ip]})

        self.show_step(4)
        with self.fuel_web.get_ssh_for_node("slave-02") as ssh:
            for metric in ceilometer_metrics:
                for resource_id in [fip_1.id, fip_2.id]:
                    wait(
                        lambda: len(list(ssh.execute(command.format(
                            metric, resource_id))['stdout'])) > 4,
                        timeout=time_to_update_metrics,
                        timeout_msg=message.format(metric))
                    m = list(
                        ssh.execute(command.format(metric,
                                                   resource_id))['stdout'])
                    # Check type of metrics
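                    # Parsing below assumes the default ASCII-table output of
                    # the CLI: m[3] is the first data row and field 5 of its
                    # space-split form holds the sample "Type" value.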
                    collect_metric_type = m[3].split(' ')[5]
                    assert_true(
                        collect_metric_type == metric_type,
                        "Type of metric {0} not equel to {1}.".format(
                            collect_metric_type, metric_type))
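
Splitting a table row on single spaces, as in the type check above, is fragile. A stdlib-only sketch of a more tolerant parser for the ceilometer sample-list ASCII table follows; the helper name and the 'Type' column label are assumptions, not taken from this suite:

    def parse_cli_table(stdout_lines):
        """Parse ASCII-table CLI output (e.g. ceilometer sample-list)
        into a list of dicts keyed by the header row."""
        rows = [line for line in stdout_lines if line.strip().startswith('|')]
        if len(rows) < 2:
            return []
        header = [c.strip() for c in rows[0].strip().strip('|').split('|')]
        samples = []
        for row in rows[1:]:
            cells = [c.strip() for c in row.strip().strip('|').split('|')]
            samples.append(dict(zip(header, cells)))
        return samples

With such a helper, the check above could become, for example, all(row.get('Type') == metric_type for row in parse_cli_table(m)), assuming the CLI labels that column 'Type'.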
Example #60
0
    def dvs_regression(self):
        """Deploy cluster with plugin and vmware datastore backend.

        Scenario:
            1. Upload plugins to the master node
            2. Install plugin.
            3. Create cluster with vcenter.
            4. Add 3 nodes with controller role.
            5. Add 2 nodes with compute + ceph role.
            6. Add 1 node with compute-vmware + cinder vmware role.
            7. Deploy the cluster.
            8. Run OSTF.
            9. Create a non-default network.
            10. Create security groups.
            11. Launch instances with the created network in nova and vcenter az.
            12. Attach created security groups to instances.
            13. Check connection between instances from different az.

        Duration: 1.8 hours
        """
        self.env.revert_snapshot("dvs_bvt")

        cluster_id = self.fuel_web.get_last_created_cluster()

        os_ip = self.fuel_web.get_public_vip(cluster_id)
        os_conn = os_actions.OpenStackActions(os_ip, SERVTEST_USERNAME,
                                              SERVTEST_PASSWORD,
                                              SERVTEST_TENANT)

        tenant = os_conn.get_tenant(SERVTEST_TENANT)
        # Create non default network with subnet.
        logger.info('Create network {}'.format(self.net_data[0].keys()[0]))
        network = os_conn.create_network(
            network_name=self.net_data[0].keys()[0],
            tenant_id=tenant.id)['network']

        subnet = os_conn.create_subnet(
            subnet_name=network['name'],
            network_id=network['id'],
            cidr=self.net_data[0][self.net_data[0].keys()[0]],
            ip_version=4)

        # Check that the network is created.
        assert_true(
            os_conn.get_network(network['name'])['id'] == network['id'])
        # Add net_1 to default router
        router = os_conn.get_router(os_conn.get_network(self.ext_net_name))
        os_conn.add_router_interface(router_id=router["id"],
                                     subnet_id=subnet["id"])
        # Launch instances in the vcenter and nova availability zones
        # in the tenant network net_01
        openstack.create_instances(os_conn=os_conn,
                                   vm_count=1,
                                   nics=[{
                                       'net-id': network['id']
                                   }])
        # Launch instances in the vcenter and nova availability zones
        # in the default network
        network = os_conn.nova.networks.find(label=self.inter_net_name)
        instances = openstack.create_instances(os_conn=os_conn,
                                               vm_count=1,
                                               nics=[{
                                                   'net-id': network.id
                                               }])
        openstack.verify_instance_state(os_conn)

        # Create security group SG_1 with an ingress rule for TCP port 22
        # (SSH) and security group SG_2 with an ingress rule for ICMP.
        sec_name = ['SG1', 'SG2']
        sg1 = os_conn.nova.security_groups.create(sec_name[0], "descr")
        sg2 = os_conn.nova.security_groups.create(sec_name[1], "descr")
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        os_conn.nova.security_group_rules.create(sg1.id, **rulesets[0])
        os_conn.nova.security_group_rules.create(sg2.id, **rulesets[1])
        # Remove default security group and attach SG_1 and SG2 to VMs
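        # (The default group's permissive rules are removed so that only the
        # explicit SSH and ICMP rules above govern traffic to the VMs.)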
        srv_list = os_conn.get_servers()
        for srv in srv_list:
            srv.remove_security_group(srv.security_groups[0]['name'])
            srv.add_security_group(sg1.id)
            srv.add_security_group(sg2.id)
        time.sleep(20)  # need wait to update rules on dvs
        fip = openstack.create_and_assign_floating_ips(os_conn, instances)
        # Check ping between VMs
        controller = self.fuel_web.get_nailgun_primary_node(
            self.env.d_env.nodes().slaves[0])
        with self.fuel_web.get_ssh_for_node(controller.name) as ssh_controller:
            openstack.check_connection_vms(os_conn,
                                           fip,
                                           remote=ssh_controller,
                                           command='pingv4')