Example #1
def show_systest_groups(**kwargs):
    """Show Proboscis groups defined in Systest suite"""
    groups_nums = get_groups()

    out = {k: len(v) for k, v in groups_nums.items()
           if k.startswith('system_test')}
    print(pretty_log(out))
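For reference, pretty_log in these examples is a fuel-qa helper that renders nested dicts and lists as indented, human-readable text for the logs. Its real implementation is not shown on this page; the function below is only a minimal sketch of the idea (a hypothetical recursive formatter, not the actual fuel-qa code).

def pretty_log_sketch(src, indent=0):
    """Hypothetical stand-in for pretty_log: indent nested dicts/lists."""
    pad = ' ' * indent
    lines = []
    if isinstance(src, dict):
        for key, value in src.items():
            if isinstance(value, (dict, list)):
                lines.append('{}{}:'.format(pad, key))
                lines.append(pretty_log_sketch(value, indent + 2))
            else:
                lines.append('{}{}: {}'.format(pad, key, value))
    elif isinstance(src, list):
        for item in src:
            lines.append(pretty_log_sketch(item, indent + 2))
    else:
        lines.append('{}{}'.format(pad, src))
    return '\n'.join(lines)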
Example #2
def print_explain(names):
    groups_nums = get_groups()
    if not isinstance(names, list):
        names = [names]
    out = []
    for name in names:
        for i in groups_nums[name]:
            if hasattr(i, 'home'):
                out.append((i.home._proboscis_entry_.parent.home, i.home))
            else:
                out.append(i)
    print(pretty_log(out))
def print_explain(names):
    groups_nums = get_groups()
    if not isinstance(names, list):
        names = [names]
    out = []
    for name in [split_group_config(i)[0] if split_group_config(i) else i
                 for i in names]:
        for i in groups_nums[name]:
            if hasattr(i, 'home'):
                out.append((i.home._proboscis_entry_.parent.home, i.home))
            else:
                out.append(i)
    print(pretty_log(out))
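The two print_explain variants above differ only in how they resolve the group name: the second strips an optional configuration suffix with split_group_config before looking the group up in the result of get_groups(). A minimal usage sketch, assuming a hypothetical Proboscis group name:

# 'system_test.deploy_and_check' is an assumed group name, for illustration only.
print_explain('system_test.deploy_and_check')
# A list of names also works, since a single name is wrapped in a list internally.
print_explain(['system_test.deploy_and_check', 'system_test.failover'])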
def remote_execute_command(instance1_ip, instance2_ip, command, wait=30):
    """Check execute remote command.

    :param instance1_ip: string, instance ip connect from
    :param instance2_ip: string, instance ip connect to
    :param command: string, remote command
    :param wait: integer, time to wait available ip of instances
    """
    with get_ssh_connection(
        instance1_ip, instance_creds[0], instance_creds[1]
    ) as ssh:

        interm_transp = ssh.get_transport()
        try:
            logger.info("Opening channel between VMs {0} and {1}".format(
                instance1_ip, instance2_ip))
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        except Exception as e:
            message = "{} Wait to update sg rules. Try to open channel again"
            logger.info(message.format(e))
            time.sleep(wait)
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        transport = paramiko.Transport(interm_chan)
        transport.start_client()
        logger.info("Passing authentication to VM")
        transport.auth_password(
            instance_creds[0], instance_creds[1])
        channel = transport.open_session()
        channel.get_pty()
        channel.fileno()
        channel.exec_command(command)

        result = {
            'stdout': [],
            'stderr': [],
            'exit_code': 0
        }
        logger.debug("Receiving exit_code, stdout, stderr")
        result['exit_code'] = channel.recv_exit_status()
        result['stdout'] = channel.recv(1024)
        result['stderr'] = channel.recv_stderr(1024)
        logger.debug('Command: {}'.format(command))
        logger.debug(pretty_log(result))
        logger.debug("Closing channel")
        channel.close()

        return result
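remote_execute_command above opens an SSH connection to the first instance, tunnels a 'direct-tcpip' channel to port 22 of the second instance, runs the command there, and returns a dict with exit_code, stdout and stderr. A hedged usage sketch, assuming two floating IPs and the module-level instance_creds used in the example:

# The IPs below are assumed values for illustration; in the tests they come
# from floating IPs assigned to the instances.
src_ip, dst_ip = '10.109.1.10', '10.109.1.11'
res = remote_execute_command(src_ip, dst_ip, 'uname -a', wait=30)
if res['exit_code'] != 0:
    logger.error('Remote command failed: {}'.format(res['stderr']))
else:
    logger.info('Remote command output: {}'.format(res['stdout']))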
Example #5
def remote_execute_command(instance1_ip, instance2_ip, command, wait=30):
    """Check execute remote command.

    :param instance1_ip: string, instance ip connect from
    :param instance2_ip: string, instance ip connect to
    :param command: string, remote command
    :param wait: integer, time to wait available ip of instances
    """
    with get_ssh_connection(instance1_ip, instance_creds[0],
                            instance_creds[1]) as ssh:

        interm_transp = ssh.get_transport()
        try:
            logger.info("Opening channel between VMs {0} and {1}".format(
                instance1_ip, instance2_ip))
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        except Exception as e:
            message = "{} Wait to update sg rules. Try to open channel again"
            logger.info(message.format(e))
            time.sleep(wait)
            interm_chan = interm_transp.open_channel('direct-tcpip',
                                                     (instance2_ip, 22),
                                                     (instance1_ip, 0))
        transport = paramiko.Transport(interm_chan)
        transport.start_client()
        logger.info("Passing authentication to VM")
        transport.auth_password(instance_creds[0], instance_creds[1])
        channel = transport.open_session()
        channel.get_pty()
        channel.fileno()
        channel.exec_command(command)

        result = {'stdout': [], 'stderr': [], 'exit_code': 0}
        logger.debug("Receiving exit_code, stdout, stderr")
        result['exit_code'] = channel.recv_exit_status()
        result['stdout'] = channel.recv(1024)
        result['stderr'] = channel.recv_stderr(1024)
        logger.debug('Command: {}'.format(command))
        logger.debug(pretty_log(result))
        logger.debug("Closing channel")
        channel.close()

        return result
    def enable_plugin(self, cluster_id, settings=None):
        """Enable NSX-T plugin on cluster.

        :param cluster_id: cluster id
        :param settings: settings in dict format
        :return: None
        """
        msg = "Plugin couldn't be enabled. Check plugin version. Test aborted"
        settings = settings if settings else {}
        checker = self.fuel_web.check_plugin_exists(cluster_id,
                                                    self.default.PLUGIN_NAME)
        assert_true(checker, msg)
        logger.info('Configure cluster with '
                    'following parameters: \n{}'.format(pretty_log(settings)))
        self.fuel_web.update_plugin_settings(
            cluster_id,
            self.default.PLUGIN_NAME,
            self.default.NSXT_PLUGIN_VERSION,
            dict(self.default.plugin_configuration, **settings))
    def deploy_with_custom_mirror(self):
        """Create local mirrors for Ubuntu repos using fuel-mirror tool

        Scenario:
            1. Create cluster with neutron networking
            2. Add 3 nodes with controller, ceph-osd roles
            3. Add 1 node with cinder, mongo roles
            4. Add 1 node with compute role
            5. Fix fuel-mirror config according to cluster repos
            6. Run create command for Ubuntu mirrors
            7. Run apply command for Ubuntu mirrors
            8. Check that only Ubuntu mirrors were changed
            9. Run create command for mos mirrors
            10. Run apply command for mos mirrors
            11. Check that mos mirrors were also changed
            12. Run network verification
            13. Deploy the cluster
            14. Run OSTF
            15. Create snapshot

        Duration 90m
        Snapshot deploy_with_custom_mirror
        """
        self.env.revert_snapshot('ready_with_5_slaves')
        admin_ip = self.ssh_manager.admin_ip

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
                'sahara': True,
                'ceilometer': True,
                'volumes_lvm': True,
                'volumes_ceph': False,
                'images_ceph': True
            })

        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['cinder', 'mongo'],
                'slave-05': ['compute']
            })
        self.show_step(5)
        self._fix_fuel_mirror_config(admin_ip)
        cluster_repos = self._get_cluster_repos(cluster_id)
        message = pretty_log({'Cluster repos': cluster_repos['all']})
        logger.info(message)
        self.show_step(6)
        create_cmd = 'fuel-mirror create -P ubuntu -G ubuntu ' \
                     '--log-file /var/log/ubuntu_mirrors_create.log'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_cmd)
        self.show_step(7)
        apply_cmd = 'fuel-mirror apply --replace  -P ubuntu -G ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd)

        self.show_step(8)

        cluster_repos = self._get_cluster_repos(cluster_id)
        remote_ubuntu_repos = [
            repo for repo in cluster_repos['ubuntu']
            if admin_ip not in repo['uri']
            and '{settings.MASTER_IP}' not in repo['uri']
        ]
        local_mos_repos = [
            repo for repo in cluster_repos['mos']
            if admin_ip in repo['uri'] or '{settings.MASTER_IP}' in repo['uri']
        ]
        repos_log = pretty_log({
            'All': cluster_repos['all'],
            'Remote Ubuntu': remote_ubuntu_repos,
            'Local MOS': local_mos_repos
        })
        logger.info(repos_log)

        assert_false(remote_ubuntu_repos,
                     message="There are some remote Ubuntu repositories: "
                     "{repos}".format(repos=remote_ubuntu_repos))
        # NOTE Main MOS repository is always local in our tests
        assert_false(
            len(local_mos_repos) > 1,
            message="More than one MOS repo became local:{repos}".format(
                repos=local_mos_repos))

        self.show_step(9)
        create_cmd = 'fuel-mirror create -P ubuntu -G mos ' \
                     '--log-file /var/log/mos_mirrors_create.log'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_cmd)
        self.show_step(10)
        apply_cmd = 'fuel-mirror apply -P ubuntu -G mos'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd)

        self.show_step(11)
        cluster_repos = self._get_cluster_repos(cluster_id)['all']
        remote_repos = [
            repo for repo in cluster_repos if admin_ip not in repo['uri']
            and '{settings.MASTER_IP}' not in repo['uri']
        ]
        message = pretty_log(cluster_repos)
        logger.info(message)
        assert_false(remote_repos,
                     message="There are some remote repositories: "
                     "{repos}".format(repos=remote_repos))

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(14)
        self.fuel_web.run_ostf(cluster_id=cluster_id,
                               test_sets=['ha', 'smoke', 'sanity'])

        self.show_step(15)
        self.env.make_snapshot('deploy_with_custom_mirror')
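The repository checks in steps 8 and 11 above reduce to a URI filter: a repo counts as local when its URI contains either the admin (master) node IP or the '{settings.MASTER_IP}' placeholder, and remote otherwise. A self-contained sketch of that filter with made-up sample data:

# Sample repo records, made up for illustration; real entries come from
# self._get_cluster_repos(cluster_id).
admin_ip = '10.20.0.2'
repos = [
    {'name': 'ubuntu', 'uri': 'http://archive.ubuntu.com/ubuntu/'},
    {'name': 'mos', 'uri': 'http://{settings.MASTER_IP}:8080/mos/ubuntu/x86_64'},
    {'name': 'mos-updates', 'uri': 'http://10.20.0.2:8080/updates/ubuntu/x86_64'},
]
remote_repos = [repo for repo in repos
                if admin_ip not in repo['uri']
                and '{settings.MASTER_IP}' not in repo['uri']]
assert [repo['name'] for repo in remote_repos] == ['ubuntu']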
Example #8
def show_all_groups(**kwargs):
    """Show all Proboscis groups"""
    groups_nums = get_groups()
    out = {k: len(v) for k, v in groups_nums.items()}
    print(pretty_log(out))
    def shutdown_ceph_for_all(self):
        """Shutdown of Neutron Vxlan, ceph for all cluster

        Scenario:
            1. Create cluster with Neutron Vxlan, ceph for all,
            ceph replication factor - 3
            2. Add 3 controller, 2 compute, 3 ceph nodes
            3. Verify Network
            4. Deploy cluster
            5. Verify networks
            6. Run OSTF
            7. Create 2 volumes and 2 instances with attached volumes
            8. Fill ceph storages up to 30% (15% for each instance)
            9. Shutdown of all nodes
            10. Wait 5 minutes
            11. Start cluster
            12. Wait until OSTF 'HA' suite passes
            13. Verify networks
            14. Run OSTF tests

        Duration 230m

        """

        self.env.revert_snapshot('ready_with_9_slaves')

        self.show_step(1, initialize=True)
        data = {
            'tenant': 'failover',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '3',
            'volumes_lvm': False,
        }
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            settings=data
        )

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd']
            }
        )
        self.show_step(3)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(4)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(7)
        os = os_actions.OpenStackActions(
            controller_ip=self.fuel_web.get_public_vip(cluster_id),
            user='******', passwd='failover', tenant='failover')
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        hypervisors = os.get_hypervisors()
        hypervisor_name = hypervisors[0].hypervisor_hostname
        instance_1 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name
        )
        logger.info("New instance {0} created on {1}"
                    .format(instance_1.id, hypervisor_name))

        floating_ip_1 = os.assign_floating_ip(instance_1)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(floating_ip_1.ip, instance_1.id))

        hypervisor_name = hypervisors[1].hypervisor_hostname
        instance_2 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name
        )
        logger.info("New instance {0} created on {1}"
                    .format(instance_2.id, hypervisor_name))

        floating_ip_2 = os.assign_floating_ip(instance_2)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(floating_ip_2.ip, instance_2.id))

        self.show_step(8)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'])
        total_ceph_size = 0
        for node in ceph_nodes:
            total_ceph_size += \
                self.fuel_web.get_node_partition_size(node['id'], 'ceph')
        percent_15_mb = 0.15 * total_ceph_size
        percent_15_gb = percent_15_mb // 1024
        volume_size = int(percent_15_gb + 1)

        volume_1 = os.create_volume(size=volume_size)
        volume_2 = os.create_volume(size=volume_size)

        logger.info('Created volumes: {0}, {1}'.format(volume_1.id,
                                                       volume_2.id))

        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']

        logger.info("Attach volumes")
        cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb'

        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' + cmd.format(srv_id=instance_1.id,
                                          volume_id=volume_1.id)
        )
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' + cmd.format(srv_id=instance_2.id,
                                          volume_id=volume_2.id)
        )

        cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"',
                'sudo sh -c "/bin/mount /dev/vdb /mnt"',
                'sudo sh -c "/usr/bin/nohup'
                ' /bin/dd if=/dev/zero of=/mnt/bigfile '
                'bs=1M count={} &"'.format(int(percent_15_mb))]

        md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''}
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                for cmd in cmds:
                    res = os.execute_through_host(remote, ip, cmd)
                    logger.info('RESULT for {}: {}'.format(
                        cmd,
                        utils.pretty_log(res))
                    )
                logger.info('Wait up to 7200 sec until "dd" ends')
                for _ in range(720):
                    cmd = 'ps -ef |grep -v grep| grep "dd if" '
                    res = os.execute_through_host(remote, ip, cmd)
                    if res['exit_code'] != 0:
                        break
                    time.sleep(10)
                    logger.debug('Wait another 10 sec -'
                                 ' totally waited {} sec'.format(10 * _))
                else:
                    raise TimeoutError('bigfile has not been'
                                       ' created after 7200 sec')
                cmd = 'md5sum /mnt/bigfile'
                md5s[ip] = os.execute_through_host(remote,
                                                   ip, cmd)['stdout']

        self.show_step(9)
        nodes = {'compute': [], 'controller': [], 'ceph-osd': []}

        for role in nodes:
            nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
                cluster_id, [role])
            nodes[role] = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
                nailgun_nodes)

        self.fuel_web.warm_shutdown_nodes(nodes['compute'])
        self.fuel_web.warm_shutdown_nodes(nodes['controller'])
        self.fuel_web.warm_shutdown_nodes(nodes['ceph-osd'])

        self.show_step(10)
        time.sleep(300)

        self.show_step(11)
        self.fuel_web.warm_start_nodes(nodes['ceph-osd'])
        self.fuel_web.warm_start_nodes(nodes['controller'])
        self.show_step(12)
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.warm_start_nodes(nodes['compute'])
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(13)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(14)
        self.fuel_web.run_ostf(cluster_id)
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                cmd = 'md5sum /mnt/bigfile'
                md5 = os.execute_through_host(remote, ip, cmd)['stdout']
                assert_equal(md5, md5s[ip],
                             "Actual md5sum {0} doesnt match"
                             " with old one {1} on {2}".format(
                                 md5, md5s[ip], ip))
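The volume sizing in step 8 above works from the total Ceph partition size reported in MB: 15% of the total is converted to whole GB with integer division and rounded up by one, so filling two such volumes with dd reaches roughly 30% of the Ceph space. A small worked example, assuming a hypothetical 200000 MB of total Ceph space:

# Assumed total Ceph partition size in MB, for illustration only.
total_ceph_size = 200000
percent_15_mb = 0.15 * total_ceph_size   # 30000.0 MB
percent_15_gb = percent_15_mb // 1024    # 29.0 (floor division to GB)
volume_size = int(percent_15_gb + 1)     # 30 GB, rounded up
print(volume_size)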
    def block_net_traffic_cinder(self):
        """Block network traffic of whole environment

        Scenario:
            1. Revert environment deploy_ha_cinder
            2. Create 2 volumes and 2 instances with attached volumes
            3. Fill cinder storages up to 30%
            4. Start Rally
            5. Block traffic of all networks
            6. Sleep 5 minutes
            7. Unblock traffic of all networks
            8. Wait until cluster nodes become online
            9. Verify networks
            10. Run OSTF tests

        Duration: 40 min
        Snapshot: block_net_traffic
        """

        self.show_step(1)
        self.env.revert_snapshot('deploy_ha_cinder')
        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        os = os_actions.OpenStackActions(
            controller_ip=self.fuel_web.get_public_vip(cluster_id),
            user='******', passwd='failover', tenant='failover')
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        hypervisors = os.get_hypervisors()
        hypervisor_name = hypervisors[0].hypervisor_hostname
        instance_1 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name
        )
        logger.info("New instance {0} created on {1}"
                    .format(instance_1.id, hypervisor_name))

        floating_ip_1 = os.assign_floating_ip(instance_1)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(floating_ip_1.ip, instance_1.id))

        hypervisor_name = hypervisors[1].hypervisor_hostname
        instance_2 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name
        )
        logger.info("New instance {0} created on {1}"
                    .format(instance_2.id, hypervisor_name))

        floating_ip_2 = os.assign_floating_ip(instance_2)
        logger.info("Floating address {0} associated with instance {1}"
                    .format(floating_ip_2.ip, instance_2.id))

        self.show_step(3)
        cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['cinder'])
        total_cinder_size = 0
        for node in cinder_nodes:
            total_cinder_size += \
                self.fuel_web.get_node_partition_size(node['id'], 'cinder')
        percent_15_mb = 0.15 * total_cinder_size
        percent_15_gb = percent_15_mb // 1024
        volume_size = int(percent_15_gb + 1)

        volume_1 = os.create_volume(size=volume_size)
        volume_2 = os.create_volume(size=volume_size)

        logger.info('Created volumes: {0}, {1}'.format(volume_1.id,
                                                       volume_2.id))

        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']

        logger.info("Attach volumes")
        cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb'

        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' + cmd.format(srv_id=instance_1.id,
                                          volume_id=volume_1.id)
        )
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' + cmd.format(srv_id=instance_2.id,
                                          volume_id=volume_2.id)
        )

        cmds = ['sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"',
                'sudo sh -c "/bin/mount /dev/vdb /mnt"',
                'sudo sh -c "/usr/bin/nohup'
                ' /bin/dd if=/dev/zero of=/mnt/bigfile '
                'bs=1M count={} &"'.format(int(percent_15_mb))]

        md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''}
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                for cmd in cmds:
                    res = remote.execute_through_host(
                        hostname=ip,
                        cmd=cmd,
                        auth=cirros_auth)
                    logger.info('RESULT for {}: {}'.format(
                        cmd,
                        utils.pretty_log(res))
                    )
                logger.info('Wait up to 7200 sec until "dd" ends')
                for _ in range(720):
                    cmd = 'ps -ef |grep -v grep| grep "dd if" '
                    res = remote.execute_through_host(
                        hostname=ip,
                        cmd=cmd,
                        auth=cirros_auth)
                    if res['exit_code'] != 0:
                        break
                    time.sleep(15)
                    logger.debug('Wait another 15 sec -'
                                 ' totally waited {} sec'.format(15 * _))
                else:
                    raise TimeoutError('bigfile has not been'
                                       ' created after 7200 sec')
                cmd = 'md5sum /mnt/bigfile'
                md5s[ip] = remote.execute_through_host(
                    hostname=ip,
                    cmd=cmd,
                    auth=cirros_auth)['stdout']
        self.show_step(4)
        assert_true(settings.PATCHING_RUN_RALLY,
                    'PATCHING_RUN_RALLY was not set to true')
        rally_benchmarks = {}
        benchmark_results = {}
        for tag in set(settings.RALLY_TAGS):
            rally_benchmarks[tag] = RallyBenchmarkTest(
                container_repo=settings.RALLY_DOCKER_REPO,
                environment=self.env,
                cluster_id=cluster_id,
                test_type=tag
            )
            benchmark_results[tag] = rally_benchmarks[tag].run()
            logger.debug(benchmark_results[tag].show())

        self.show_step(5)
        nodes = [
            node for node in sorted(
                self.env.d_env.get_nodes(role='fuel_slave'),
                key=lambda x: x.name)
            if node.driver.node_active(node)]
        for interface in nodes[0].interfaces:
            if interface.is_blocked:
                raise Exception('Interface {0} is blocked'.format(interface))
            else:
                interface.network.block()

        self.show_step(6)
        time.sleep(60 * 5)

        self.show_step(7)
        for interface in nodes[0].interfaces:
            if interface.network.is_blocked:
                interface.network.unblock()
            else:
                raise Exception(
                    'Interface {0} was not blocked'.format(interface))

        self.show_step(8)
        self.fuel_web.wait_nodes_get_online_state(nodes)

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        try:
            self.fuel_web.run_ostf(
                cluster_id=cluster_id,
                test_sets=['ha', 'smoke', 'sanity'])
        except AssertionError:
            time.sleep(600)
            self.fuel_web.run_ostf(
                cluster_id=cluster_id,
                test_sets=['ha', 'smoke', 'sanity'])
Example #11
    def deploy_with_custom_mirror(self):
        """Create local mirrors for Ubuntu repos using fuel-mirror tool

        Scenario:
            1. Create cluster with neutron networking
            2. Add 3 nodes with controller, ceph-osd roles
            3. Add 1 node with cinder, mongo roles
            4. Add 1 node with compute role
            5. Fix fuel-mirror config according to cluster repos
            6. Run create command for Ubuntu mirrors
            7. Run apply command for Ubuntu mirrors
            8. Check that only Ubuntu mirrors were changed
            9. Run create command for mos mirrors
            10. Run apply command for mos mirrors
            11. Check that mos mirrors were also changed
            12. Run network verification
            13. Deploy the cluster
            14. Run OSTF
            15. Create snapshot

        Duration 90m
        Snapshot deploy_with_custom_mirror
        """
        self.env.revert_snapshot('ready_with_5_slaves')
        admin_ip = self.ssh_manager.admin_ip

        self.show_step(1, initialize=True)
        cluster_id = self.fuel_web.create_cluster(
            name=self.__class__.__name__,
            mode=DEPLOYMENT_MODE,
            settings={
                "net_provider": 'neutron',
                "net_segment_type": NEUTRON_SEGMENT['tun'],
                'sahara': True,
                'ceilometer': True,
                'volumes_lvm': True,
                'volumes_ceph': False,
                'images_ceph': True
            }
        )

        self.show_step(2)
        self.show_step(3)
        self.show_step(4)
        self.fuel_web.update_nodes(
            cluster_id,
            {
                'slave-01': ['controller', 'ceph-osd'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'ceph-osd'],
                'slave-04': ['cinder', 'mongo'],
                'slave-05': ['compute']
            }
        )
        self.show_step(5)
        self._fix_fuel_mirror_config(admin_ip)
        cluster_repos = self._get_cluster_repos(cluster_id)
        message = pretty_log({'Cluster repos': cluster_repos['all']})
        logger.info(message)
        self.show_step(6)
        create_cmd = 'fuel-mirror create -P ubuntu -G ubuntu ' \
                     '--log-file /var/log/ubuntu_mirrors_create.log'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=create_cmd)
        self.show_step(7)
        apply_cmd = 'fuel-mirror apply --replace  -P ubuntu -G ubuntu'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd)

        self.show_step(8)

        cluster_repos = self._get_cluster_repos(cluster_id)
        remote_ubuntu_repos = [
            repo for repo in cluster_repos['ubuntu']
            if admin_ip not in repo['uri'] and
            '{settings.MASTER_IP}' not in repo['uri']]
        local_mos_repos = [
            repo for repo in cluster_repos['mos']
            if admin_ip in repo['uri'] or
            '{settings.MASTER_IP}' in repo['uri']]
        repos_log = pretty_log(
            {'All': cluster_repos['all'],
             'Remote Ubuntu': remote_ubuntu_repos,
             'Local MOS': local_mos_repos})
        logger.info(repos_log)

        assert_false(remote_ubuntu_repos,
                     message="There are some remote Ubuntu repositories: "
                             "{repos}".format(repos=remote_ubuntu_repos))
        # NOTE Main MOS repository is always local in our tests
        assert_false(
            len(local_mos_repos) > 1,
            message="More than one MOS repo became local:{repos}".format(
                repos=local_mos_repos
            )
        )

        self.show_step(9)
        create_cmd = 'fuel-mirror create -P ubuntu -G mos ' \
                     '--log-file /var/log/mos_mirrors_create.log'
        self.env.admin_actions.ensure_cmd(create_cmd)

        self.show_step(10)
        apply_cmd = 'fuel-mirror apply -P ubuntu -G mos'
        self.ssh_manager.execute_on_remote(ip=admin_ip, cmd=apply_cmd)

        self.show_step(11)
        cluster_repos = self._get_cluster_repos(cluster_id)['all']
        remote_repos = [
            repo for repo in cluster_repos
            if admin_ip not in repo['uri'] and
            '{settings.MASTER_IP}' not in repo['uri']]
        message = pretty_log(cluster_repos)
        logger.info(message)
        assert_false(remote_repos,
                     message="There are some remote repositories: "
                             "{repos}".format(repos=remote_repos))

        self.show_step(12)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(13)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(14)
        self.fuel_web.run_ostf(
            cluster_id=cluster_id,
            test_sets=['ha', 'smoke', 'sanity'])

        self.show_step(15)
        self.env.make_snapshot('deploy_with_custom_mirror')
Example #12
def show_all_groups(**kwargs):
    """Show all Proboscis groups"""
    groups_nums = get_groups()
    out = {k: len(v) for k, v in groups_nums.items()}
    print(pretty_log(out))
    def shutdown_ceph_for_all(self):
        """Shutdown of Neutron Vxlan, ceph for all cluster

        Scenario:
            1. Create cluster with Neutron Vxlan, ceph for all,
            ceph replication factor - 3
            2. Add 3 controller, 2 compute, 3 ceph nodes
            3. Verify Network
            4. Deploy cluster
            5. Verify networks
            6. Run OSTF
            7. Create 2 volumes and 2 instances with attached volumes
            8. Fill ceph storages up to 30% (15% for each instance)
            9. Shutdown of all nodes
            10. Wait 5 minutes
            11. Start cluster
            12. Wait until OSTF 'HA' suite passes
            13. Verify networks
            14. Run OSTF tests

        Duration 230m

        """

        self.env.revert_snapshot('ready_with_9_slaves')

        self.show_step(1, initialize=True)
        data = {
            'tenant': 'failover',
            'user': '******',
            'password': '******',
            "net_provider": 'neutron',
            "net_segment_type": settings.NEUTRON_SEGMENT['tun'],
            'volumes_ceph': True,
            'images_ceph': True,
            'ephemeral_ceph': True,
            'objects_ceph': True,
            'osd_pool_size': '3',
            'volumes_lvm': False,
        }
        cluster_id = self.fuel_web.create_cluster(name=self.__class__.__name__,
                                                  settings=data)

        self.show_step(2)
        self.fuel_web.update_nodes(
            cluster_id, {
                'slave-01': ['controller'],
                'slave-02': ['controller'],
                'slave-03': ['controller'],
                'slave-04': ['compute'],
                'slave-05': ['compute'],
                'slave-06': ['ceph-osd'],
                'slave-07': ['ceph-osd'],
                'slave-08': ['ceph-osd']
            })
        self.show_step(3)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(4)
        self.fuel_web.deploy_cluster_wait(cluster_id)

        self.show_step(5)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(6)
        self.fuel_web.run_ostf(cluster_id)

        self.show_step(7)
        os = os_actions.OpenStackActions(
            controller_ip=self.fuel_web.get_public_vip(cluster_id),
            user='******',
            passwd='failover',
            tenant='failover')
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        hypervisors = os.get_hypervisors()
        hypervisor_name = hypervisors[0].hypervisor_hostname
        instance_1 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name)
        logger.info("New instance {0} created on {1}".format(
            instance_1.id, hypervisor_name))

        floating_ip_1 = os.assign_floating_ip(instance_1)
        logger.info("Floating address {0} associated with instance {1}".format(
            floating_ip_1.ip, instance_1.id))

        hypervisor_name = hypervisors[1].hypervisor_hostname
        instance_2 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name)
        logger.info("New instance {0} created on {1}".format(
            instance_2.id, hypervisor_name))

        floating_ip_2 = os.assign_floating_ip(instance_2)
        logger.info("Floating address {0} associated with instance {1}".format(
            floating_ip_2.ip, instance_2.id))

        self.show_step(8)
        ceph_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['ceph-osd'])
        total_ceph_size = 0
        for node in ceph_nodes:
            total_ceph_size += \
                self.fuel_web.get_node_partition_size(node['id'], 'ceph')
        percent_15_mb = 0.15 * total_ceph_size
        percent_15_gb = percent_15_mb // 1024
        volume_size = int(percent_15_gb + 1)

        volume_1 = os.create_volume(size=volume_size)
        volume_2 = os.create_volume(size=volume_size)

        logger.info('Created volumes: {0}, {1}'.format(volume_1.id,
                                                       volume_2.id))

        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']

        logger.info("Attach volumes")
        cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb'

        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' +
            cmd.format(srv_id=instance_1.id, volume_id=volume_1.id))
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' +
            cmd.format(srv_id=instance_2.id, volume_id=volume_2.id))

        cmds = [
            'sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"',
            'sudo sh -c "/bin/mount /dev/vdb /mnt"',
            'sudo sh -c "/usr/bin/nohup'
            ' /bin/dd if=/dev/zero of=/mnt/bigfile '
            'bs=1M count={} &"'.format(int(percent_15_mb))
        ]

        md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''}
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                for cmd in cmds:
                    res = remote.execute_through_host(hostname=ip,
                                                      cmd=cmd,
                                                      auth=cirros_auth)
                    logger.info('RESULT for {}: {}'.format(
                        cmd, utils.pretty_log(res)))
                logger.info('Wait up to 7200 sec until "dd" ends')
                for _ in range(720):
                    cmd = 'ps -ef |grep -v grep| grep "dd if" '
                    res = remote.execute_through_host(hostname=ip,
                                                      cmd=cmd,
                                                      auth=cirros_auth)
                    if res['exit_code'] != 0:
                        break
                    time.sleep(10)
                    logger.debug('Wait another 10 sec -'
                                 ' totally waited {} sec'.format(10 * _))
                else:
                    raise TimeoutError('bigfile has not been'
                                       ' created after 7200 sec')
                cmd = 'md5sum /mnt/bigfile'
                md5s[ip] = remote.execute_through_host(
                    hostname=ip, cmd=cmd, auth=cirros_auth)['stdout']

        self.show_step(9)
        nodes = {'compute': [], 'controller': [], 'ceph-osd': []}

        for role in nodes:
            nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
                cluster_id, [role])
            nodes[role] = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
                nailgun_nodes)

        self.fuel_web.warm_shutdown_nodes(nodes['compute'])
        self.fuel_web.warm_shutdown_nodes(nodes['controller'])
        self.fuel_web.warm_shutdown_nodes(nodes['ceph-osd'])

        self.show_step(10)
        time.sleep(300)

        self.show_step(11)
        self.fuel_web.warm_start_nodes(nodes['ceph-osd'])
        self.fuel_web.warm_start_nodes(nodes['controller'])
        self.show_step(12)
        self.fuel_web.assert_ha_services_ready(cluster_id)
        self.fuel_web.warm_start_nodes(nodes['compute'])
        self.fuel_web.assert_os_services_ready(cluster_id)

        self.show_step(13)
        self.fuel_web.verify_network(cluster_id)
        self.show_step(14)
        self.fuel_web.run_ostf(cluster_id)
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                cmd = 'md5sum /mnt/bigfile'
                md5 = remote.execute_through_host(hostname=ip,
                                                  cmd=cmd,
                                                  auth=cirros_auth)['stdout']
                assert_equal(
                    md5, md5s[ip], "Actual md5sum {0} does not match"
                    " the old one {1} on {2}".format(md5, md5s[ip], ip))
Example #14
    def block_net_traffic_cinder(self):
        """Block network traffic of whole environment

        Scenario:
            1. Revert environment deploy_ha_cinder
            2. Create 2 volumes and 2 instances with attached volumes
            3. Fill cinder storages up to 30%
            4. Start Rally
            5. Block traffic of all networks
            6. Sleep 5 minutes
            7. Unblock traffic of all networks
            8. Wait until cluster nodes become online
            9. Verify networks
            10. Run OSTF tests

        Duration: 40 min
        Snapshot: block_net_traffic
        """

        self.show_step(1)
        self.env.revert_snapshot('deploy_ha_cinder')
        cluster_id = self.fuel_web.get_last_created_cluster()

        self.show_step(2)
        os = os_actions.OpenStackActions(
            controller_ip=self.fuel_web.get_public_vip(cluster_id),
            user='******',
            passwd='failover',
            tenant='failover')
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        hypervisors = os.get_hypervisors()
        hypervisor_name = hypervisors[0].hypervisor_hostname
        instance_1 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name)
        logger.info("New instance {0} created on {1}".format(
            instance_1.id, hypervisor_name))

        floating_ip_1 = os.assign_floating_ip(instance_1)
        logger.info("Floating address {0} associated with instance {1}".format(
            floating_ip_1.ip, instance_1.id))

        hypervisor_name = hypervisors[1].hypervisor_hostname
        instance_2 = os.create_server_for_migration(
            neutron=True,
            availability_zone="nova:{0}".format(hypervisor_name),
            label=net_name)
        logger.info("New instance {0} created on {1}".format(
            instance_2.id, hypervisor_name))

        floating_ip_2 = os.assign_floating_ip(instance_2)
        logger.info("Floating address {0} associated with instance {1}".format(
            floating_ip_2.ip, instance_2.id))

        self.show_step(3)
        cinder_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            cluster_id, ['cinder'])
        total_cinder_size = 0
        for node in cinder_nodes:
            total_cinder_size += \
                self.fuel_web.get_node_partition_size(node['id'], 'cinder')
        percent_15_mb = 0.15 * total_cinder_size
        percent_15_gb = percent_15_mb // 1024
        volume_size = int(percent_15_gb + 1)

        volume_1 = os.create_volume(size=volume_size)
        volume_2 = os.create_volume(size=volume_size)

        logger.info('Created volumes: {0}, {1}'.format(volume_1.id,
                                                       volume_2.id))

        ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']

        logger.info("Attach volumes")
        cmd = 'nova volume-attach {srv_id} {volume_id} /dev/vdb'

        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' +
            cmd.format(srv_id=instance_1.id, volume_id=volume_1.id))
        self.ssh_manager.execute_on_remote(
            ip=ip,
            cmd='. openrc; ' +
            cmd.format(srv_id=instance_2.id, volume_id=volume_2.id))

        cmds = [
            'sudo sh -c "/usr/sbin/mkfs.ext4 /dev/vdb"',
            'sudo sh -c "/bin/mount /dev/vdb /mnt"',
            'sudo sh -c "/usr/bin/nohup'
            ' /bin/dd if=/dev/zero of=/mnt/bigfile '
            'bs=1M count={} &"'.format(int(percent_15_mb))
        ]

        md5s = {floating_ip_1.ip: '', floating_ip_2.ip: ''}
        with self.fuel_web.get_ssh_for_node("slave-01") as remote:
            for ip in [floating_ip_1.ip, floating_ip_2.ip]:
                for cmd in cmds:
                    res = os.execute_through_host(remote, ip, cmd)
                    logger.info('RESULT for {}: {}'.format(
                        cmd, utils.pretty_log(res)))
                logger.info('Wait up to 7200 sec until "dd" ends')
                for _ in range(720):
                    cmd = 'ps -ef |grep -v grep| grep "dd if" '
                    res = os.execute_through_host(remote, ip, cmd)
                    if res['exit_code'] != 0:
                        break
                    time.sleep(15)
                    logger.debug('Wait another 15 sec -'
                                 ' totally waited {} sec'.format(15 * _))
                else:
                    raise TimeoutError('bigfile has not been'
                                       ' created after 7200 sec')
                cmd = 'md5sum /mnt/bigfile'
                md5s[ip] = os.execute_through_host(remote, ip, cmd)['stdout']
        self.show_step(4)
        assert_true(settings.PATCHING_RUN_RALLY,
                    'PATCHING_RUN_RALLY was not set to true')
        rally_benchmarks = {}
        benchmark_results = {}
        for tag in set(settings.RALLY_TAGS):
            rally_benchmarks[tag] = RallyBenchmarkTest(
                container_repo=settings.RALLY_DOCKER_REPO,
                environment=self.env,
                cluster_id=cluster_id,
                test_type=tag)
            benchmark_results[tag] = rally_benchmarks[tag].run()
            logger.debug(benchmark_results[tag].show())

        self.show_step(5)
        nodes = [
            node for node in self.env.d_env.get_nodes()
            if node.driver.node_active(node)
        ]
        for interface in nodes[1].interfaces:
            if interface.is_blocked:
                raise Exception('Interface {0} is blocked'.format(interface))
            else:
                interface.network.block()

        self.show_step(6)
        time.sleep(60 * 5)

        self.show_step(7)
        for interface in nodes[1].interfaces:
            if interface.network.is_blocked:
                interface.network.unblock()
            else:
                raise Exception(
                    'Interface {0} was not blocked'.format(interface))

        self.show_step(8)
        self.fuel_web.wait_nodes_get_online_state(nodes[1:])

        self.show_step(9)
        self.fuel_web.verify_network(cluster_id)

        self.show_step(10)
        try:
            self.fuel_web.run_ostf(cluster_id=cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'])
        except AssertionError:
            time.sleep(600)
            self.fuel_web.run_ostf(cluster_id=cluster_id,
                                   test_sets=['ha', 'smoke', 'sanity'])