Example #1
 def get_flavor_by_name(self, name):
     flavor_list = self.nova.flavors.list()
     for flavor in flavor_list:
         if flavor.name == name:
             return flavor
     logger.warning("Flavor with name {} was not found".format(name))
     return None
Example #2
    def resume_environment(self):
        self.d_env.resume()
        admin = self.d_env.nodes().admin

        self.ssh_manager.clean_all_connections()

        try:
            admin.await(self.d_env.admin_net, timeout=30, by_port=8000)
        except Exception as e:
            logger.warning("From first time admin isn't reverted: "
                           "{0}".format(e))
            admin.destroy()
            logger.info('Admin node was destroyed. Wait 10 sec.')
            time.sleep(10)

            admin.start()
            logger.info('Admin node started for the second time.')
            self.d_env.nodes().admin.await(self.d_env.admin_net)
            self.set_admin_ssh_password()
            self.admin_actions.wait_for_fuel_ready(timeout=600)

            # set collector address in case of admin node destroy
            if settings.FUEL_STATS_ENABLED:
                self.nailgun_actions.set_collector_address(
                    settings.FUEL_STATS_HOST,
                    settings.FUEL_STATS_PORT,
                    settings.FUEL_STATS_SSL)
                # Restart statsenderd in order to apply new collector address
                self.nailgun_actions.force_fuel_stats_sending()
                self.fuel_web.client.send_fuel_stats(enabled=True)
                logger.info('Enabled sending of statistics to {0}:{1}'.format(
                    settings.FUEL_STATS_HOST, settings.FUEL_STATS_PORT
                ))
        self.set_admin_ssh_password()
        self.admin_actions.wait_for_fuel_ready()
Example #3
 def get_server_by_name(self, name):
     servers = self.get_servers()
     for srv in servers:
         if srv.name == name:
             return srv
     logger.warning("Instance with name {} was not found".format(name))
     return None
Example #4
    def _get_keystoneclient(username, password, tenant_name, auth_url,
                            retries=3, ca_cert=None, insecure=False):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if ca_cert:
                    return KeystoneClient(username=username,
                                          password=password,
                                          tenant_name=tenant_name,
                                          auth_url=auth_url,
                                          cacert=ca_cert,
                                          insecure=insecure)

                else:
                    return KeystoneClient(username=username,
                                          password=password,
                                          tenant_name=tenant_name,
                                          auth_url=auth_url)
            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone client, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()
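Note: the snippet above keeps the result of sys.exc_info() from the last failed attempt so that six.reraise can re-raise it with the original traceback once the retries are exhausted (Example #11 below uses the same pattern for sessions). A minimal standalone sketch of that idea; call_with_retries and its arguments are hypothetical helpers, not part of the project above:

import sys
import time

import six


def call_with_retries(func, retries=3, delay=5):
    # Remember the most recent exception info so the original traceback
    # survives across retry attempts.
    exc_type, exc_value, exc_traceback = None, None, None
    for attempt in range(retries):
        try:
            return func()
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            time.sleep(delay)
    if exc_type and exc_value and exc_traceback:
        six.reraise(exc_type, exc_value, exc_traceback)
    raise RuntimeError('func was never called successfully')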
Example #5
 def wrapper(*args, **kwargs):
     logger.info("\n" + "<" * 5 + "#" * 30 + "[ {} ]"
                 .format(func.__name__) + "#" * 30 + ">" * 5 + "\n{}"
                 .format(func.__doc__ or ''))
     try:
         result = func(*args, **kwargs)
     except SkipTest:
         raise  # re-raise to preserve the original traceback
     except Exception:
         name = 'error_{:s}'.format(func.__name__)
         store_error_details(name, args[0].env)
         logger.error(traceback.format_exc())
         logger.info("<" * 5 + "*" * 100 + ">" * 5)
         raise
     else:
         if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:
             if args[0].env is None:
                 logger.warning("Can't get diagnostic snapshot: "
                                "unexpected class is decorated.")
                 return result
             try:
                 args[0].env.resume_environment()
                 create_diagnostic_snapshot(args[0].env, "pass",
                                            func.__name__)
              except Exception:
                 logger.error("Fetching of diagnostic snapshot failed: {0}".
                              format(traceback.format_exc()))
         return result
Example #6
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if settings.UPDATE_FUEL:
            logger.info("Update fuel's packages from directory {0}."
                        .format(settings.UPDATE_FUEL_PATH))
            environment = get_current_env(args)
            if not environment:
                logger.warning("Decorator was triggered "
                               "from unexpected class.")
                return result

            centos_files_count, ubuntu_files_count = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)
            if not centos_files_count and not ubuntu_files_count:
                raise ConfigurationException(
                    'Nothing to update: the number of packages '
                    'to update is 0')
            cluster_id = environment.fuel_web.get_last_created_cluster()

            if centos_files_count > 0:
                environment.docker_actions.execute_in_containers(
                    cmd='yum -y install yum-plugin-priorities')

                # Update docker containers and restart them
                environment.docker_actions.execute_in_containers(
                    cmd='yum clean expire-cache; yum update -y')
                environment.docker_actions.restart_containers()

                with environment.d_env.get_admin_remote() as remote:
                    # Update packages on master node
                    remote.execute(
                        'yum -y install yum-plugin-priorities;'
                        'yum clean expire-cache; yum update -y')

                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU not in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_centos_mirror(
                        cluster_id, path=settings.LOCAL_MIRROR_CENTOS,
                        priority=settings.AUX_RPM_REPO_PRIORITY)

            if ubuntu_files_count > 0:
                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_ubuntu_mirror(
                        cluster_id, name="Auxiliary",
                        path=settings.LOCAL_MIRROR_UBUNTU,
                        priority=settings.AUX_DEB_REPO_PRIORITY)
                else:
                    logger.error("{0} .DEB files uploaded but won't be used"
                                 " because of deploying wrong release!"
                                 .format(ubuntu_files_count))
            if settings.SYNC_DEPL_TASKS:
                with environment.d_env.get_admin_remote() as remote:
                    remote.execute("fuel release --sync-deployment-tasks"
                                   " --dir /etc/puppet/")
        return result
Example #7
 def wrapper(*args, **kwargs):
     result = func(*args, **kwargs)
     try:
         if settings.UPLOAD_MANIFESTS:
             logger.info(
                 "Uploading new manifests from "
                 "{:s}".format(settings.UPLOAD_MANIFESTS_PATH))
             environment = get_current_env(args)
             if not environment:
                 logger.warning("Can't upload manifests: method of "
                                "unexpected class is decorated.")
                 return result
             with environment.d_env.get_admin_remote() as remote:
                 remote.execute('rm -rf /etc/puppet/modules/*')
                 remote.upload(settings.UPLOAD_MANIFESTS_PATH,
                               '/etc/puppet/modules/')
                 logger.info(
                     "Copying new site.pp from "
                     "{:s}".format(settings.SITEPP_FOR_UPLOAD))
                 remote.execute("cp %s /etc/puppet/manifests" %
                                settings.SITEPP_FOR_UPLOAD)
                 if settings.SYNC_DEPL_TASKS:
                     remote.execute("fuel release --sync-deployment-tasks"
                                    " --dir /etc/puppet/")
     except Exception:
         logger.error("Could not upload manifests")
         raise
     return result
Example #8
def get_package_test_info(package, pkg_type, tests_path, patch_target):
    packages_path = "{0}/{1}/packages.yaml".format(tests_path, pkg_type)
    tests = set()
    tests_file = 'test.yaml'
    all_packages = yaml.load(open(packages_path).read())
    # Resolve once and reuse instead of calling the helper twice
    target_and_project = _get_target_and_project(package, all_packages)
    assert_is_not_none(target_and_project,
                       "Package '{0}' doesn't belong to any installation "
                       "target / project".format(package))
    target, project = target_and_project
    if patch_target == 'master':
        if target not in ['master', 'bootstrap']:
            return {None}
    if patch_target == 'environment':
        if target not in ['deployment', 'provisioning']:
            return {None}
    target_tests_path = "/".join((tests_path, pkg_type, target, tests_file))
    project_tests_path = "/".join((tests_path, pkg_type, target, project,
                                   tests_file))
    package_tests_path = "/".join((tests_path, pkg_type, target, project,
                                   package, tests_file))
    for path in (target_tests_path, project_tests_path, package_tests_path):
        try:
            test = yaml.load(open(path).read())
            if 'system_tests' in test.keys():
                tests.update(test['system_tests']['tags'])
        except IOError as e:
            logger.warning('Ignoring exception: {!r}'.format(e))
            logger.debug(traceback.format_exc())
    return tests
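For context, each test.yaml consumed above is only read for its system_tests/tags mapping; loaded into Python it is expected to have roughly the following shape (the tag names here are illustrative, not taken from the real files):

# Hypothetical shape of yaml.load(open(path).read()) in the loop above;
# only the 'system_tests' -> 'tags' keys are actually used.
test = {
    'system_tests': {
        'tags': ['review_fuel_agent', 'bvt_2'],
    },
}
tests = set()
if 'system_tests' in test.keys():
    tests.update(test['system_tests']['tags'])  # {'review_fuel_agent', 'bvt_2'}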
Example #9
 def _open(self, req):
     try:
         return self._get_response(req)
     except HTTPError as e:
         if e.code == 308:
             logger.info(e.read())
             url = req.get_full_url()
             # Use a separate name so the original request object isn't
             # shadowed by the requests.Response
             resp = requests.get(url, headers={'X-Auth-Token': self.token})
             if resp.status_code == 200:
                 return resp.json()
             else:
                 resp.raise_for_status()
         elif e.code == 401:
             logger.warning('Authorization failure: {0}'.format(e.read()))
             self.authenticate()
             return self._get_response(req)
         elif e.code == 504:
             logger.error("Got HTTP Error 504: "
                          "Gateway Time-out: {}".format(e.read()))
             return self._get_response(req)
         else:
             logger.error('{} code {} [{}]'.format(e.reason,
                                                   e.code,
                                                   e.read()))
             raise
Example #10
    def is_make_snapshot(self):
        """Check if the test 'test_name' is a dependency for other planned
        tests (snapshot is required). If yes return True, if no - False.

        :rtype: bool
        """
        test_name = get_test_method_name()
        tests = self.test_program.plan.tests
        test_cases = [t for t in tests if t.entry.method.__name__ == test_name]
        if len(test_cases) != 1:
            logger.warning("Method 'is_make_snapshot' is called from function "
                           "which is not a test case: {0}".format(test_name))
            return False
        test_groups = set(test_cases[0].entry.info.groups)
        dependent_tests = set()
        dependent_groups = set()
        for t in tests:
            for func in t.entry.info.depends_on:
                dependent_tests.add(func.__name__)
            for group in t.entry.info.depends_on_groups:
                dependent_groups.add(group)
        if test_name in dependent_tests or \
                test_groups & dependent_groups:
            return True
        return False
Example #11
    def __start_keystone_session(
            self, retries=3, ca_cert=None, insecure=not VERIFY_SSL):
        exc_type, exc_value, exc_traceback = None, None, None
        for i in xrange(retries):
            try:
                if insecure:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=False)
                elif ca_cert:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth, verify=ca_cert)
                else:
                    self.keystone_session = KeystoneSession(
                        auth=self.__keystone_auth)
                self.keystone_session.get_auth_headers()
                return

            except ClientException as exc:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                err = "Try nr {0}. Could not get keystone token, error: {1}"
                logger.warning(err.format(i + 1, exc))
                time.sleep(5)
        if exc_type and exc_traceback and exc_value:
            six.reraise(exc_type, exc_value, exc_traceback)
        raise RuntimeError()
Example #12
    def _get_keystoneclient(self, username, password, tenant_name, auth_url,
                            retries=3, ca_cert=None):
        keystone = None
        for i in range(retries):
            try:
                if ca_cert:
                    keystone = KeystoneClient(username=username,
                                              password=password,
                                              tenant_name=tenant_name,
                                              auth_url=auth_url,
                                              cacert=ca_cert)

                else:
                    keystone = KeystoneClient(username=username,
                                              password=password,
                                              tenant_name=tenant_name,
                                              auth_url=auth_url)
                break
            except ClientException as e:
                err = "Try nr {0}. Could not get keystone client, error: {1}"
                LOGGER.warning(err.format(i + 1, e))
                time.sleep(5)
        if not keystone:
            # A bare ``raise`` without an active exception would itself
            # fail here, so raise an explicit error instead
            raise RuntimeError('Could not get keystone client after '
                               '{0} retries'.format(retries))
        return keystone
Example #13
    def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1,
                        disk=1, server_name='test_instance', image_name=None,
                        neutron_network=True, label=None):
        logger.debug('Try to create instance')

        start_time = time.time()
        while time.time() - start_time < 100:
            try:
                if image_name:
                    image = [i.id for i in self.nova.images.list()
                             if i.name == image_name]
                else:
                    image = [i.id for i in self.nova.images.list()]
                break
            except Exception as e:
                logger.warning('Ignoring exception: {!r}'.format(e))
                logger.debug(traceback.format_exc())
                time.sleep(1)  # avoid busy-looping while retrying
        else:
            raise Exception('Cannot get image')

        kwargs = {}
        if neutron_network:
            net_label = label if label else 'net04'
            network = self.nova.networks.find(label=net_label)
            kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}]

        logger.info('image uuid is {0}'.format(image))
        flavor = self.nova.flavors.create(
            name=flavor_name, ram=ram, vcpus=vcpus, disk=disk)
        logger.info('flavor is {0}'.format(flavor.name))
        server = self.nova.servers.create(
            name=server_name, image=image[0], flavor=flavor, **kwargs)
        logger.info('server is {0}'.format(server.name))
        return server
Example #14
 def wrapper(*args, **kwargs):
     result = func(*args, **kwargs)
     try:
         if settings.UPLOAD_MANIFESTS:
             logger.info("Uploading new manifests from %s" %
                         settings.UPLOAD_MANIFESTS_PATH)
             if args[0].__class__.__name__ == "EnvironmentModel":
                 environment = args[0]
             elif args[0].__class__.__name__ == "FuelWebClient":
                 environment = args[0].environment
             else:
                 logger.warning("Can't upload manifests: method of "
                                "unexpected class is decorated.")
                 return result
             remote = environment.get_admin_remote()
             remote.execute('rm -rf /etc/puppet/modules/*')
             remote.upload(settings.UPLOAD_MANIFESTS_PATH,
                           '/etc/puppet/modules/')
             logger.info("Copying new site.pp from %s" %
                         settings.SITEPP_FOR_UPLOAD)
             remote.execute("cp %s /etc/puppet/manifests" %
                            settings.SITEPP_FOR_UPLOAD)
             if settings.SYNC_DEPL_TASKS:
                 remote.execute("fuel release --sync-deployment-tasks"
                                " --dir /etc/puppet/")
     except Exception:
         logger.error("Could not upload manifests")
         raise
     return result
Example #15
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.UPDATE_FUEL:
            return result
        try:
            environment = get_current_env(args)
            if not environment:
                logger.warning("Can't update packages: method of "
                               "unexpected class is decorated.")
                return result

            if settings.UPDATE_FUEL_MIRROR:
                for url in settings.UPDATE_FUEL_MIRROR:
                    repo_url = urlparse(url)
                    cut_dirs = len(repo_url.path.strip('/').split('/'))
                    download_cmd = ('wget --recursive --no-parent'
                                    ' --no-verbose --reject "index'
                                    '.html*,*.gif" --exclude-directories'
                                    ' "{pwd}/repocache" '
                                    '--directory-prefix {path} -nH'
                                    ' --cut-dirs={cutd} {url}').\
                        format(pwd=repo_url.path.rstrip('/'),
                               path=settings.UPDATE_FUEL_PATH,
                               cutd=cut_dirs, url=repo_url.geturl())
                    return_code = call(download_cmd, shell=True)
                    assert_equal(return_code, 0, 'Mirroring of remote'
                                                 ' packages '
                                                 'repository failed')

            centos_files_count, ubuntu_files_count = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)

            if centos_files_count == 0:
                return result

            # Add temporary repo with new packages to YUM configuration
            conf_file = '/etc/yum.repos.d/temporary.repo'
            cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/"
                   "\ngpgcheck=0\npriority=1' > {1}").format(
                settings.LOCAL_MIRROR_CENTOS, conf_file)
            with environment.d_env.get_admin_remote() as remote:
                environment.execute_remote_cmd(remote, cmd, exit_code=0)
                update_command = 'yum clean expire-cache; yum update -y -d3'
                # Use a separate name so the decorated function's return
                # value in ``result`` is not shadowed by the command output
                update_result = remote.execute(update_command)
                logger.debug('Result of "yum update" command on master node: '
                             '{0}'.format(update_result))
                assert_equal(int(update_result['exit_code']), 0,
                             'Packages update failed, '
                             'inspect logs for details')
                environment.execute_remote_cmd(remote,
                                               cmd='rm -f {0}'
                                               .format(conf_file),
                                               exit_code=0)
        except Exception:
            logger.error("Could not update packages")
            raise
        return result
Example #16
    def admin_install_updates(self):
        logger.info("Searching for updates..")
        update_command = "yum clean expire-cache; yum update -y"

        update_result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=update_command)

        logger.info('Result of "{1}" command on master node: ' "{0}".format(update_result, update_command))
        assert_equal(int(update_result["exit_code"]), 0, "Packages update failed, " "inspect logs for details")

        # Check if any packages were updated and the update was successful
        updates_count = None
        match_complete_message = None
        match_no_updates = None
        for str_line in update_result["stdout"]:
            match_updated_count = re.search(r"Upgrade(?:\s*)(\d+).*Package", str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            # Accumulate matches across lines instead of keeping only the
            # result for the last line of the output
            match_complete_message = match_complete_message or re.search("(Complete!)", str_line)
            match_no_updates = match_no_updates or re.search("No Packages marked for Update", str_line)

        if (not updates_count or match_no_updates) and not match_complete_message:
            logger.warning("No updates were found or update was incomplete.")
            return
        logger.info("{0} packet(s) were updated".format(updates_count))

        cmd = "bootstrap_admin_node.sh;"

        result = self.ssh_manager.execute(ip=self.ssh_manager.admin_ip, cmd=cmd)
        logger.info('Result of "{1}" command on master node: ' "{0}".format(result, cmd))
        assert_equal(int(result["exit_code"]), 0, "bootstrap failed, " "inspect logs for details")
Example #17
    def admin_install_updates(self):
        logger.info('Searching for updates..')
        update_command = 'yum clean expire-cache; yum update -y'
        with self.d_env.get_admin_remote() as admin_remote:
            update_result = admin_remote.execute(update_command)
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(update_result, update_command))
        assert_equal(int(update_result['exit_code']), 0,
                     'Packages update failed, '
                     'inspect logs for details')

        # Check if any packages were updated and the update was successful
        updates_count = None
        match_complete_message = None
        match_no_updates = None
        for str_line in update_result['stdout']:
            match_updated_count = re.search(r"Upgrade(?:\s*)(\d+).*Package",
                                            str_line)
            if match_updated_count:
                updates_count = match_updated_count.group(1)
            # Accumulate matches across lines instead of keeping only the
            # result for the last line of the output
            match_complete_message = match_complete_message or \
                re.search("(Complete!)", str_line)
            match_no_updates = match_no_updates or \
                re.search("No Packages marked for Update", str_line)

        if (not updates_count or match_no_updates)\
                and not match_complete_message:
            logger.warning('No updates were found or update was incomplete.')
            return
        logger.info('{0} packet(s) were updated'.format(updates_count))

        cmd = 'dockerctl destroy all; bootstrap_admin_node.sh;'
        with self.d_env.get_admin_remote() as admin_remote:
            result = admin_remote.execute(cmd)
        logger.info('Result of "{1}" command on master node: '
                    '{0}'.format(result, cmd))
        assert_equal(int(result['exit_code']), 0,
                     'bootstrap failed, '
                     'inspect logs for details')
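As a reference for the parsing logic above, the regexes can be exercised against typical yum summary lines; the sample lines below are illustrative rather than captured output:

import re

sample_stdout = [
    'Upgrade      12 Package(s)',  # illustrative yum transaction summary
    'Complete!',
]
for str_line in sample_stdout:
    match = re.search(r"Upgrade(?:\s*)(\d+).*Package", str_line)
    if match:
        print('updated packages: {0}'.format(match.group(1)))  # -> 12
    if re.search(r"(Complete!)", str_line):
        print('update finished successfully')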
Example #18
    def check_extra_tasks(self, slave_nodes, deployment):
        """Check existing extra tasks regarding to fixture and actual task
           or tasks with a wrong type

        :param slave_nodes: a list of nailgun nodes
        :param deployment: a string, name of the deployment kind
        :return: a list with nodes for which extra tasks regarding to fixture
                 and actual task or tasks with a wrong type were found
        """
        result = {'extra_actual_tasks': {},
                  'extra_fixture_tasks': {},
                  'wrong_types': {},
                  'failed_tasks': {}}
        for node in slave_nodes:
            node_roles = "_".join(sorted(node["roles"]))
            node_ref = "{}_{}".format(node["id"], node_roles)
            fixture = self.load_fixture(deployment, node_roles)
            node_tasks = self.get_nodes_tasks(node["id"])
            extra_actual_tasks, extra_fixture_tasks, wrong_types = \
                self.get_fixture_relevance(node_tasks, fixture)
            result['extra_actual_tasks'][node_ref] = extra_actual_tasks
            result['extra_fixture_tasks'][node_ref] = extra_fixture_tasks
            result['wrong_types'][node_ref] = wrong_types
            result['failed_tasks'][node_ref] = \
                extra_actual_tasks | \
                extra_fixture_tasks | \
                set(wrong_types.keys())

        logger.warning("Uncovered deployment tasks:\n{}"
                       .format(yaml.dump(result, default_flow_style=False)))
        failed_nodes = [node_refs
                        for node_refs, failed_tasks in
                        result['failed_tasks'].items()
                        if failed_tasks]
        return failed_nodes
Example #19
 def wrapper(*args, **kwargs):
     result = func(*args, **kwargs)
     environment = get_current_env(args)
     if environment:
         store_packages_json(environment)
     else:
         logger.warning("Can't collect packages: " "Unexpected class is decorated.")
     return result
Example #20
    def check_ceph_status(self, cluster_id, offline_nodes=(),
                          recovery_timeout=360):
        cluster_nodes = self.client.list_cluster_nodes(cluster_id)
        ceph_nodes = [n for n in cluster_nodes if 'ceph-osd' in
                      n['roles'] and n['id'] not in offline_nodes]
        clock_skew_status = ['clock', 'skew', 'detected']
        osd_recovery_status = ['degraded', 'recovery', 'osds', 'are', 'down']

        logger.info('Waiting until the Ceph service comes up...')
        for node in ceph_nodes:
            remote = self.environment.get_ssh_to_remote(node['ip'])
            try:
                wait(lambda: checkers.check_ceph_ready(remote) is True,
                     interval=20, timeout=120)
            except TimeoutError:
                logger.error('Ceph service is down on {0}'.format(
                    node['name']))
                raise

        logger.info('Ceph service is ready')
        logger.info('Checking Ceph Health...')
        for node in ceph_nodes:
            remote = self.environment.get_ssh_to_remote(node['ip'])
            health_status = checkers.get_ceph_health(remote)
            if 'HEALTH_OK' in health_status:
                continue
            elif 'HEALTH_WARN' in health_status:
                if checkers.check_ceph_health(remote, clock_skew_status):
                    logger.warning('Clock skew detected in Ceph.')
                    self.sync_ceph_time(ceph_nodes)
                    try:
                        wait(lambda: checkers.check_ceph_health(remote),
                             interval=30, timeout=recovery_timeout)
                    except TimeoutError:
                        logger.error('Ceph HEALTH is bad on {0}'.format(
                            node['name']))
                        raise
                elif checkers.check_ceph_health(remote, osd_recovery_status)\
                        and len(offline_nodes) > 0:
                    logger.info('Ceph is being recovered after osd node(s)'
                                ' shutdown.')
                    try:
                        wait(lambda: checkers.check_ceph_health(remote),
                             interval=30, timeout=recovery_timeout)
                    except TimeoutError:
                        logger.error('Ceph HEALTH is bad on {0}'.format(
                            node['name']))
                        raise
            else:
                assert_true(checkers.check_ceph_health(remote),
                            'Ceph health is not "OK", please '
                            'inspect debug logs for details')

        logger.info('Checking Ceph OSD Tree...')
        for node in ceph_nodes:
            remote = self.environment.get_ssh_to_remote(node['ip'])
            checkers.check_ceph_disks(remote, [n['id'] for n in ceph_nodes])
        logger.info('Ceph cluster status is OK')
Example #21
 def wrapper(*args, **kwargs):
     result = func(*args, **kwargs)
     if settings.STORE_ASTUTE_YAML:
         environment = get_current_env(args)
         if environment:
             store_astute_yaml(environment)
         else:
             logger.warning("Can't download astute.yaml: " "Unexpected class is decorated.")
     return result
Example #22
def get_current_env(args):
    if args[0].__class__.__name__ == "EnvironmentModel":
        return args[0]
    elif args[0].__class__.__name__ == "FuelWebClient":
        return args[0].environment
    elif args[0].__class__.__name__ == "NeutronGre":
        return args[0].env
    else:
        logger.warning("Unexpected class!")
Example #23
 def _get_response(self, req):
     if self.token is not None:
         try:
             logger.debug('Set X-Auth-Token to {0}'.format(self.token))
             req.add_header("X-Auth-Token", self.token)
         except exceptions.AuthorizationFailure:
             logger.warning('Failed with auth in http _get_response')
             logger.warning(traceback.format_exc())
     return self.opener.open(req)
Example #24
 def ironic(self):
     try:
         endpoint = self.__make_endpoint(
             self._get_url_for_svc(service_type='baremetal'))
         return get_ironic_client('1', session=self.keystone_session,
                                  insecure=True, ironic_url=endpoint)
     except ClientException as e:
         logger.warning('Could not initialize ironic client {0}'.format(e))
         raise
Example #25
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.UPDATE_FUEL:
            return result
        try:
            environment = get_current_env(args)
            if not environment:
                logger.warning("Can't update packages: method of " "unexpected class is decorated.")
                return result

            if settings.UPDATE_FUEL_MIRROR:
                for url in settings.UPDATE_FUEL_MIRROR:
                    repo_url = urllib.parse.urlparse(url)
                    cut_dirs = len(repo_url.path.strip("/").split("/"))
                    download_cmd = (
                        "wget --recursive --no-parent"
                        ' --no-verbose --reject "index'
                        '.html*,*.gif" --exclude-directories'
                        ' "{pwd}/repocache" '
                        "--directory-prefix {path} -nH"
                        " --cut-dirs={cutd} {url}"
                    ).format(
                        pwd=repo_url.path.rstrip("/"),
                        path=settings.UPDATE_FUEL_PATH,
                        cutd=cut_dirs,
                        url=repo_url.geturl(),
                    )
                    return_code = call(download_cmd, shell=True)
                    assert_equal(return_code, 0, "Mirroring of remote" " packages " "repository failed")

            centos_files_count, _ = environment.admin_actions.upload_packages(
                local_packages_dir=settings.UPDATE_FUEL_PATH,
                centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                ubuntu_repo_path=None,
            )

            if centos_files_count == 0:
                return result

            # Add temporary repo with new packages to YUM configuration
            conf_file = "/etc/yum.repos.d/temporary.repo"
            cmd = (
                "echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/" "\ngpgcheck=0\npriority=1' > {1}"
            ).format(settings.LOCAL_MIRROR_CENTOS, conf_file)

            SSHManager().execute_on_remote(ip=SSHManager().admin_ip, cmd=cmd)
            update_command = "yum clean expire-cache; yum update -y -d3 " "2>>/var/log/yum-update-error.log"
            cmd_result = SSHManager().execute(ip=SSHManager().admin_ip, cmd=update_command)
            logger.debug('Result of "yum update" command on master node: ' "{0}".format(cmd_result))
            assert_equal(int(cmd_result["exit_code"]), 0, "Packages update failed, " "inspect logs for details")

            SSHManager().execute_on_remote(ip=SSHManager().admin_ip, cmd="rm -f {0}".format(conf_file))
        except Exception:
            logger.error("Could not update packages")
            raise
        return result
Example #26
 def _open(self, req):
     try:
         return self._get_response(req)
     except urllib2.HTTPError as e:
         if e.code == 401:
             logger.warning('Authorization failure: {0}'.format(e.read()))
             self.authenticate()
             return self._get_response(req)
         else:
             raise
Example #27
 def check_run_by_group(self, snapshot_name, expected_group):
     test_group = sys.argv[-1]
     try:
         self.check_run(snapshot_name=snapshot_name)
     except SkipTest as e:
         if expected_group in test_group:
             logger.warning('Ignoring exception: {!r}'.format(e))
             logger.debug(traceback.format_exc())
         else:
             raise
Example #28
 def get_ssh_to_remote_by_key(self, ip, keyfile):
     try:
         with open(keyfile) as f:
             keys = [RSAKey.from_private_key(f)]
             return SSHClient(ip, private_keys=keys)
     except IOError:
         logger.warning('Loading of SSH key from file failed. Trying to use'
                        ' SSH agent ...')
         keys = Agent().get_keys()
         return SSHClient(ip, private_keys=keys)
Example #29
 def authenticate(self):
     try:
         logger.info("Initialize keystoneclient with url %s", self.keystone_url)
         self.keystone = KeystoneClient(auth_url=self.keystone_url, **self.creds)
         # Depending on the keystone version, auth may happen implicitly
         # or not, so we always call authenticate() explicitly
         self.keystone.authenticate()
         logger.debug("Authorization token is successfully updated")
     except exceptions.AuthorizationFailure:
         logger.warning("Cant establish connection to keystone with url %s", self.keystone_url)
Example #30
def openstack_puppet_project_mapping(project):
    """
    Find the fuel-qa system test with maximum coverage for the edited
    openstack/puppet project and register that group under the
    "review_in_openstack_puppet_project" name.
    project - puppet project edited in review
    Example: project = "openstack/puppet-openstacklib"
    """

    # open yaml with covered projects
    with open(
            "gates_tests/helpers/openstack_puppet_projects_mapping.yaml",
            "r") as f:
        mapping = yaml.load(f)

        all_projects = set(list(itertools.chain.from_iterable(
            [mapping[test_group]['modules'] for test_group in mapping])))
        logger.debug(
            "List of openstack/puppet-projects "
            "covered by system_tests {}".format(
                all_projects))
        logger.info(
            "Edited project in review - '{}'".format(project))

        # check that the project from the review is covered by a system test
        if project not in all_projects:
            logger.warning(
                "{} project does not exist or is not covered by system_test"
                .format(project))

        # find the test group which covers the project edited in the review
        system_test = "bvt_2"
        for test in mapping:
            if project in mapping[test]['projects']:
                system_test = test
                break

        devops_template = mapping[system_test]['devops_settings_template']

        import gates_tests

        path_to_template = os.path.join(
            os.path.dirname(os.path.abspath(gates_tests.__file__)),
            devops_template)

        logger.debug("devops template is {}".format(path_to_template))

        os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template
        logger.info(
            "Edited project in review - '{}'"
            " will be checked by the following system test: {}".format(
                project, system_test))

        register(groups=['review_in_openstack_puppet_project'],
                 depends_on_groups=[system_test])
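The mapping file is only accessed through a handful of keys (modules, projects, devops_settings_template), so after yaml.load it is expected to look roughly like this; a hypothetical illustration, not the actual file contents:

# Hypothetical shape of `mapping`; group and project names are illustrative.
mapping = {
    'bvt_2': {
        'modules': ['openstack/puppet-nova', 'openstack/puppet-keystone'],
        'projects': ['openstack/puppet-openstacklib'],
        'devops_settings_template': 'devops_configs/default.yaml',
    },
}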
Example #31
def puppet_modules_mapping(modules):
    """
    Find the fuel-qa system test with maximum coverage for the edited puppet
    modules and register that group under the "review_in_fuel_library" name.
    modules - dictionary of puppet modules edited in review
    Example: modules = {'horizon':'fuel-library/deployment/Puppetfile'}
    """

    # open yaml with covered modules
    with open("gates_tests/helpers/puppet_module_mapping.yaml", "r") as f:
        mapping = yaml.load(f)

    if modules and isinstance(modules, dict):
        all_modules = set([j for i in mapping.values() for j in i])
        logger.debug(
            "List of puppet modules covered by system_tests {}".format(
                all_modules))
        logger.info("List of modules edited in review {}".format(
            modules.keys()))

        # check that each module from the review is covered by a system test
        for module in modules.keys():
            if module.split('.')[0] not in all_modules:
                logger.warning(
                    "{}:{} module does not exist or is not covered by "
                    "system_test".format(module, modules[module]))

        # find test group which has better coverage of modules from review
        formatted_modules = [module.split('.')[0] for module in modules]
        system_test = "bvt_2"
        max_intersection = 0
        if not ("ceph" in modules
                and {"roles/cinder.pp", "cinder", "openstack-cinder"}
                & set(modules)):
            for test in mapping:
                test_intersection = len(
                    set(mapping[test]).intersection(set(formatted_modules)))
                if test_intersection > max_intersection:
                    max_intersection = test_intersection
                    system_test = test
        # To fully check the ceph module we can't mix ceph and cinder together
        else:
            logger.warning(
                "We cannot check cinder and ceph together {}".format(modules))
            system_test = "bvt_2"

    else:
        logger.warning("There no modules that changed in review "
                       "so just run default system test")
        system_test = "bvt_2"
    logger.info("Puppet modules from review {}"
                " will be checked by next system test: {}".format(
                    modules, system_test))

    register(groups=['review_in_fuel_library'],
             depends_on_groups=[system_test])
Example #32
    def _contain_secret_data(data):
        _has_private_data = False
        # Check that stats doesn't contain private data (e.g.
        # specific passwords, settings, emails)
        for _private in private_data.keys():
            _regex = r'(?P<key>"\S+"): (?P<value>[^:]*"{0}"[^:]*)'.format(
                private_data[_private])
            for _match in re.finditer(_regex, data):
                logger.warning('Found private info in usage statistics using '
                               'pattern: {0}'.format(_regex))
                logger.debug('Usage statistics with private data:\n {0}'.
                             format(data))
                logger.error("Usage statistics contains private info: '{type}:"
                             " {value}'. Part of the stats: {match}".format(
                                 type=_private,
                                 value=private_data[_private],
                                 match=_match.group('key', 'value')))
                _has_private_data = True
        # Check that stats doesn't contain private types of data (e.g. any kind
        # of passwords)
        for _data_type in secret_data_types.keys():
            _regex = (r'(?P<secret>"[^"]*{0}[^"]*": (\{{[^\}}]+\}}|\[[^\]+]\]|'
                      r'"[^"]+"))').format(secret_data_types[_data_type])

            for _match in re.finditer(_regex, data, re.IGNORECASE):
                logger.warning('Found private info in usage statistics using '
                               'pattern: {0}'.format(_regex))
                logger.debug('Usage statistics with private data:\n {0}'.
                             format(data))
                logger.error("Usage statistics contains private info: '{type}:"
                             " {value}'. Part of the stats: {match}".format(
                                 type=_data_type,
                                 value=secret_data_types[_data_type],
                                 match=_match.group('secret')))
                _has_private_data = True
        return _has_private_data
Example #33
    def _get_keystoneclient(self, username, password, tenant_name, auth_url,
                            retries=3, ca_cert=None, insecure=False):
        exception = None
        for i in xrange(retries):
            try:
                if ca_cert:
                    return KeystoneClient(username=username,
                                          password=password,
                                          tenant_name=tenant_name,
                                          auth_url=auth_url,
                                          cacert=ca_cert,
                                          insecure=insecure)

                else:
                    return KeystoneClient(username=username,
                                          password=password,
                                          tenant_name=tenant_name,
                                          auth_url=auth_url)
            except ClientException as exc:
                err = "Try nr {0}. Could not get keystone client, error: {1}"
                logger.warning(err.format(i + 1, exc))
                exception = exc
                time.sleep(5)
        raise exception if exception else RuntimeError()
Example #34
    def sync_time_admin_node(self):
        logger.info("Sync time on revert for admin")
        remote = self.get_admin_remote()
        self.execute_remote_cmd(remote, 'hwclock -s')
        # Sync time using ntpd
        try:
            # If public NTP servers aren't accessible ntpdate will fail and
            # ntpd daemon shouldn't be restarted to avoid 'Server has gone
            # too long without sync' error while syncing time from slaves
            self.execute_remote_cmd(
                remote, "ntpdate -d $(awk '/^server/{print"
                " $2}' /etc/ntp.conf)")
        except AssertionError as e:
            logger.warning('Error occurred while synchronizing time on master'
                           ': {0}'.format(e))
            raise
        else:
            self.execute_remote_cmd(
                remote, 'service ntpd stop && ntpd -qg && '
                'service ntpd start')
            self.execute_remote_cmd(remote, 'hwclock -w')

        remote_date = remote.execute('date')['stdout']
        logger.info("Master node time: {0}".format(remote_date))
Example #35
def fuel_library_modules_mapping(modules):
    """
    Find the fuel-qa system test with maximum coverage for the edited puppet
    modules and register that group under the "review_in_fuel_library" name.
    modules - dictionary of puppet modules edited in review
    Example: modules = {'horizon':'fuel-library/deployment/Puppetfile'}
    """

    # open yaml with covered modules
    with open("gates_tests/helpers/fuel_library_modules_mapping.yaml",
              "r") as f:
        mapping = yaml.load(f)

    if modules and isinstance(modules, dict):
        all_modules = set(
            list(
                itertools.chain.from_iterable([
                    mapping[test_group]['modules'] for test_group in mapping
                ])))

        logger.debug(
            "List of puppet modules covered by system_tests {}".format(
                all_modules))
        logger.info("List of modules edited in review {}".format(
            modules.keys()))

        # check that each module from the review is covered by a system test
        for module in modules.keys():
            if module not in all_modules:
                logger.warning(
                    "{}:{} module does not exist or is not covered by "
                    "system_test".format(module, modules[module]))

        # find test group which has better coverage of modules from review
        system_test = "bvt_2"
        max_intersection = 0
        if not ("ceph" in modules
                and {"roles/cinder.pp", "cinder", "openstack-cinder"}
                & set(modules)):
            for test in mapping:
                test_intersection = len(
                    set(mapping[test]['modules']).intersection(set(modules)))
                if test_intersection > max_intersection:
                    max_intersection = test_intersection
                    system_test = test

            devops_template = mapping[system_test]['devops_settings_template']

            import gates_tests

            path_to_template = os.path.join(
                os.path.dirname(os.path.abspath(gates_tests.__file__)),
                devops_template)

            logger.debug("devops template is {}".format(path_to_template))

            os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template

        # To fully check the ceph module we can't mix ceph and cinder together
        else:
            logger.warning(
                "We cannot check cinder and ceph together {}".format(modules))
            system_test = "bvt_2"

    else:
        logger.warning("There no modules that changed in review "
                       "so just run default system test")
        system_test = "bvt_2"
    logger.info("Puppet modules from review {}"
                " will be checked by next system test: {}".format(
                    modules, system_test))

    register(groups=['review_in_fuel_library'],
             depends_on_groups=[system_test])
Example #36
    def __init__(self, controller_ip, user, password, tenant):
        self.controller_ip = controller_ip

        def make_endpoint(endpoint):
            parse = urlparse(endpoint)
            return parse._replace(
                netloc='{}:{}'.format(
                    self.controller_ip, parse.port)).geturl()

        if DISABLE_SSL:
            auth_url = 'http://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = None
        else:
            auth_url = 'https://{0}:5000/v2.0/'.format(self.controller_ip)
            path_to_cert = PATH_TO_CERT

        insecure = not VERIFY_SSL

        LOGGER.debug('Auth URL is {0}'.format(auth_url))

        keystone_args = {'username': user, 'password': password,
                         'tenant_name': tenant, 'auth_url': auth_url,
                         'ca_cert': path_to_cert, 'insecure': insecure}
        self.keystone = self._get_keystoneclient(**keystone_args)

        token = self.keystone.auth_token
        LOGGER.debug('Token is {0}'.format(token))

        neutron_endpoint = self.keystone.service_catalog.url_for(
            service_type='network', endpoint_type='publicURL')
        neutron_args = {'username': user, 'password': password,
                        'tenant_name': tenant, 'auth_url': auth_url,
                        'ca_cert': path_to_cert, 'insecure': insecure,
                        'endpoint_url': make_endpoint(neutron_endpoint)}
        self.neutron = neutronclient.Client(**neutron_args)

        nova_endpoint = self.keystone.service_catalog.url_for(
            service_type='compute', endpoint_type='publicURL')
        nova_args = {'username': user, 'api_key': password,
                     'project_id': tenant, 'auth_url': auth_url,
                     'cacert': path_to_cert, 'insecure': insecure,
                     'bypass_url': make_endpoint(nova_endpoint),
                     'auth_token': token}
        self.nova = NovaClient(**nova_args)

        cinder_endpoint = self.keystone.service_catalog.url_for(
            service_type='volume', endpoint_type='publicURL')
        cinder_args = {'version': 1, 'username': user,
                       'api_key': password, 'project_id': tenant,
                       'auth_url': auth_url, 'cacert': path_to_cert,
                       'insecure': insecure,
                       'bypass_url': make_endpoint(cinder_endpoint)}
        self.cinder = cinderclient.Client(**cinder_args)

        glance_endpoint = self.keystone.service_catalog.url_for(
            service_type='image', endpoint_type='publicURL')
        LOGGER.debug('Glance endpoint is {0}'.format(
            make_endpoint(glance_endpoint)))
        glance_args = {'endpoint': make_endpoint(glance_endpoint),
                       'token': token,
                       'cacert': path_to_cert,
                       'insecure': insecure}
        self.glance = GlanceClient(**glance_args)

        try:
            ironic_endpoint = self.keystone.service_catalog.url_for(
                service_type='baremetal',
                endpoint_type='publicURL')
            self.ironic = ironicclient.get_client(
                api_version=1,
                os_auth_token=token,
                ironic_url=make_endpoint(ironic_endpoint), insecure=True)
        except ClientException as e:
            LOGGER.warning('Could not initialize ironic client {0}'.format(e))
Example #37
    def contrail_murano(self):
        """Check deploy contrail with murano.

        Scenario:
            1. Create an environment with "Neutron with tunneling
               segmentation" as a network configuration
            2. Enable murano
            3. Enable and configure Contrail plugin
            4. Add a node with controller role
            5. Add a node with "compute" and "Storage-cinder" roles
            6. Add a node with 'contrail-controller' role
            7. Add a node with "contrail-analytics" role
            8. Add a node with "contrail-analytics-db" role
            9. Deploy cluster with plugin
            10. Run contrail health check tests
            11. Run OSTF tests

        Duration 120 min

        """
        min_slave_ram = 10000

        plugin.show_range(self, 1, 3)
        plugin.prepare_contrail_plugin(self, slaves=5,
                                       options={'murano': True})

        self.show_step(3)
        plugin.activate_plugin(self, contrail_api_public_port="8098")

        # activate vSRX image
        vsrx_setup_result = vsrx.activate()

        plugin.show_range(self, 4, 9)
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'cinder'],
                'slave-03': ['contrail-controller'],
                'slave-04': ['contrail-analytics'],
                'slave-05': ['contrail-analytics-db']
            })

        self.show_step(9)
        openstack.deploy_cluster(self)
        self.show_step(10)
        TestContrailCheck(self).cloud_check(['contrail'])

        self.show_step(11)
        check_ram_result = openstack.check_slave_memory(min_slave_ram)
        if vsrx_setup_result and check_ram_result:
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity', 'tests_platform'],
                timeout=settings.OSTF_RUN_TIMEOUT
                )
        elif vsrx_setup_result:
            logger.warning('Ostf tests will be run without platform tests')
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity'],
                timeout=settings.OSTF_RUN_TIMEOUT
                )
Example #38
 def get_releases_details(self, release_id):
     msg = 'get_releases_details is deprecated in favor of get_release'
     warn(msg, DeprecationWarning)
     logger.warning(msg)
     return self._get(url="/releases/{}".format(release_id)).json()
Example #39
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if not settings.UPDATE_FUEL:
            return result
        try:
            environment = get_current_env(args)
            if not environment:
                logger.warning("Can't update packages: method of "
                               "unexpected class is decorated.")
                return result

            if settings.UPDATE_FUEL_MIRROR:
                for url in settings.UPDATE_FUEL_MIRROR:
                    repo_url = urllib.parse.urlparse(url)
                    cut_dirs = len(repo_url.path.strip('/').split('/'))
                    download_cmd = ('wget --recursive --no-parent'
                                    ' --no-verbose --reject "index'
                                    '.html*,*.gif" --exclude-directories'
                                    ' "{pwd}/repocache" '
                                    '--directory-prefix {path} -nH'
                                    ' --cut-dirs={cutd} {url}').\
                        format(pwd=repo_url.path.rstrip('/'),
                               path=settings.UPDATE_FUEL_PATH,
                               cutd=cut_dirs, url=repo_url.geturl())
                    return_code = call(download_cmd, shell=True)
                    assert_equal(return_code, 0, 'Mirroring of remote'
                                                 ' packages '
                                                 'repository failed')

            centos_files_count, _ = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=None)

            if centos_files_count == 0:
                return result

            # Add temporary repo with new packages to YUM configuration
            conf_file = '/etc/yum.repos.d/temporary.repo'
            cmd = ("echo -e '[temporary]\nname=temporary\nbaseurl=file://{0}/"
                   "\ngpgcheck=0\npriority=1' > {1}").format(
                settings.LOCAL_MIRROR_CENTOS, conf_file)

            SSHManager().execute_on_remote(
                ip=SSHManager().admin_ip,
                cmd=cmd
            )
            update_command = 'yum clean expire-cache; yum update -y -d3 ' \
                             '2>>/var/log/yum-update-error.log'
            cmd_result = SSHManager().execute(ip=SSHManager().admin_ip,
                                              cmd=update_command)
            logger.debug('Result of "yum update" command on master node: '
                         '{0}'.format(cmd_result))
            assert_equal(int(cmd_result['exit_code']), 0,
                         'Packages update failed, '
                         'inspect logs for details')

            SSHManager().execute_on_remote(
                ip=SSHManager().admin_ip,
                cmd='rm -f {0}'.format(conf_file)
            )
        except Exception:
            logger.error("Could not update packages")
            raise
        return result
Example #40
    # Enforce closing all connections for objects recreate and API arrival
    SSHClient.close_connections()

else:
    with open(
            os.path.abspath(
                os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../requirements-devops-source.txt'))) as req:
        d_req = req.read()

    req_ver = d_req.split('@')[-1]

    if StrictVersion(req_ver) >= StrictVersion('3.0.2'):
        logger.warning(
            'Please revert changes with change-id:\n'
            '\tId90f06b4c83f9e0a21adf5c90aa04111d2a4153e (gerrit 359684)\n'
            'This solution is not required anymore due to using an '
            'up-to-date version of fuel-devops.')
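For context, req_ver above is the tail of a pinned-source requirement line. A minimal sketch of the parsing, assuming a hypothetical requirements-devops-source.txt content (the URL and version are illustrative only):

    from distutils.version import StrictVersion

    # Hypothetical single-line requirements file contents:
    d_req = 'git+https://github.com/openstack/fuel-devops.git@3.0.2'

    req_ver = d_req.split('@')[-1]  # -> '3.0.2'
    print(StrictVersion(req_ver) >= StrictVersion('3.0.2'))  # True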


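The SSHManager class below is built with six.with_metaclass(SingletonMeta, object), but the metaclass itself is not part of this example. A minimal sketch of how such a SingletonMeta is commonly implemented (an assumption, not necessarily the project's actual code):

    class SingletonMeta(type):
        """Cache a single instance per class."""
        _instances = {}

        def __call__(cls, *args, **kwargs):
            # Create the instance on the first call, return the cached
            # one on every call after that.
            if cls not in cls._instances:
                cls._instances[cls] = super(SingletonMeta, cls).__call__(
                    *args, **kwargs)
            return cls._instances[cls]

With such a metaclass, SSHManager() is SSHManager() evaluates to True, so every caller shares the same connection cache.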
class SSHManager(six.with_metaclass(SingletonMeta, object)):
    def __init__(self):
        logger.debug('SSH_MANAGER: Run constructor SSHManager')
        self.__connections = {}  # Disallow direct type change and deletion
        self.admin_ip = None
        self.admin_port = None
        self.admin_login = None
        self.__admin_password = None
        self.slave_login = None
        self.slave_fallback_login = '******'
        self.__slave_password = None

    def contrail_ceilometer(self):
        """Check deploy environment with Contrail and Ceilometer.

        Scenario:
            1. Create an environment with "Neutron with tunneling
               segmentation" as a network configuration and CEPH storage
            2. Enable and configure Contrail plugin
            3. Add a node with "controller" role
            4. Add a node with "controller" + "MongoDB" roles
            5. Add a node with "controller" + "ceph-OSD" roles
            6. Add a node with "compute" + "ceph-OSD" + "cinder" roles
            7. Add a node with "compute" + "ceph-OSD" roles
            8. Add a node with "MongoDB" role
            9. Add a node with "contrail-controller" role
            10. Add a node with "contrail-analytics-db" and
                "contrail-analytics" roles
            11. Deploy cluster with plugin
            12. Run contrail health check tests
            13. Run OSTF tests

        Duration 120 min

        """
        min_slave_ram = 10000
        self.show_step(1)
        plugin.prepare_contrail_plugin(
            self,
            slaves=9,
            options={'images_ceph': True,
                     'ceilometer': True})

        self.show_step(2)
        plugin.activate_plugin(self)
        # activate vSRX image
        vsrx_setup_result = vsrx.activate()

        plugin.show_range(self, 3, 11)
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['controller', 'ceph-osd'],
                'slave-03': ['controller', 'mongo'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['compute', 'ceph-osd', 'cinder'],
                'slave-06': ['mongo'],
                'slave-07': ['contrail-controller'],
                'slave-08': ['contrail-analytics-db', 'contrail-analytics']

            })
        self.show_step(11)
        openstack.deploy_cluster(self)
        self.show_step(12)
        TestContrailCheck(self).cloud_check(['contrail'])
        self.show_step(13)
        check_ram_result = openstack.check_slave_memory(min_slave_ram)
        # https://bugs.launchpad.net/fuel/newton/+bug/1584190
        # remove 'should_fail' when the bug is fixed
        if vsrx_setup_result and check_ram_result:
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity', 'ha', 'tests_platform'],
                timeout=settings.OSTF_RUN_TIMEOUT,
                should_fail=1,
                failed_test_name=['Check stack autoscaling'])
        elif vsrx_setup_result:
            logger.warning('OSTF tests will be run without platform tests')
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity', 'ha'],
                timeout=settings.OSTF_RUN_TIMEOUT)

    def contrail_sahara(self):
        """Check Contrail deploy with sahara.

        Scenario:
            1. Create an environment with "Neutron with tunneling
               segmentation" as a network configuration and CEPH storage
            2. Enable sahara
            3. Enable and configure Contrail plugin
            4. Add a node with controller role
            5. Add 3 nodes with "compute" and "ceph-OSD" roles
            6. Add a node with contrail-controller role
            7. Add a node with 'contrail-analytics' role
            8. Add a node with 'contrail-analytics-db' role
            9. Deploy cluster with plugin
            10. Run contrail health check tests
            11. Run OSTF tests

        Duration 120 min

        """
        min_slave_ram = 8138

        plugin.show_range(self, 1, 3)
        plugin.prepare_contrail_plugin(self, slaves=9,
                                       options={'images_ceph': True,
                                                'volumes_ceph': True,
                                                'ephemeral_ceph': True,
                                                'objects_ceph': True,
                                                'volumes_lvm': False,
                                                'sahara': True})

        self.show_step(3)
        plugin.activate_plugin(self)
        # activate vSRX image
        vsrx_setup_result = vsrx.activate()

        plugin.show_range(self, 4, 9)
        self.fuel_web.update_nodes(
            self.cluster_id,
            {
                'slave-01': ['controller'],
                'slave-02': ['compute', 'ceph-osd'],
                'slave-03': ['compute', 'ceph-osd'],
                'slave-04': ['compute', 'ceph-osd'],
                'slave-05': ['contrail-controller'],
                'slave-06': ['contrail-analytics-db'],
                'slave-07': ['contrail-analytics']
            })

        self.show_step(9)
        openstack.deploy_cluster(self)
        self.show_step(10)
        TestContrailCheck(self).cloud_check(['contrail'])
        self.show_step(11)
        check_ram_result = openstack.check_slave_memory(min_slave_ram)
        if vsrx_setup_result and check_ram_result:
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity', 'tests_platform'],
                timeout=settings.OSTF_RUN_TIMEOUT
                )
        elif vsrx_setup_result:
            logger.warning('OSTF tests will be run without platform tests')
            self.fuel_web.run_ostf(
                cluster_id=self.cluster_id,
                test_sets=['smoke', 'sanity'],
                timeout=settings.OSTF_RUN_TIMEOUT
            )
Example #43
0
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if settings.UPDATE_FUEL:
            logger.info("Update fuel's packages from directory {0}.".format(
                settings.UPDATE_FUEL_PATH))
            environment = get_current_env(args)
            if not environment:
                logger.warning("Decorator was triggered "
                               "from unexpected class.")
                return result

            centos_files_count, ubuntu_files_count = \
                environment.admin_actions.upload_packages(
                    local_packages_dir=settings.UPDATE_FUEL_PATH,
                    centos_repo_path=settings.LOCAL_MIRROR_CENTOS,
                    ubuntu_repo_path=settings.LOCAL_MIRROR_UBUNTU)
            if not centos_files_count and not ubuntu_files_count:
                raise ConfigurationException(
                    'Nothing to update: the number of packages '
                    'to update is 0')
            cluster_id = environment.fuel_web.get_last_created_cluster()

            if centos_files_count > 0:
                environment.docker_actions.execute_in_containers(
                    cmd='yum -y install yum-plugin-priorities')

                # Update docker containers and restart them
                environment.docker_actions.execute_in_containers(
                    cmd='yum clean expire-cache; yum update -y')
                environment.docker_actions.restart_containers()

                with environment.d_env.get_admin_remote() as remote:
                    # Update packages on master node
                    remote.execute('yum -y install yum-plugin-priorities;'
                                   'yum clean expire-cache; yum update -y')

                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU not in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_centos_mirror(
                        cluster_id,
                        path=settings.LOCAL_MIRROR_CENTOS,
                        priority=settings.AUX_RPM_REPO_PRIORITY)

            if ubuntu_files_count > 0:
                # Add auxiliary repository to the cluster attributes
                if settings.OPENSTACK_RELEASE_UBUNTU in \
                        settings.OPENSTACK_RELEASE:
                    environment.fuel_web.add_local_ubuntu_mirror(
                        cluster_id,
                        name="Auxiliary",
                        path=settings.LOCAL_MIRROR_UBUNTU,
                        priority=settings.AUX_DEB_REPO_PRIORITY)
                else:
                    logger.error("{0} .DEB files uploaded but won't be used"
                                 " because of deploying wrong release!".format(
                                     ubuntu_files_count))
            if settings.SYNC_DEPL_TASKS:
                with environment.d_env.get_admin_remote() as remote:
                    remote.execute("fuel release --sync-deployment-tasks"
                                   " --dir /etc/puppet/")
        return result
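The wrapper above is the body of a package-update decorator. A minimal sketch of how such a decorator is typically assembled around it (the name update_fuel and the use of functools.wraps are assumptions for illustration):

    import functools

    def update_fuel(func):
        """Upload and apply new packages after the wrapped test step."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # ... package upload / repo update logic from the example ...
            return result
        return wrapper

A test method would then simply be marked with @update_fuel, and the update logic runs only when settings.UPDATE_FUEL is set.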
Example #44
0
    def check_idempotency(self, deployment):
        """Check task idempotency for corresponding deployment

        :param deployment: a string, name of the deployment kind
        :return: a boolean: True if all tasks are idempotent,
                 False if any task is not
        """
        idempotent = True
        cluster_id = self.fuel_web.get_last_created_cluster()
        slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)

        result = {'tasks_idempotency': {}, 'timeouterror_tasks': {}}
        pr_ctrl = (self.define_pr_ctrl()
                   if deployment == '3_ctrl_3_cmp_ceph_sahara' else {})
        for node in slave_nodes:
            node_roles = "_".join(sorted(node["roles"]))
            if node.get('name') == pr_ctrl.get('name', None):
                node_roles = 'primary-' + node_roles
            node_ref = "{}_{}".format(node["id"], node_roles)
            fixture = self.load_fixture(deployment, node_roles)

            failed_tasks = {}
            timeouterror_tasks = []

            for task in fixture['tasks']:
                task_name, fixture_task = task.items()[0]

                if fixture_task['type'] != 'puppet':
                    logger.info(
                        'Skipping check of {!r} task, it is not a puppet '
                        'task'.format(task_name))
                    continue

                self.fuel_web.execute_task_on_node(task_name, node["id"],
                                                   cluster_id)

                try:
                    report = self.get_puppet_report(node)
                except AssertionError:
                    if not fixture_task.get('no_puppet_run'):
                        msg = ('Unexpected no_puppet_run for task: {!r}'.
                               format(task_name))
                        logger.info(msg)
                        timeouterror_tasks.append(task_name)
                    continue

                skip = fixture_task.get('skip') or []  # guard missing key
                failed = False
                task_resources = []

                for res_name, res_stats in report['resource_statuses'].items():
                    if res_stats['changed'] and res_name not in skip:
                        failed = True
                        msg = ('Non-idempotent task {!r}, resource: {}'.format(
                            task_name, res_name))
                        logger.error(msg)
                        task_resources.append(res_name)

                if failed:
                    idempotent = False
                    failed_tasks.update({task_name: task_resources})
                else:
                    logger.info(
                        'Task {!r} on node {!r} was executed successfully'.
                        format(task_name, node['id']))

            result['tasks_idempotency'][node_ref] = failed_tasks
            result['timeouterror_tasks'][node_ref] = timeouterror_tasks

        logger.warning('Non-idempotent tasks:\n{}'.format(
            yaml.dump(result, default_flow_style=False)))
        return idempotent
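For context, the report consumed above is puppet's last-run report. A minimal sketch of the resource_statuses shape that the idempotency check relies on (the resource names and values are hypothetical, for illustration only):

    # Hypothetical fragment of a parsed puppet report:
    report = {
        'resource_statuses': {
            'File[/etc/nova/nova.conf]': {'changed': False},
            'Service[nova-api]': {'changed': True},  # would flag the task
        },
    }

    skip = []  # resources the fixture allows to change
    failed = any(stats['changed'] and name not in skip
                 for name, stats in report['resource_statuses'].items())
    print(failed)  # True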