Example #1
    def runTest(self):
        env_lang = os.environ.get('LANG')
        os.environ['LANG'] = 'C'
        params = self.params
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")
        try:
            if params.get("security_type") == "bandit":
                self._banditTest()
            else:
                self._syntribosTest()
        # This trick will give better reporting of cloud tests being executed
        # into avocado (skips, warns and errors will display correctly)
        except exceptions.TestNotFoundError, details:
            raise exceptions.TestSkipError(details)
Example #2
    def _banditTest(self):
        params = self.params
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])
        try:
            mould = __import__(str(params.get("openstack_project_name")))
        except ImportError:
            raise Exception("Not Found The Mould")
        project_path = os.path.split(mould.__file__)[0]
        cmd = 'bandit -r %s' % project_path
        severitymax = int(params.get("severity_max"))
        confidencemax = int(params.get("confidence_max"))
        self.log.info('Try to run command: %s' % cmd)
        try:
            result = process.run(cmd, ignore_status=True, shell=True, verbose=True)
            self.log.info("[Bandit output] %s" % result.stdout)
            self._log_detailed_cmd_info(result)
            pattern = "Total([\s\S]*)"
            matched_result = re.findall(pattern, result.stdout)
            tmp1 = matched_result[0]
            pat = "High:(.*)"
            tmp2 = re.findall(pat, tmp1)
            severity = tmp2[0]
            confidence = tmp2[1]
            self.log.info("The severity High is :%s" % severity)
            self.log.info("The confidence High is :%s" % confidence)
            if int(severity) > severitymax:
                self.log.info("The naumber of severity High is: %s" % severity)
                raise exceptions.TestFail("The number of severity High is greater"
                                          " than allowable value ")
            elif int(confidence) > confidencemax:
                self.log.info("The naumber of confidence High is: %s" % confidence)
                raise exceptions.TestFail("The number of confidence High is greater"
                                          " than allowable value ")
            self.log.info("Scan Completed")
        except Exception:
            self.log.debug("Exception happended during runing test")
            raise
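The regex handling above pulls the two "High:" counts (severity first, then confidence) out of bandit's plain-text run metrics. A minimal standalone sketch of that parsing, against a made-up sample of the summary block (the exact layout may vary between bandit versions):

import re

# Hypothetical excerpt of bandit's "Run metrics" summary (layout may vary)
sample = """
Run metrics:
    Total issues (by severity):
        Low: 3
        Medium: 1
        High: 2
    Total issues (by confidence):
        Low: 0
        Medium: 2
        High: 4
"""

# Everything from the first "Total" to the end, as in the example above
block = re.findall(r"Total([\s\S]*)", sample)[0]
# The two "High:" values: the severity count first, then the confidence count
highs = [int(v) for v in re.findall(r"High:(.*)", block)]
severity_high, confidence_high = highs
assert (severity_high, confidence_high) == (2, 4)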
Example #3
    def test_lun_ops(self):
        ops = self.params.get('lun_ops')
        if ops in 'add':
            if not self._lun_ops('add'):
                raise exceptions.TestFail('Test of add lun failed')

        elif ops in 'delete':
            if not self.lun_id:
                self.lun_id = self.env['iscsi_target_lun']['lun_id']
            if not self._lun_ops('delete'):
                raise exceptions.TestFail('Test of delete lun failed')

        elif ops in 'get_lun_info':
            resp = self.client.get_lun_info()
            if not len(resp) > 0:
                raise exceptions.TestFail("Test of get_lun_info failed")
        else:
            raise exceptions.TestNotFoundError(
                'Did not find test for operation')
Example #4
    def _bind_account_operation(self, account_ops, account_group_id,
                                target_id):
        """
        Test to bind or unbind account group to target
        """
        LOG.info("Try to %s account %s to target %s" %
                 (account_ops, account_group_id, target_id))

        if 'unbind' in account_ops:
            resp = self.iscsi_client.unbind_account(target_id,
                                                    account_group_id)
            if not len(resp) > 0:
                raise exceptions.TestFail("Unbind account group '%s' failed" %
                                          account_group_id)
        elif 'bind' in account_ops:
            resp = self.iscsi_client.bind_account(target_id, account_group_id)
            if not len(resp) > 0:
                raise exceptions.TestFail("Bind account group '%s' failed" %
                                          account_group_id)
        else:
            raise exceptions.TestNotFoundError('Did not find test for bind '
                                               'account operation')
Example #5
    def test_bind_account_operation(self):
        """
        Test to bind or unbind account group to target
        """
        account_ops = self.params.get('account_operation')
        target_id = self.env.get('iscsi_target')['target_id']
        account_group_id = self.env.get('iscsi_accountgroup')['group_id']

        LOG.info("Try to %s account %s to target %s" %
                 (account_ops, account_group_id, target_id))

        if 'unbind' in account_ops:
            resp = self.client.unbind_account(target_id, account_group_id)
            if not len(resp) > 0:
                raise exceptions.TestFail("Unbind account group '%s' failed" %
                                          account_group_id)
        elif 'bind' in account_ops:
            resp = self.client.bind_account(target_id, account_group_id)
            if not len(resp) > 0:
                raise exceptions.TestFail("Bind account group '%s' failed" %
                                          account_group_id)
        else:
            raise exceptions.TestNotFoundError('Did not find test for bind '
                                               'account operation')
Example #6
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])

        ips_list = []
        host_list = params.get("host_ips").split(",")
        for item in host_list:
            if item.find("/") != -1:
                for ip_info in ipaddr.IPv4Network(item):
                    ips_list.append(str(ip_info))
            elif item.find("-") != -1:
                begin_ip, end_ip = item.split("-")
                ip_ranges = ipaddr.summarize_address_range(
                    ipaddr.IPv4Address(begin_ip), ipaddr.IPv4Address(end_ip))
                for ip_range in ip_ranges:
                    for ip_info in ipaddr.IPv4Network(str(ip_range)):
                        ips_list.append(str(ip_info))
            else:
                ips_list.append(item)

        ips_list = sorted(set(ips_list), key=ips_list.index)
        self.log.debug("all health check ip list:")
        self.log.debug(ips_list)

        test_passed = True
        health_check_result = {}

        try:
            try:
                try:
                    for host_ip in ips_list:
                        try:
                            health_check = \
                                HealthCheck(host_ip, params,
                                            is_raise_health_check_excp=True)

                            health_check.get_health_status()
                            health_check_result[host_ip] = True
                        except exceptions.TestError:
                            health_check_result[host_ip] = False
                        except exceptions.TestFail:
                            health_check_result[host_ip] = False
                    for key in health_check_result.keys():
                        result = health_check_result[key]
                        if not result:
                            self.log.error("host %s health check failed" % key)
                            test_passed = False
                        else:
                            self.log.info("host %s health check passed" % key)

                    self.verify_background_errors()

                    if not test_passed:
                        raise exceptions.TestFail("health check failed")

                except Exception:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    self.log.debug("Exception happened during running test")
                    raise

            finally:
                pass
                #     if self.__safe_env_save(env):
                #         env.destroy()   # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #7
    def _syntribosTest(self):
        params = self.params
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])

        utils_misc.set_openstack_environment()
        process.run('syntribos init --force', shell=True)

        syntribos_dir = os.path.join(self.logdir, 'syntribos')
        os.mkdir(syntribos_dir)
        syntribos_config_file = os.path.join(syntribos_dir, 'syntribos.conf')

        conf1 = ConfigParser.ConfigParser()
        conf1.read(syntribos_config_file)
        conf1.add_section("syntribos")
        conf1.add_section("user")
        conf1.add_section("auth")
        conf1.add_section('logging')

        auth_url = params.get("OS_AUTH_URL")
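        # Reduce the auth URL to scheme plus host:port, e.g.
        # 'http://host:5000/v3' -> 'http://host:5000'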
        endpoint = '//'.join([i for i in auth_url.split('/') if ':' in i])

        conf1.set("syntribos", "endpoint",
                      self.__get_endpoint(params.get('project_name')))

        conf1.set("user", "endpoint", endpoint)
        conf1.set("user", "username", params.get('OS_USERNAME'))
        conf1.set("user", "password", params.get('OS_PASSWORD'))
        conf1.set("user", "domain_name", params.get('OS_DOMAIN_NAME', 'Default'))
        conf1.set("user", "project_name", params.get('OS_TENANT_NAME'))
        conf1.set("auth", "endpoint", auth_url)
        conf1.set("logging", "log_dir", self.logdir)

        try:
            syntribos_file = open(syntribos_config_file, "w")
            conf1.write(syntribos_file)
            syntribos_file.close()
        except IOError:
            raise exceptions.TestError("Failed to generate config file")

        with open(syntribos_config_file, 'r') as f:
            content = f.read()
            self.log.info("Syntribos config:\n %s" % content)

        cmd = 'syntribos --config-file %s --syntribos-custom_root %s run' % (
                  syntribos_config_file, syntribos_dir)
        failure_count = 0
        error_count = 0

        try:
            self.log.info('Try to run command: %s' % cmd)
            sub = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
            result = ""
            while True:
                line = sub.stdout.readline()
                if line.strip() != '':
                    self.log.info("[Syntribos output] %s" % line.strip('\n'))
                    result += '\n' + line
                if line == '' and sub.poll() is not None:
                    break

            pat1 = "%  :(.+?)Failure"
            pat2 = ",(.+?)Error"
            failures = re.findall(pat1, result)
            errors = re.findall(pat2, result)

            for failure in failures:
                if int(failure) > 0:
                    failure_count += int(failure)

            for error in errors:
                if int(error) > 0:
                    error_count += int(error)

            self.log.info('=================')
            self.log.info('Total Failure: %d' % failure_count)
            self.log.info('Total Error: %d' % error_count)
            if failure_count > 0:
                raise exceptions.TestFail("There are yntribos test failures")
        except Exception:
            self.log.debug("Exception happended during runing syntribos test")
            raise
        finally:
            syntribos_file = open(syntribos_config_file, "w")
            syntribos_file.truncate()
            syntribos_file.close()
            self.log.info("Test Completed")
Example #8
    def runTest(self):
        e_msg = ('Test %s could not be found in the test dir %s '
                 '(or test path does not exist)' %
                 (self.name, data_dir.get_test_dir()))
        raise exceptions.TestNotFoundError(e_msg)
Example #9
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        utils_misc.set_openstack_environment()

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        test_script = self.params.get('script')
        class_name = self.params.get('class_name')
        # Import the module
        mod_name = 'cloudtest.tests.nfv.%s' % test_script
        test_module = importlib.import_module(mod_name)

        for _, obj in inspect.getmembers(test_module):
            if (inspect.isclass(obj) and obj.__name__ == class_name
                    and inspect.getmodule(obj) == test_module):
                test_class = obj
                break
        self.log.info("Initialize test class: %s" % class_name)

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({
            "func_at_exit": utils_env.cleanup_env,
            "args": (env_filename, self.env_version),
            "once": True
        })

        comp = test_class(params, env)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    quotas_to_update = params.get('quotas_need_to_update', '')
                    if quotas_to_update:
                        self.log.info('Need to update quotas before test: %s' %
                                      quotas_to_update)
                        compute_utils = Compute(self.params)
                        quotas_to_update = quotas_to_update.split(',')
                        expected_quota = dict()
                        for q in quotas_to_update:
                            k, v = q.split(':')
                            expected_quota[k] = v
                        compute_utils.update_quotas(expected_quota)

                    # Initialize injection tests including workload and fault
                    need_injection = (
                        self.params.get('workload_injection') in 'true'
                        or self.params.get('fault_injection') in 'true')

                    force_injection = (self.params.get(
                        'workload_injection_force',
                        'false') in 'true' or self.params.get(
                            'fault_injection_force', 'false') in 'true')

                    if need_injection:
                        injector = utils_injection.Injection(params, env)
                        if not injector:
                            self.log.error("Failed to initialize injection")
                            if force_injection:
                                raise Exception("Failed to inject"
                                                "workload and/or fault")

                        if not injector.start() and force_injection:
                            msg = "Failed to inject workload/fault"
                            raise exceptions.InjectionFail(msg)
                        # Sleep specified time after injection
                        delay = int(params.get('sleep_after_injection', 3))
                        logging.info("Sleep %d seconds before running test" %
                                     delay)
                        time.sleep(delay)

                    # Run the test function
                    self.log.info(
                        "Start to run NFV test: '%s:%s:%s'" %
                        (test_script, class_name, params.get('func_name')))
                    try:
                        comp.setup()
                        func = getattr(comp, params.get('func_name', 'test'))
                        func()
                    finally:
                        self.__safe_env_save(env)

                except Exception, e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    stacktrace.log_exc_info(sys.exc_info(),
                                            logger='avocado.test')
                    logging.debug("Exception happened during running test")
                    raise e

            finally:
                try:
                    comp.teardown()
                except Exception, e:
                    self.log.error("Exception happened during teardown: %s" %
                                   e)

                # Postprocess
                try:
                    try:
                        # Stop injection
                        if need_injection:
                            injector.stop()

                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception, e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored
                    if params.get('sleep_after_test') is not None:
                        time.sleep(int(params.get('sleep_after_test')))

                    # Perform health check after test if needed
                    health_check = self.params.get(
                        "perform_health_check_after_case", "false")
                    health_check = health_check.lower() == "true"
                    if health_check:
                        self._stop_logging()
                        self._start_logging_hc()
                        cloud_manager = CloudManager(params, env)
                        nodes = cloud_manager.get_controller_node(
                            select_policy="all")
                        nodes.extend(
                            cloud_manager.get_compute_node(
                                select_policy="all"))
                        self._run_health_check(nodes)
                        self._stop_logging_hc()
                        self._start_logging()

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #10
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        test_script = self.params.get('script')
        class_name = self.params.get('class_name')
        # Import the module
        mod_name = 'cloudtest.tests.ceph_api.tests.%s.%s' % \
                (params.get('sds_mgmt_test_type'), test_script)
        test_module = importlib.import_module(mod_name)

        for _, obj in inspect.getmembers(test_module):
            if (inspect.isclass(obj) and obj.__name__ == class_name
                    and inspect.getmodule(obj) == test_module):
                test_class = obj
                break
        self.log.info("Initialize test class: %s" % class_name)

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({
            "func_at_exit": utils_env.cleanup_env,
            "args": (env_filename, self.env_version),
            "once": True
        })
        self.runner_queue.put({"func_at_exit": cleanup_token, "once": True})
        comp = test_class(params, env)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    self.log.info("Start to run ceph management API test")
                    try:
                        comp.setup()
                        func = getattr(comp, params.get('func_name', 'test'))
                        # fixme: To solve create monitor hang
                        if 'test_monitors' in test_script and \
                           'test_create' in params.get('func_name'):
                            t1 = threading.Thread(target=func)
                            t1.start()
                            time.sleep(20)
                        else:
                            func()
                    finally:
                        self.__safe_env_save(env)

                except Exception, e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    stacktrace.log_exc_info(sys.exc_info(),
                                            logger='avocado.test')
                    logging.debug("Exception happened during running test")
                    raise e

            finally:
                comp.teardown()
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception, e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
            raise exceptions.JobError("Aborted job as config specified.")
Example #11
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])

        if params.get('prepare_resource').lower() == 'true':
            self.log.info("Start to prepare resource for tempest...")
            resource_util = ConfigTempest(params)
            resource_util.set_resources()
            resource_util.prepare_tempest_log()
            resource_util.prepare_images_tempest()
            resource_util.gen_tempest_conf()
            self.log.info("Prepare resource for tempest done...")

        test_passed = False

        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    test_name = self.params.get("id")
                    smoke_str = "\\[.*\\bsmoke\\b.*\\]"
                    self.log.info("Try to run tempest test: %s" % test_name)
                    cloudtest_dir = os.environ.get("CLOUDTEST_SOURCEDIR")
                    tempest_dir = os.path.join(cloudtest_dir, 'dependencies',
                                               'Tempest', 'tempest')
                    self.log.info("Try to change to tempest dir: %s" %
                                  tempest_dir)
                    os.chdir(tempest_dir)
                    process.run("testr init", ignore_status=True, shell=True)
                    process.run(cmd="find . -name '*.pyc' -delete",
                                ignore_status=True,
                                shell=True)

                    cmd = "testr run --subunit"
                    if test_name != 'tempest' and \
                                    test_name != 'tempest_smoke':
                        # Run module, suite, class or single case
                        cmd += ' %s' % self.name.name

                    if self.params.get('tempest_run_type') in 'smoke':
                        cmd += ' %s' % smoke_str

                    if params.get('tempest_run_mode') == 'parallel':
                        cmd += ' --parallel'

                    self.log.info('Try to run command: %s' % cmd)
                    proc = subprocess.Popen(cmd,
                                            shell=True,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)
                    proc2 = subprocess.Popen('subunit-trace -n -f',
                                             stdin=proc.stdout,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE,
                                             shell=True)
                    test_timeout = params.get('tempest_test_timeout', 1200)
                    end_time = time.time() + int(test_timeout)
                    while time.time() < end_time:
                        line = proc2.stdout.readline()
                        if line.strip() != '':
                            self.log.info("[Tempest run] %s" % line)
                        if line == '' and proc2.poll() is not None:
                            break
                    else:
                        raise exceptions.TestError("Tempest test timed out"
                                                   " after %s seconds" %
                                                   test_timeout)

                    # Rerun failed case when needed
                    if params.get('auto_rerun_on_failure', 'false') == 'true':
                        curr_rerun = 0
                        try:
                            failed_case_file_path = \
                                os.path.join(self.logdir, "failed_cases.list")

                            failed_case_file = open(failed_case_file_path, 'w')
                        except IOError:
                            raise exceptions.TestError(
                                "Failed to create blank "
                                "failed_cases.list file")
                        while curr_rerun < int(params.get('auto_rerun_times')):
                            curr_rerun += 1

                            cmd1 = "testr last --subunit"
                            cmd2 = "subunit-filter -s --xfail"
                            cmd2 += " --with-tag=worker-0"
                            cmd3 = "subunit-ls"
                            cmd = cmd1 + ' | ' + cmd2 + ' | ' + cmd3
                            self.log.debug("Try to get failed cases from last"
                                           " run via command: %s" % cmd)
                            proc = subprocess.Popen(cmd1,
                                                    shell=True,
                                                    stdout=subprocess.PIPE,
                                                    stderr=subprocess.STDOUT)
                            proc1 = subprocess.Popen(cmd2,
                                                     shell=True,
                                                     stdin=proc.stdout,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.STDOUT)
                            proc2 = subprocess.Popen(
                                cmd3,
                                shell=True,
                                stdin=proc1.stdout,
                                stdout=failed_case_file,
                                stderr=subprocess.STDOUT).wait()
                            self.log.info("Start to #%d round of rerun..." %
                                          curr_rerun)
                            cmd_rerun = "testr run --load-list=%s" % \
                                        failed_case_file_path
                            proc3 = subprocess.Popen(cmd_rerun,
                                                     shell=True,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.STDOUT)
                            while True:
                                line = proc3.stdout.readline()
                                if line.strip() != '':
                                    self.log.info("[Tempest rerun #%d] %s" %
                                                  (curr_rerun, line))
                                if line == '' and proc3.poll() is not None:
                                    break

                except Exception:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    self.log.debug("Exception happened during running test")
                    raise

            finally:
                # Postprocess
                try:
                    # Generate HTML report of tempest
                    latest_id = 0
                    testrepository_path = os.path.join(tempest_dir,
                                                       '.testrepository')
                    for _, _, files in os.walk(testrepository_path):
                        for stream_file in files:
                            try:
                                last_stream = int(stream_file)
                            except (TypeError, ValueError):
                                last_stream = 0
                            latest_id = max(latest_id, last_stream)
                    self.log.info("The last result stream id: %d" % latest_id)

                    cmd_gen_html = "subunit2html %s/%d %s" % (
                        testrepository_path, latest_id,
                        os.path.join(self.job.logdir, 'tempest_result.html'))
                    self.log.info("Try to generate HTML report for tempest...")
                    process.run(cmd_gen_html, shell=True, ignore_status=False)

                    # Analyze test result
                    result = process.run(
                        'testr last --subunit | subunit-stats',
                        shell=True,
                        ignore_status=False).stdout
                    self.log.info("Tempest result:\n%s" % result)
                    total_num = re.findall(r'Total tests:\s*(\d+)', result)[0]
                    passed_num = re.findall(r'Passed tests:\s*(\d+)', result)[0]
                    skipped_num = \
                        re.findall(r'Skipped tests:\s*(\d+)', result)[0]
                    if int(total_num) != int(passed_num) + int(skipped_num):
                        raise exceptions.TestFail("Tempest result failed")

                    try:
                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                    except Exception, e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)
                finally:
                    pass
                    # if self.__safe_env_save(env):
                    #     env.destroy()   # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #12
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])

        # Set environment variables for OpenStack
        utils_misc.set_openstack_environment()

        test_passed = False
        vm_reliability_test_dir = os.path.join(data_dir.CLOUDTEST_TEST_DIR,
                                               'vm_reliability_tester')

        self._generate_openrc_py_file(vm_reliability_test_dir)
        self._generate_config_ini_file(vm_reliability_test_dir, False)
        self._generate_config_ini_file(vm_reliability_test_dir, True)
        self._generate_vm_list_csv_file(vm_reliability_test_dir)

        try:
            try:
                try:
                    self.log.info("start to execute vm reliability test")
                    execute_cmd = "python %s" % os.path.join(
                        vm_reliability_test_dir, "vm-reliability-tester.py")
                    process = subprocess.Popen(execute_cmd,
                                               shell=True,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.STDOUT)
                    while True:
                        line = process.stdout.readline()
                        if line.strip() != '':
                            self.log.info("[vm reliability test run] %s" %
                                          line)
                        if line == '' and process.poll() is not None:
                            break
                    if process.returncode != 0:
                        test_passed = False
                        raise exceptions.TestFail(
                            "vm reliability test failed, return code is %s" %
                            process.returncode)
                    self._collect_csv_result(vm_reliability_test_dir,
                                             self.logdir)
                    algorithm_params = self._get_algorithm_result(
                        vm_reliability_test_dir, "dweibull")
                    self.log.info(algorithm_params)
                    success_rate = self._get_success_rate(
                        vm_reliability_test_dir)
                    if success_rate >= float(params.get('vm_success_rate')):
                        test_passed = True
                    else:
                        raise exceptions.TestFail(
                            "can not reach the success rate threshold")
                    self.verify_background_errors()
                except Exception:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    self.log.debug("Exception happened during running test")
                    raise

            finally:
                pass
                #     if self.__safe_env_save(env):
                #         env.destroy()   # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #13
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        test_passed = False
        t_type = None

        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    test_name = self.params.get("id")
                    self.log.info("Try to run shaker test: %s" % test_name)
                    working_dir = os.getcwd()
                    shaker_dir = params.get('shaker_path')
                    scenario = params.get('scenario')
                    self.log.info("Try to change to shaker dir: %s" %
                                  shaker_dir)
                    os.chdir(shaker_dir)

                    test_name = test_name.replace('.', '/')
                    s_len = len(test_name)
                    test_name = test_name[len('shaker/scenario_list/'):s_len]
                    report_name = test_name.replace('/', '_')
                    conf_file = params.get('conf_path') + "shaker.conf"
                    output_path = params.get('output_path')
                    test_name = shaker_dir + "/scenarios/" + test_name

                    cmd = "shaker --debug --config-file %s " \
                          "--scenario %s.yaml --output %s%s.json" % \
                          (conf_file, test_name, output_path, report_name)

                    self.log.info('Try to run command: %s' % cmd)
                    process.run(cmd, shell=True, ignore_status=False)

                except Exception:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    self.log.debug("Exception happened during running test")
                    raise

            finally:
                try:
                    cmd_gen_html = "shaker-report --input %s%s.json " \
                                   "--report %s%s.html" % \
                                   (output_path, report_name,
                                    output_path, report_name)
                    self.log.debug("Try to generate HTML report for shaker...")
                    process.run(cmd_gen_html, shell=True, ignore_status=False)
                    cmd = "cat %s%s.json" % (output_path, report_name)
                    result = process.run(cmd, shell=True,
                                         ignore_status=False).stdout
                    pat = ' "status": (.*) '
                    error_info = re.findall(pat, result)[0]
                    if error_info.find('error') != -1:
                        test_passed = False
                    else:
                        test_passed = True

                finally:
                    pass

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #14
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            self.log.info("    %s = %s", key, params[key])

        env_filename = os.path.join(data_dir.get_tmp_dir(),
                                    params.get("env", "env"))
        env = utils_env.Env(env_filename, self.env_version)
        self.runner_queue.put({"func_at_exit": utils_env.cleanup_env,
                               "args": (env_filename, self.env_version),
                               "once": True})

        test_passed = False

        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Run the test function
                    self.log.info("Start to run benchmark test")
                    os.chdir('cloudtest/tests/benchmarker/perfkit/')
                    utils_misc.set_openstack_environment()
                    try:
                        cmd = "python pkb.py --cloud=OpenStack"
                        cmd += ' --benchmarks=%s' % params.get(
                            'benchmarker_name')

                        flavor = params.get('flavor_name', '2-2048-40')
                        cmd += " --machine_type=%s" % flavor

                        net_name = params.get('network_name', 'share_net')
                        cmd += ' --openstack_network=%s' % net_name

                        if params.get('floatingip_pool_name'):
                            cmd += (' --openstack_floating_ip_pool=%s' %
                                    params.get('floatingip_pool_name'))

                        if params.get('volume_size') is not None:
                            cmd += (' --openstack_volume_size=%s' %
                                    params.get('volume_size'))

                        self.log.info("Start running benchmark via command: %s"
                                      % cmd)
                        result = process.run(cmd, shell=True)
                        if result.exit_status != 0:
                            self.log.error(result.stderr)
                        self.log.info(result.stdout)

                    finally:
                        self.__safe_env_save(env)

                except Exception:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    logging.debug("Exception happened during running test")
                    raise

            finally:
                # Postprocess
                try:
                    try:
                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = \
                            funcatexit.run_exitfuncs(env, params.get(
                                'ct_type'))
                        if error_message:
                            logging.error(error_message)
                    except Exception, e:
                        if test_passed:
                            raise
                        self.log.error("Exception raised during "
                                       "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)
Example #15
    def _runTest(self):
        params = self.params

        # If a dependency test prior to this test has failed, let's fail
        # it right away as TestNA.
        if params.get("dependency_failed") == 'yes':
            raise exceptions.TestNotFoundError("Test dependency failed")

        # Report cloud test version
        # logging.info(version.get_pretty_version_info())
        # Report the parameters we've received and write them as keyvals
        self.log.info("Test parameters:")
        keys = params.keys()
        keys.sort()
        for key in keys:
            if key != 'test_cases':
                self.log.info("    %s = %s", key, params[key])

        self.ct_type = self.params.get('ct_type')
        run_func = self._run_performance_test

        # TODO: the environment file is deprecated code, and should be removed
        # in future versions. Right now, it's being created on an Avocado temp
        # dir that is only persisted during the runtime of one job, which is
        # different from the original idea of the environment file (which was
        # persist information accross cloud-test/avocado-ct job runs)
        env_filename = os.path.join("/var/tmp/rally_env")
        env = utils_env.Env(env_filename, self.env_version)
        if self.ct_type == 'stability':
            if ((params.get('prepare_resource').lower() == 'true')
                    and not (env.get_status_for_stability_resources())):
                env.set_status_for_stability_resources("ready")
                resource_util = ConfigStability(params)
                resource_util.create_resources_for_stability(params)

        test_passed = False
        try:
            try:
                try:
                    # Preprocess
                    # try:
                    #     params = env_process.preprocess(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)

                    # Initialize injection tests including workload and fault
                    need_injection = (
                            self.params.get('workload_injection') in 'true' or
                            self.params.get('fault_injection') in 'true')

                    force_injection = (
                            self.params.get('workload_injection_force', 'false')
                            in 'true' or
                            self.params.get('fault_injection_force', 'false')
                            in 'true')

                    if need_injection:
                        injector = utils_injection.Injection(params, env)
                        if not injector:
                            self.log.error("Failed to initialize injection")
                            if force_injection:
                                raise Exception("Failed to inject"
                                                "workload and/or fault")

                        if not injector.start() and force_injection:
                            msg = "Failed to inject workload/fault"
                            raise exceptions.InjectionFail(msg)
                        # Sleep specified time after injection
                        delay = int(params.get('sleep_after_injection', 3))
                        logging.info("Sleep %d seconds before running test" %
                                     delay)
                        time.sleep(delay)

                    # Run the test function
                    self.log.info("Start to run performance test")
                    try:
                        run_func(params, env)
                    finally:
                        self.__safe_env_save(env)

                except Exception, e:
                    # try:
                    #     env_process.postprocess_on_error(self, params, env)
                    # finally:
                    #     self.__safe_env_save(env)
                    logging.debug("Exception happened during running test")
                    raise e

            finally:

                # Postprocess
                try:
                    try:
                        # Stop injection
                        if need_injection:
                            injector.stop()

                        params['test_passed'] = str(test_passed)
                        # env_process.postprocess(self, params, env)
                        error_message = funcatexit.run_exitfuncs(
                            env, self.ct_type)
                        if error_message:
                            logging.error(error_message)
                    except Exception, e:
                        if test_passed:
                            raise
                        self.log.error(
                            "Exception raised during "
                            "postprocessing: %s", e)

                finally:
                    if self.__safe_env_save(env):
                        env.destroy()  # Force-clean as it can't be stored

        except Exception, e:
            if params.get("abort_on_error") != "yes":
                raise
            # Abort on error
            self.log.info("Aborting job (%s)", e)