Code Example #1
File: runner.py Project: AlBartash/mcv-consoler
    def run_individual_task(self, task, *args, **kwargs):
        LOG.info("-" * 60)
        LOG.info("Starting task %s" % task)
        try:
            test_time = kwargs['db'][kwargs['tool_name']][task]
        except KeyError:
            LOG.info("You must update the times database: "
                     "there is no recorded time for %s", task)
        else:
            exp_time = utils.seconds_to_humantime(test_time)
            LOG.info("Expected time to complete the test: %s", exp_time)
        self._run_ostf_on_docker(task)
        LOG.info("-" * 60)
Code Example #2
File: runner.py Project: AlBartash/mcv-consoler
    def run_batch(self, tasks, *args, **kwargs):
        with self.store('rally.log', 'tempest.log'):
            tool_name = kwargs["tool_name"]
            all_time = kwargs["all_time"]
            elapsed_time = kwargs["elapsed_time"]

            # Note (ayasakov): the times DB stores the execution time of
            # each test. On the first run of each tool, calculate a
            # multiplier that captures the difference in execution time
            # between our reference cloud and the current cloud.

            db = kwargs.get('db')
            first_run = True
            multiplier = 1.0
            test_time = 0
            all_time -= elapsed_time

            self.create_cirros_image()
            self._setup_rally_on_docker()

            # NOTE(ogrytsenko): only test-suites are discoverable for tempest
            if not kwargs.get('run_by_name'):
                cid = self.container_id
                tasks, missing = self.discovery(cid).match(tasks)
                self.test_not_found.extend(missing)

            t = []
            tempest_task_results_details = {}
            LOG.info("Time start: %s UTC\n" % str(datetime.datetime.utcnow()))
            for task in tasks:
                LOG.info("-" * 60)
                task = task.replace(' ', '')
                if kwargs.get('event').is_set():
                    LOG.info("Keyboard interrupt. Set %s won't start" % task)
                    break
                time_start = datetime.datetime.utcnow()
                LOG.info('Running %s tempest set' % task)

                LOG.debug("Time start: %s UTC" % str(time_start))
                if not CONF.times.update:
                    try:
                        test_time = db[tool_name][task]
                    except KeyError:
                        test_time = 0

                    exp_time = utils.seconds_to_humantime(test_time *
                                                          multiplier)
                    msg = "Expected time to complete %s: %s"
                    if not test_time:
                        LOG.debug(msg, task, exp_time)
                    else:
                        LOG.info(msg, task, exp_time)

                self.run_individual_task(task, *args, **kwargs)

                time_end = datetime.datetime.utcnow()
                duration = time_end - time_start
                LOG.debug("Time end: %s UTC" % str(time_end))

                if CONF.times.update:
                    if tool_name in db.keys():
                        db[tool_name].update({task: duration.seconds})
                    else:
                        db.update({tool_name: {task: duration.seconds}})
                else:
                    if first_run:
                        first_run = False
                        if test_time:
                            multiplier = (float(duration.seconds) /
                                          float(test_time))
                    all_time -= test_time
                    percent = 1.0
                    if kwargs["all_time"]:
                        percent -= float(all_time) / float(kwargs["all_time"])
                    percent = int(percent * 100)
                    percent = 100 if percent > 100 else percent

                    line = 'Completed %s%%' % percent
                    time_str = utils.seconds_to_humantime(all_time *
                                                          multiplier)
                    if all_time and multiplier:
                        line += ' and remaining time %s' % time_str

                    LOG.info(line)
                    LOG.info("-" * 60)

                t.append(self.task['test_cases'].keys())

                tempest_task_results_details[task] = {
                    # overall number of tests in the suite
                    "tests": self.task.get("tests", 0),
                    "test_succeed": self.task.get("success", 0),
                    "test_failed": self.task.get("failures", 0),
                    "test_skipped": self.task.get("skipped", 0),
                    "expected_failures": self.task.get("expected_failures", 0)
                }
                if self.failed_cases > CONF.tempest.max_failed_tests:
                    LOG.info('*LIMIT OF FAILED TESTS EXCEEDED! STOP RUNNING.*')
                    self.failure_indicator = \
                        TempestError.FAILED_TEST_LIMIT_EXCESS
                    break

            if CONF.times.update:
                with open(TIMES_DB_PATH, "w") as f:
                    json.dump(db, f)

            LOG.info("\nTime end: %s UTC" % str(datetime.datetime.utcnow()))
            self.cleanup_toolbox()
            self.cleanup_cirros_image()
            return {"test_failures": self.test_failures,
                    "test_success": self.test_success,
                    "test_not_found": self.test_not_found,
                    "time_of_tests": self.time_of_tests,
                    "tempest_tests_details": tempest_task_results_details,
                    }
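The progress estimate in run_batch is easy to miss among the logging, so here is a standalone recap of the same arithmetic. The dictionary contents and the measured duration are invented for illustration.

# Times DB entries for one tool, in seconds (illustrative values).
expected = {'test_a': 100, 'test_b': 200, 'test_c': 300}
all_time = sum(expected.values())  # 600 s expected in total

# First run: test_a took 150 s on this cloud instead of the recorded
# 100 s, so all remaining estimates are scaled by 1.5.
multiplier = 150.0 / expected['test_a']

all_time -= expected['test_a']  # 500 s of recorded time left
percent = int((1.0 - float(all_time) / 600) * 100)  # 16
remaining = all_time * multiplier  # 750.0 s estimated
print('Completed %d%% and remaining time %ds' % (percent, remaining))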
Code Example #3
File: consoler.py Project: AlBartash/mcv-consoler
    def _exec_tests(self, test_plan, **kwargs):
        elapsed_time_by_group = dict()
        dispatch_result = {}

        with open(TIMES_DB_PATH, 'r') as fd:
            db = json.load(fd)

        if not CONF.times.update:
            for key in test_plan.keys():
                batch = [x for x in (''.join(test_plan[key].split()
                                             ).split(',')) if x]
                elapsed_time_by_group[key] = self.all_time
                for test in batch:
                    test = test.replace(' ', '')
                    try:
                        self.all_time += db[key][test]
                    except KeyError:
                        LOG.info("You must update the database time tests. "
                                 "There is no time for %s", test)

            msg = "Expected time to complete all the tests: %s\n"
            time_str = utils.seconds_to_humantime(self.all_time)
            if self.all_time == 0:
                LOG.debug(msg, time_str)
            else:
                LOG.info(msg, time_str)

        for key in test_plan:
            if self.ctx.terminate_event.is_set():
                LOG.info("Catch Keyboard interrupt. "
                         "No more tests will be launched")
                break
            if CONF.times.update:
                elapsed_time_by_group[key] = 0
                # FIXME(dbogun): rewrite the times DB implementation.
                # We must reload the times DB because a plugin may have
                # updated it.
                with open(TIMES_DB_PATH, 'rt') as fd:
                    db = json.load(fd)

            dispatch_result[key] = {}
            try:
                spawn_point = os.path.dirname(__file__)
                path_to_runner = os.path.join(spawn_point, self.plugin_dir,
                                              key, "runner.py")
                module = imp.load_source("runner" + key, path_to_runner)
            except IOError as e:
                LOG.debug("Looks like there is no such runner: %s.", key)
                dispatch_result[key]['major_crash'] = 1
                LOG.error("The following exception has been caught: %s", e)
                LOG.debug(traceback.format_exc())
                self.failure_indicator = CAError.RUNNER_LOAD_ERROR
            except Exception:
                dispatch_result[key]['major_crash'] = 1
                LOG.error("Something went wrong. "
                          "Please check mcvconsoler logs")
                LOG.debug(traceback.format_exc())
                self.failure_indicator = CAError.RUNNER_LOAD_ERROR
            else:
                path = os.path.join(self.results_dir, key)
                os.mkdir(path)

                factory = getattr(module, CONF[key]['runner'])
                runner = factory(context.Context(
                    self.ctx,
                    work_dir=utils.WorkDir(
                        path, parent=self.ctx.work_dir_global)))

                batch = test_plan[key]
                if isinstance(batch, basestring):
                    batch = [batch]

                LOG.debug("Running {batch} for {key}"
                          .format(batch=len(batch),
                                  key=key))

                try:
                    run_failures = runner.run_batch(
                        batch,
                        compute=1,
                        event=self.ctx.terminate_event,
                        scenario=self.scenario,
                        tool_name=key,
                        db=db,
                        all_time=self.all_time,
                        elapsed_time=elapsed_time_by_group[key],
                        **kwargs
                    )

                    if len(run_failures['test_failures']) > 0:
                        if self.failure_indicator == CAError.NO_ERROR:
                            self.failure_indicator = runner.failure_indicator
                        else:
                            self.failure_indicator = \
                                ComplexError.SOME_SUITES_FAILED
                except subprocess.CalledProcessError as e:
                    if e.returncode == 127:
                        LOG.debug(("It looks like you are trying to use a "
                                   "wrong runner. No tests will be run in "
                                   "this group this time. Reply %s"), e)
                        self.failure_indicator = CAError.WRONG_RUNNER
                except Exception:
                    self.failure_indicator = CAError.UNKNOWN_EXCEPTION
                    LOG.error("Something went wrong. "
                              "Please check mcvconsoler logs")
                    LOG.debug(traceback.format_exc())
                else:
                    dispatch_result[key]['results'] = run_failures
                    dispatch_result[key]['batch'] = batch

        return dispatch_result
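_exec_tests treats the times DB as a plain JSON file that maps tool names to per-test durations in seconds. A minimal sketch of that layout follows; the file path and the entries are illustrative, not taken from the project.

import json

# Assumed shape of the times DB: tool name -> test name -> seconds.
example_db = {
    "tempest": {"smoke": 420, "full": 5400},
    "rally": {"boot-and-delete": 95},
}

with open("/tmp/times.json", "w") as fd:
    json.dump(example_db, fd)

# Mirrors the reload in _exec_tests when CONF.times.update is set.
with open("/tmp/times.json", "r") as fd:
    db = json.load(fd)

print(db["tempest"]["smoke"])  # 420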
Code Example #4
File: runner.py Project: AlBartash/mcv-consoler
    def run_batch(self, tasks, *args, **kwargs):
        """Runs a bunch of tasks."""

        tool_name = kwargs["tool_name"]
        all_time = kwargs["all_time"]
        elapsed_time = kwargs["elapsed_time"]
        try:
            max_failed_tests = CONF[tool_name]['max_failed_tests']
        except NoSuchOptError:
            max_failed_tests = config.DEFAULT_FAILED_TEST_LIMIT

        LOG.debug("The following tests will be run:")
        LOG.debug("\n".join(tasks))

        failures = 0

        # Note(ayasakov): the times DB stores the execution time of each
        # test. On the first run of each tool, calculate a multiplier
        # that captures the difference in execution time between our
        # reference cloud and the current cloud.

        db = kwargs.get('db')
        first_run = True
        multiplier = 1.0
        current_time = 0
        all_time -= elapsed_time

        self._validate_test_params(**kwargs)

        for task in tasks:
            LOG.info("-" * 60)
            if kwargs.get('event').is_set():
                LOG.info("Caught keyboard interrupt. "
                         "Task %s won't start", task)
                break
            time_start = datetime.datetime.utcnow()
            LOG.debug("Running task %s", task)
            LOG.debug("Time start: %s UTC", time_start)
            if not CONF.times.update:
                try:
                    current_time = db[tool_name][task]
                except KeyError:
                    current_time = 0

                msg = "Expected time to complete %s: %s"
                t = utils.seconds_to_humantime(current_time * multiplier)
                if not current_time:
                    LOG.debug(msg, task, t)
                else:
                    LOG.info(msg, task, t)

            # FIXME(dbogun): sort out exceptions handling
            try:
                if self.run_individual_task(task, *args, **kwargs):
                    self.test_success.append(task)
                else:
                    failures += 1
            except Exception:
                failures += 1
                LOG.debug(traceback.format_exc())

            time_end = datetime.datetime.utcnow()
            duration = time_end - time_start
            duration = duration.total_seconds()
            LOG.debug("Time end: %s UTC" % str(time_end))

            if CONF.times.update:
                if tool_name in db.keys():
                    db[tool_name].update({task: int(duration)})
                else:
                    db.update({tool_name: {task: int(duration)}})
            else:
                if first_run:
                    first_run = False
                    if current_time:
                        multiplier = duration / current_time
                all_time -= current_time
                percent = 1.0
                if kwargs["all_time"]:
                    percent -= float(all_time) / float(kwargs["all_time"])
                percent = int(percent * 100)
                percent = 100 if percent > 100 else percent

                line = 'Completed {} %'.format(percent)
                if all_time and multiplier:
                    t = utils.seconds_to_humantime(all_time * multiplier)
                    line = '{} and remaining time {}'.format(line, t)
                LOG.info(line)
                LOG.info("-" * 60)

            if failures >= max_failed_tests:
                LOG.info('*LIMIT OF FAILED TESTS EXCEEDED! STOP RUNNING.*')
                self.failure_indicator = self.get_error_code(tool_name)
                break

        if CONF.times.update:
            with open(config.TIMES_DB_PATH, "w") as f:
                json.dump(db, f)

        return {"test_failures": self.test_failures,
                "test_success": self.test_success,
                "test_not_found": self.test_not_found,
                "test_skipped": self.test_skipped,
                "time_of_tests": self.time_of_tests,
                "test_without_report": self.test_without_report}