Example #1
    def compare(self,
                testie: Testie,
                variable_list,
                all_results: Dataset,
                build,
                old_all_results,
                last_build,
                allow_supplementary=True,
                init_done=False) -> Tuple[int, int]:
        """
        Compare two sets of results for the given list of variables and returns the amount of failing test
        :param init_done: True if initialization for current testie is already done (init sections for the testie and its import)
        :param testie: One testie to get the config from
        :param variable_list:
        :param all_results:
        :param build:
        :param old_all_results:
        :param last_build:
        :param allow_supplementary:
        :return: the amount of failed tests (0 means all passed)
        """

        if not old_all_results:
            return 0, 0

        tests_passed = 0
        tests_total = 0
        supp_done = False
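        # A test may accumulate at most n_runs normal runs plus
        # n_supplementary_runs extra runs.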
        tot_runs = testie.config["n_runs"] + testie.config["n_supplementary_runs"]
        for v in variable_list:
            tests_total += 1
            run = Run(v)
            results_types = all_results.get(run)
            # TODO : some config could implement acceptable range no matter the old value
            if results_types is None or len(results_types) == 0:
                continue

            need_supp = False
            # Default to a passing comparison so ok/diff are always bound,
            # even when no old result exists for this run.
            ok = True
            diff = 0
            for result_type, result in results_types.items():
                if run in old_all_results and old_all_results[run] is not None:
                    old_result = old_all_results[run].get(result_type, None)
                    if old_result is None:
                        continue

                    ok, diff = self.accept_diff(testie, result, old_result)
                    if not ok and len(result) < tot_runs and allow_supplementary:
                        need_supp = True
                        break
                elif last_build:
                    if not testie.options.quiet_regression:
                        print("No old values for %s for version %s." %
                              (run, last_build.version))
                    old_all_results[run] = {}

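            # A result drifted outside the margin but still has spare run
            # budget: take extra measurements before declaring a failure.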
            if need_supp:
                if not testie.options.quiet_regression:
                    print(
                        "Difference of %.2f%% is outside acceptable margin for %s. Running supplementary tests..."
                        % (diff * 100, run.format_variables()))

                if not init_done:
                    testie.do_init_all(build=build,
                                       options=testie.options,
                                       do_test=testie.options.do_test)
                    init_done = True
                if hasattr(testie, 'late_variables'):
                    v = testie.late_variables.execute(v, testie)
                new_results_types, output, err = testie.execute(
                    build,
                    run,
                    v,
                    n_runs=testie.config["n_supplementary_runs"],
                    allowed_types={SectionScript.TYPE_SCRIPT})

                if new_results_types is not None:
                    supp_done = True
                    for result_type, results in new_results_types.items():
                        results_types[result_type] += results

                    if not testie.options.quiet_regression:
                        print("Results after supplementary tests:",
                              results_types)

                    all_results[run] = results_types
                    for result_type, result in results_types.items():
                        old_result = old_all_results[run].get(
                            result_type, None)
                        if old_result is None:
                            continue
                        ok, diff = self.accept_diff(testie, result, old_result)
                        if not ok:
                            break
                else:
                    ok = True

            if len(results_types) > 0:
                if not ok:
                    print(
                        "ERROR: Test %s is outside acceptable margin between %s and %s : difference of %.2f%% !"
                        % (testie.filename, build.version, last_build.version,
                           diff * 100))
                else:
                    tests_passed += 1
                    if not testie.options.quiet_regression:
                        print("Acceptable difference of %.2f%% for %s" %
                              ((diff * 100), run.format_variables()))

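        # Persist the merged dataset so supplementary runs are not repeated
        # on the next invocation.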
        if supp_done:
            build.writeversion(testie, all_results)
        return tests_passed, tests_total
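
A minimal sketch of how compare() might be driven, based only on calls visible in these listings (execute_all, load_results, variables); the check_regression helper, the regression object, and their wiring are assumptions, not part of the original code:

def check_regression(regression, testie, build, last_build):
    # Run the testie for the current build, then fetch the dataset
    # cached for the previous build (may be None on a first run).
    all_results, _ = testie.execute_all(build, testie.options)
    old_all_results = last_build.load_results(testie)

    passed, total = regression.compare(
        testie,
        testie.variables,          # every variable combination to check
        all_results,
        build,
        old_all_results,
        last_build,
        allow_supplementary=True)  # re-run borderline results before failing

    return passed == total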
Example #2
    def execute_all(
            self,
            build,
            options,
            prev_results: Dataset = None,
            do_test=True,
            allowed_types=SectionScript.ALL_TYPES_SET) -> Tuple[Dataset, bool]:
        """Execute script for all variables combinations. All tools reliy on this function for execution of the testie
        :param allowed_types:Tyeps of scripts allowed to run. Set with either init, scripts or both
        :param do_test: Actually run the tests
        :param options: NPF options object
        :param build: A build object
        :param prev_results: Previous set of result for the same build to update or retrieve
        :return: Dataset(Dict of variables as key and arrays of results as value)
        """

        init_done = False

        if SectionScript.TYPE_SCRIPT not in allowed_types:
            # If scripts are not in allowed_types, we have to force the init to run now
            self.do_init_all(build,
                             options,
                             do_test=do_test,
                             allowed_types=allowed_types)
            return {}, True

        all_results = {}
        for variables in self.variables:
            run = Run(variables)
            if hasattr(self, 'late_variables'):
                variables = self.late_variables.execute(variables, self)
            r_status, r_out, r_err = self.test_require(variables, build)
            if not r_status:
                if not self.options.quiet:
                    print("Requirement not met for %s" %
                          run.format_variables(self.config["var_hide"]))
                    if r_out.strip():
                        print(r_out.strip())
                    if r_err.strip():
                        print(r_err.strip())

                continue

            if prev_results and not options.force_test:
                run_results = prev_results.get(run, {})
                if run_results is None:
                    run_results = {}
            else:
                run_results = {}

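            # No results yet for this run: optionally walk back through the
            # repo history and reuse the most recent cached results.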
            if not run_results and options.use_last:
                for version in build.repo.method.get_history(build.version):
                    oldb = Build(build.repo, version)
                    r = oldb.load_results(self)
                    if r and run in r:
                        run_results = r[run]
                        break

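            # A cached entry is usable only if it covers every expected result type.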
            if any(result_type not in run_results
                   for result_type in self.config['results_expect']):
                run_results = {}

            have_new_results = False

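            # Only the missing runs are executed: the result type with the
            # fewest recorded runs determines how many are still needed.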
            n_runs = self.config["n_runs"]
            if not options.force_test and run_results:
                n_runs -= min(
                    len(results) for results in run_results.values())
            if n_runs > 0 and do_test:
                if not init_done:
                    self.do_init_all(build,
                                     options,
                                     do_test,
                                     allowed_types=allowed_types)
                    init_done = True
                if not self.options.quiet:
                    print(run.format_variables(self.config["var_hide"]))

                new_results, output, err = self.execute(
                    build,
                    run,
                    variables,
                    n_runs,
                    n_retry=self.config["n_retry"],
                    allowed_types={SectionScript.TYPE_SCRIPT})
                if new_results:
                    if self.options.show_full:
                        print("stdout:")
                        print(output)
                        print("stderr:")
                        print(err)
                    for k, v in new_results.items():
                        run_results.setdefault(k, []).extend(v)
                        have_new_results = True
            else:
                if not self.options.quiet:
                    print(run.format_variables(self.config["var_hide"]))

            if len(run_results) > 0:
                if not self.options.quiet:
                    if len(run_results) == 1:
                        print(list(run_results.values())[0])
                    else:
                        print(run_results)
                all_results[run] = run_results
            else:
                all_results[run] = {}

            # Save results
            if all_results and have_new_results:
                if prev_results:
                    prev_results[run] = all_results[run]
                    build.writeversion(self, prev_results)
                else:
                    build.writeversion(self, all_results)

        return all_results, init_done
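
A minimal sketch of a caller, using only names that appear in the listing (load_results, SectionScript.ALL_TYPES_SET, Run.format_variables); the setup of testie, build, and options is assumed:

all_results, init_done = testie.execute_all(
    build,
    options,
    prev_results=build.load_results(testie),  # reuse cached runs when present
    do_test=True,
    allowed_types=SectionScript.ALL_TYPES_SET)

for run, run_results in all_results.items():
    # Each value maps a result type to the list of per-run measurements.
    print(run.format_variables(), run_results)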