def run_tests(handler, config, res_printer):
    test_data = split_tsv_file(
        os.path.join(config.test_data_dir, config.test_data_file))
    handler.parse_signature(test_data[0])

    test_nr = 0
    tests_passed = 0
    total_tests = len(test_data) - 1
    durations = []
    result = TestResult.FAILED

    for test_case in test_data[1:]:
        test_nr += 1

        # The last field of each test row is the explanation; it is not used
        # when running the test, so we extract it here.
        test_explanation = test_case.pop()

        test_timer = None
        test_failure = TestFailure()

        try:
            test_timer = handler.run_test(config.timeout_seconds, test_case)
            result = TestResult.PASSED
            tests_passed += 1
            durations.append(test_timer.get_microseconds())
        except TestFailure as exc:
            result = TestResult.FAILED
            test_failure = exc
        except TimeoutException as exc:
            result = TestResult.TIMEOUT
            test_timer = exc.get_timer()
        except RecursionError:
            result = TestResult.STACK_OVERFLOW
        except RuntimeError:
            raise
        except Exception as exc:
            result = TestResult.UNKNOWN_EXCEPTION
            test_failure = TestFailure(exc.__class__.__name__).with_property(
                PropertyName.EXCEPTION_MESSAGE, str(exc))

        print_test_info(result, test_nr, total_tests,
                        test_failure.get_description(), test_timer)

        if result != TestResult.PASSED:
            if config.verbose:
                if not handler.expected_is_void():
                    test_case.pop()
                if test_explanation not in ('', 'TODO'):
                    test_failure.with_property(PropertyName.EXPLANATION,
                                               test_explanation)
                print_failed_test(handler.param_names(), test_case,
                                  test_failure, res_printer)
            tests_not_passed = test_nr - tests_passed
            if tests_not_passed >= config.num_failed_tests_before_stop:
                break

    def update_test_passed(test_file, tests_passed):
        problem_mapping_file_path = get_file_path_in_judge_dir(
            'problem_mapping.js')
        JS_BEGIN_PATTERN, JS_END_PATTERN = 'run(', ');'
        with open(problem_mapping_file_path) as problem_mapping_file:
            chapter_to_problem_to_language_solution_mapping = json.loads(
                problem_mapping_file.read()
                .replace(JS_BEGIN_PATTERN, '')
                .replace(JS_END_PATTERN, ''))

        test_file = 'Python: ' + test_file
        for chapter in chapter_to_problem_to_language_solution_mapping.values():
            for language_solution_mapping in chapter.values():
                if test_file in language_solution_mapping:
                    language_solution_mapping[test_file]['passed'] = tests_passed
                    with open(problem_mapping_file_path,
                              'w') as problem_mapping_file:
                        problem_mapping_file.write(JS_BEGIN_PATTERN)
                        json.dump(
                            chapter_to_problem_to_language_solution_mapping,
                            problem_mapping_file,
                            indent=4)
                        problem_mapping_file.write(JS_END_PATTERN)
                    return

    if config.update_js:
        update_test_passed(config.test_file, tests_passed)

    print()

    if config.verbose:
        print_post_run_stats(tests_passed, total_tests, durations)
    return result
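
# A minimal sketch of the problem_mapping.js round trip performed by
# update_test_passed above: the file is assumed to be a JSON object wrapped in
# a JavaScript call, "run({...});".  The helper names below are illustrative
# and not part of the original judge code.
import json


def load_js_wrapped_json(path, begin='run(', end=');'):
    """Strip the JS wrapper and parse the embedded JSON mapping."""
    with open(path) as f:
        return json.loads(f.read().replace(begin, '').replace(end, ''))


def dump_js_wrapped_json(mapping, path, begin='run(', end=');'):
    """Re-wrap the mapping in the JS call before writing it back."""
    with open(path, 'w') as f:
        f.write(begin)
        json.dump(mapping, f, indent=4)
        f.write(end)
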
def run_tests(handler, config, res_printer):
    test_data = split_tsv_file(
        os.path.join(config.test_data_dir, config.test_data_file))
    handler.parse_signature(test_data[0])

    metric_names = config.metric_names_override(handler.metric_names())

    test_nr = 0
    tests_passed = 0
    total_tests = len(test_data) - 1
    metrics = []
    durations = []
    result = TestResult.FAILED

    for test_case in test_data[1:]:
        test_nr += 1

        # The last field of each test row is the explanation; it is not used
        # when running the test, so we extract it here.
        test_explanation = test_case.pop()

        test_output = TestOutput()
        test_failure = TestFailure()

        try:
            test_output = handler.run_test(config.timeout_seconds,
                                           config.metrics_override, test_case)
            result = TestResult.PASSED
            tests_passed += 1
            metrics.append(test_output.metrics)
            durations.append(test_output.timer.get_microseconds())
        except TestFailure as exc:
            result = TestResult.FAILED
            test_failure = exc
        except TimeoutException as exc:
            result = TestResult.TIMEOUT
            test_output.timer = exc.get_timer()
        except RecursionError:
            result = TestResult.STACK_OVERFLOW
        except RuntimeError:
            raise
        except Exception as exc:
            result = TestResult.UNKNOWN_EXCEPTION
            test_failure = TestFailure(exc.__class__.__name__).with_property(
                PropertyName.EXCEPTION_MESSAGE, str(exc))

        print_test_info(result, test_nr, total_tests,
                        test_failure.get_description(), test_output.timer)

        if result != TestResult.PASSED:
            if not handler.expected_is_void():
                test_case.pop()
            if test_explanation not in ('', 'TODO'):
                test_failure.with_property(PropertyName.EXPLANATION,
                                           test_explanation)
            print_failed_test(handler.param_names(), test_case, test_failure,
                              res_printer)
            tests_not_passed = test_nr - tests_passed
            if tests_not_passed >= config.num_failed_tests_before_stop:
                break

    if config.update_js:
        update_test_passed(config.test_file, tests_passed)

    print()

    if durations:
        complexity = ''
        if metric_names and metrics and config.analyze_complexity:
            show_complexity_notification()

        print_post_run_stats(tests_passed, total_tests, complexity, durations)
    return result
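
# split_tsv_file is not shown in these examples.  Judging by how its result is
# used (row 0 holds the signature, and each later row is a list of string
# fields whose last entry is the test explanation), a minimal stand-in could
# look like the sketch below; the real helper may handle encoding or quoting
# differently.
import csv


def split_tsv_file(path):
    """Read a tab-separated file into a list of rows of string fields."""
    with open(path, newline='') as tsv_file:
        return list(csv.reader(tsv_file, delimiter='\t'))
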
def run_tests(test_data_path, handler, timeout, stop_on_error, res_printer):
    test_data = split_tsv_file(test_data_path)

    handler.parse_signature(test_data[0])

    param_names = handler.param_names()
    first_test_idx = 1
    test_nr = 0
    total_tests = len(test_data) - first_test_idx
    tests_passed = 0
    durations = []

    for test_case in test_data[first_test_idx:]:
        test_nr += 1

        test_explanation = test_case.pop()

        result = TestResult.FAILED
        test_output = None
        diagnostic = ''

        try:
            if timeout != 0:
                with concurrent.futures.ThreadPoolExecutor(
                        max_workers=1) as executor:
                    future = executor.submit(handler.run_test, test_case)
                    test_output = future.result(timeout=timeout)
                    result = TestResult.PASSED if test_output.comparison_result \
                        else TestResult.FAILED
            else:
                test_output = handler.run_test(test_case)
                result = TestResult.PASSED if test_output.comparison_result \
                    else TestResult.FAILED
        except TestFailureException as exc:
            result = TestResult.FAILED
            diagnostic = str(exc)
        except concurrent.futures.TimeoutError:
            result = TestResult.TIMEOUT
        except RecursionError:
            result = TestResult.STACK_OVERFLOW
        except RuntimeError:
            raise
        except Exception as exc:
            result = TestResult.UNKNOWN_EXCEPTION
            diagnostic = exc.__class__.__name__ + ': ' + str(exc)

        if test_output is None:
            test_output = TestOutput(False, TestTimer())
            # Record the expected value if execution ended with an exception
            if not handler.expected_is_void():
                test_output.expected = test_case[-1]

        print_test_info(result, test_nr, total_tests, diagnostic,
                        test_output.timer)
        tests_passed += 1 if result == TestResult.PASSED else 0
        if test_output.timer.has_valid_result():
            durations.append(test_output.timer.get_microseconds())

        if result != TestResult.PASSED and stop_on_error:
            if not handler.expected_is_void():
                test_case = test_case[:-1]
            print_failed_test(param_names, test_case, test_output,
                              test_explanation, res_printer)
            break

    print()
    if stop_on_error:
        if durations:
            print("Average running time: {}".format(
                duration_to_string(statistics.mean(durations))))
            print("Median running time:  {}".format(
                duration_to_string(statistics.median(durations))))
        if tests_passed < total_tests:
            print("*** You've passed {}/{} tests. ***".format(
                tests_passed, total_tests))
        else:
            print("*** You've passed ALL tests. Congratulations! ***")