Example #1
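
A TSV-driven test runner: the first row of the data file defines the handler
signature; each later row is one test case, which is run with a timeout,
classified by outcome, and reported both per test and in aggregate.
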
import os


def run_tests(handler, config, res_printer):
    test_data = split_tsv_file(
        os.path.join(config.test_data_dir, config.test_data_file))
    handler.parse_signature(test_data[0])

    metric_names = config.metric_names_override(handler.metric_names())

    test_nr = 0
    tests_passed = 0
    total_tests = len(test_data) - 1
    metrics = []
    durations = []
    result = TestResult.FAILED

    for test_case in test_data[1:]:
        test_nr += 1

        # The last field of each row is the test explanation; it is not used
        # to run the test, so strip it off before dispatching.
        test_explanation = test_case.pop()

        test_output = TestOutput()
        test_failure = TestFailure()

        try:
            test_output = handler.run_test(config.timeout_seconds,
                                           config.metrics_override, test_case)
            result = TestResult.PASSED
            tests_passed += 1
            metrics.append(test_output.metrics)
            durations.append(test_output.timer.get_microseconds())
        except TestFailure as exc:
            result = TestResult.FAILED
            test_failure = exc
        except TimeoutException as exc:
            result = TestResult.TIMEOUT
            test_output.timer = exc.get_timer()
        except RecursionError:
            result = TestResult.STACK_OVERFLOW
        except RuntimeError:
            # RecursionError is a subclass of RuntimeError and is handled
            # above; any other RuntimeError is re-raised rather than being
            # recorded as a test outcome.
            raise
        except Exception as exc:
            result = TestResult.UNKNOWN_EXCEPTION
            test_failure = TestFailure(exc.__class__.__name__).with_property(
                PropertyName.EXCEPTION_MESSAGE, str(exc))

        print_test_info(result, test_nr, total_tests,
                        test_failure.get_description(), test_output.timer)

        if result != TestResult.PASSED:
            if not handler.expected_is_void():
                # Drop the expected-result field so test_case matches
                # handler.param_names() when the failure is printed.
                test_case.pop()
            if test_explanation not in ('', 'TODO'):
                test_failure.with_property(PropertyName.EXPLANATION,
                                           test_explanation)
            print_failed_test(handler.param_names(), test_case, test_failure,
                              res_printer)
            tests_not_passed = test_nr - tests_passed
            if tests_not_passed >= config.num_failed_tests_before_stop:
                break

    if config.update_js:
        update_test_passed(config.test_file, tests_passed)

    print()

    if durations:
        complexity = ''
        if metric_names and metrics and config.analyze_complexity:
            # Only the notification is shown here; the complexity string
            # itself is never filled in, so the stats below receive ''.
            show_complexity_notification()

        print_post_run_stats(tests_passed, total_tests, complexity, durations)
    return result
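
Both examples lean on judge helpers that are not shown here (split_tsv_file,
the print_* functions, and the handler itself). As a rough sketch, inferred
only from the call sites above and not from the judge's real implementation,
the three data types could be stubbed like this:

import enum


class TestResult(enum.Enum):
    # Outcome categories assigned by the runner above.
    PASSED = 'PASSED'
    FAILED = 'FAILED'
    TIMEOUT = 'TIMEOUT'
    STACK_OVERFLOW = 'STACK_OVERFLOW'
    UNKNOWN_EXCEPTION = 'UNKNOWN_EXCEPTION'


class TestFailure(Exception):
    # A failure description plus chained (property, value) annotations,
    # mirroring the with_property(...) calls in run_tests.
    def __init__(self, description=''):
        super().__init__(description)
        self._description = description
        self._properties = []

    def with_property(self, name, value):
        self._properties.append((name, value))
        return self  # return self so calls can be chained

    def get_description(self):
        return self._description


class TestOutput:
    # Per-test metrics and timing; handler.run_test() is expected to
    # populate both fields.
    def __init__(self, timer=None, metrics=None):
        self.timer = timer
        self.metrics = metrics if metrics is not None else []

With stubs like these the runner above at least imports and type-checks; the
judge's real versions carry the timing and rendering details as well.
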
Example #2
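
Example #2 is a slimmer variant of the same runner: it drops the complexity
metrics, gates the failure details and post-run stats behind config.verbose,
and carries its problem_mapping.js updater inline as a nested helper.
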
import json
import os


def run_tests(handler, config, res_printer):
    test_data = split_tsv_file(
        os.path.join(config.test_data_dir, config.test_data_file))
    handler.parse_signature(test_data[0])

    test_nr = 0
    tests_passed = 0
    total_tests = len(test_data) - 1
    durations = []
    result = TestResult.FAILED

    for test_case in test_data[1:]:
        test_nr += 1

        # The last field of each row is the test explanation; it is not used
        # to run the test, so strip it off before dispatching.
        test_explanation = test_case.pop()

        test_timer = None
        test_failure = TestFailure()

        try:
            test_timer = handler.run_test(config.timeout_seconds, test_case)
            result = TestResult.PASSED
            tests_passed += 1
            durations.append(test_timer.get_microseconds())
        except TestFailure as exc:
            result = TestResult.FAILED
            test_failure = exc
        except TimeoutException as exc:
            result = TestResult.TIMEOUT
            test_timer = exc.get_timer()
        except RecursionError:
            result = TestResult.STACK_OVERFLOW
        except RuntimeError:
            # RecursionError is a subclass of RuntimeError and is handled
            # above; any other RuntimeError is re-raised rather than being
            # recorded as a test outcome.
            raise
        except Exception as exc:
            result = TestResult.UNKNOWN_EXCEPTION
            test_failure = TestFailure(exc.__class__.__name__).with_property(
                PropertyName.EXCEPTION_MESSAGE, str(exc))

        print_test_info(result, test_nr, total_tests,
                        test_failure.get_description(), test_timer)

        if result != TestResult.PASSED:
            if config.verbose:
                if not handler.expected_is_void():
                    # Drop the expected-result field so test_case matches
                    # handler.param_names() when the failure is printed.
                    test_case.pop()
                if test_explanation not in ('', 'TODO'):
                    test_failure.with_property(PropertyName.EXPLANATION,
                                               test_explanation)
                print_failed_test(handler.param_names(), test_case,
                                  test_failure, res_printer)
            tests_not_passed = test_nr - tests_passed
            if tests_not_passed >= config.num_failed_tests_before_stop:
                break

    # Local helper: rewrite the 'passed' count for this test file inside
    # problem_mapping.js, which stores JSON wrapped between 'run(' and ');'.
    def update_test_passed(test_file, tests_passed):
        problem_mapping_file_path = get_file_path_in_judge_dir(
            'problem_mapping.js')
        JS_BEGIN_PATTERN, JS_END_PATTERN = 'run(', ');'
        with open(problem_mapping_file_path) as problem_mapping_file:
            json_content = (problem_mapping_file.read()
                            .replace(JS_BEGIN_PATTERN, '')
                            .replace(JS_END_PATTERN, ''))
        chapter_to_problem_to_language_solution_mapping = json.loads(
            json_content)

        test_file = 'Python: ' + test_file
        for chapter in chapter_to_problem_to_language_solution_mapping.values():
            for language_solution_mapping in chapter.values():
                if test_file in language_solution_mapping:
                    language_solution_mapping[test_file]['passed'] = tests_passed
                    with open(problem_mapping_file_path,
                              'w') as problem_mapping_file:
                        problem_mapping_file.write(JS_BEGIN_PATTERN)
                        json.dump(
                            chapter_to_problem_to_language_solution_mapping,
                            problem_mapping_file,
                            indent=4)
                        problem_mapping_file.write(JS_END_PATTERN)
                    return

    if config.update_js:
        update_test_passed(config.test_file, tests_passed)

    print()

    if config.verbose:
        print_post_run_stats(tests_passed, total_tests, durations)
    return result
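
The nested update_test_passed treats problem_mapping.js as JSON sandwiched
between 'run(' and ');'. Its round trip reduces to the following
self-contained sketch (the file contents below are a made-up miniature, not
the judge's actual mapping):

import json

JS_BEGIN_PATTERN, JS_END_PATTERN = 'run(', ');'

# Hypothetical miniature of problem_mapping.js.
raw = 'run({"Ch. 1": {"Parity": {"Python: parity.py": {"passed": 0}}}});'

# Strip the JavaScript wrapper, leaving plain JSON.
mapping = json.loads(
    raw.replace(JS_BEGIN_PATTERN, '').replace(JS_END_PATTERN, ''))

# Record the new pass count for one solution file.
mapping['Ch. 1']['Parity']['Python: parity.py']['passed'] = 25

# Re-wrap as JavaScript, ready to be written back to the file.
updated = JS_BEGIN_PATTERN + json.dumps(mapping, indent=4) + JS_END_PATTERN
print(updated)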