import json
import sys


def update_test_passed(test_file, tests_passed):
    # Judge helpers (get_file_path_in_judge_dir, TestConfig, run_tests, etc.)
    # are assumed to be imported elsewhere in this module.
    problem_mapping_file_path = get_file_path_in_judge_dir(
        'problem_mapping.js')
    JS_BEGIN_PATTERN, JS_END_PATTERN = 'run(', ');'

    # Strip the JavaScript wrapper so the remaining payload parses as JSON.
    with open(problem_mapping_file_path) as problem_mapping_file:
        chapter_to_problem_to_language_solution_mapping = json.loads(
            problem_mapping_file.read().replace(JS_BEGIN_PATTERN, '').replace(
                JS_END_PATTERN, ''))

    # Record the pass/fail status of this test file wherever it is listed.
    test_file = 'Python: ' + test_file
    for chapter in chapter_to_problem_to_language_solution_mapping.values():
        for _, language_solution_mapping in chapter.items():
            if test_file in language_solution_mapping:
                language_solution_mapping[test_file]['passed'] = tests_passed

    # Write the updated mapping back, restoring the JavaScript wrapper.
    with open(problem_mapping_file_path, 'w') as problem_mapping_file:
        problem_mapping_file.write(JS_BEGIN_PATTERN)
        json.dump(
            chapter_to_problem_to_language_solution_mapping,
            problem_mapping_file,
            indent=4)
        problem_mapping_file.write(JS_END_PATTERN)
def generic_test_main(test_data_file,
                      test_func,
                      comparator=None,
                      res_printer=None):
    """The main test starter.

    :param test_data_file: file with test data
    :param test_func: function to be tested
    :param comparator: custom comparator. A function that accepts
        (expected, computed result) and returns a boolean value
    :param res_printer: function for customized printing
    """
    try:
        with open(get_file_path_in_judge_dir(
                'config.json')) as config_file_data:
            config_override = json.load(config_file_data)

        commandline_args = sys.argv[1:]
        config = TestConfig.from_command_line(
            test_data_file, config_override['timeoutSeconds'],
            config_override['numFailedTestsBeforeStop'], commandline_args)

        set_output_opts(config.tty_mode, config.color_mode)

        test_handler = GenericTestHandler(test_func, comparator=comparator)
        return run_tests(test_handler, config, res_printer)
    except RuntimeError as e:
        print(
            '\nCritical error({}): {}'.format(e.__class__.__name__, e),
            file=sys.stderr)
        return TestResult.RUNTIME_ERROR
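# Usage sketch (assumptions flagged): a per-problem test file would import
# this module and call generic_test_main with its own data file and solution
# function. The import path 'test_framework.generic_test', the data file
# 'example_problem.tsv', and the function 'example_solution' are hypothetical
# placeholders, not names guaranteed by the judge.
#
#     from test_framework import generic_test
#
#     def example_solution(x):
#         return x * 2
#
#     if __name__ == '__main__':
#         generic_test.generic_test_main('example_problem.tsv',
#                                        example_solution)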
def update_test_passed(test_file, tests_passed):
    problem_mapping_file_path = get_file_path_in_judge_dir(
        'problem_mapping.js')
    js_begin_pattern, js_end_pattern = 'problem_mapping = ', ';'

    with open(problem_mapping_file_path) as problem_mapping_file:
        chapter_to_problem_to_language_solution_mapping = json.loads(
            problem_mapping_file.read().replace(js_begin_pattern, '').replace(
                js_end_pattern, ''))

    test_file = 'Python: ' + test_file
    for chapter in chapter_to_problem_to_language_solution_mapping.values():
        for _, language_solution_mapping in chapter.items():
            if test_file in language_solution_mapping:
                language_solution_mapping[test_file]['passed'] = tests_passed

    with open(problem_mapping_file_path, 'w') as problem_mapping_file:
        problem_mapping_file.write(js_begin_pattern)
        json.dump(chapter_to_problem_to_language_solution_mapping,
                  problem_mapping_file,
                  indent=4)
        problem_mapping_file.write(js_end_pattern)
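# Illustrative shape of problem_mapping.js assumed by the version above.
# Chapter, problem, and file names here are hypothetical; only the
# 'problem_mapping = ... ;' wrapper and the 'passed' flag come from the code:
#
#     problem_mapping = {
#         "Chapter 1": {
#             "Example Problem": {
#                 "Python: example_problem.py": {
#                     "passed": false
#                 }
#             }
#         }
#     };
#
# A test runner could then record a result with, for example:
#
#     update_test_passed('example_problem.py', True)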
def update_test_passed(test_file, tests_passed): problem_mapping_file_path = get_file_path_in_judge_dir( "problem_mapping.js") js_begin_pattern, js_end_pattern = "problem_mapping = ", ";" with open(problem_mapping_file_path) as problem_mapping_file: chapter_to_problem_to_language_solution_mapping = json.loads( problem_mapping_file.read().replace(js_begin_pattern, "").replace( js_end_pattern, "")) test_file = "Python: " + test_file for chapter in chapter_to_problem_to_language_solution_mapping.values(): for _, language_solution_mapping in chapter.items(): if test_file in language_solution_mapping: language_solution_mapping[test_file]["passed"] = tests_passed with open(problem_mapping_file_path, "w") as problem_mapping_file: problem_mapping_file.write(js_begin_pattern) json.dump( chapter_to_problem_to_language_solution_mapping, problem_mapping_file, indent=4, ) problem_mapping_file.write(js_end_pattern) return