def report(cls):
    for _error in cls.errors:
        cprint(colors.ERROR, _error)
    if not cls.errors:
        if cls.warnings:
            cprint(colors.WARN, "verified, but with some warnings.")
        else:
            cprint(colors.OK, "verified.")
    for _warning in cls.warnings:
        cprint(colors.WARN, _warning)
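# The function above reads `cls.errors` and `cls.warnings`, so it presumably sits on a
# class that accumulates verification messages. A minimal sketch of such an accumulator
# (hypothetical name and shape, inferred only from the usage above, not the actual TPS class):
class VerificationState:
    errors = []
    warnings = []

    @classmethod
    def error(cls, message):
        # Record an error to be shown by report().
        cls.errors.append(message)

    @classmethod
    def warning(cls, message):
        # Record a warning to be shown by report().
        cls.warnings.append(message)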
def main():
    if len(sys.argv) > 1 and sys.argv[1] == '--bash-completion':
        sys.argv.pop(1)
        bc.print_all(bash_completion_list(sys.argv))
        sys.exit(0)

    parser = argparse.ArgumentParser(
        prog="tps export CMS",
        description="Exporter for CMS -- Contest Management System for IOI.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Prints verbose details on values, decisions, and commands being executed.",
    )
    parser.add_argument(
        "-o",
        "--output-name",
        metavar="<export-output-name>",
        help="Creates the export output with the given name.",
    )
    parser.add_argument(
        "-a",
        "--archive-format",
        metavar="<archive-format>",
        choices=get_archive_format_names(),
        default="zip",
        help="""\
Creates the export archive with the given format.
Available archive formats:
{}
Default archive format is '%(default)s'.
""".format("\n".join([
            " {} {}".format(f[0].ljust(10), f[1])
            for f in get_archive_formats()
        ])),
    )
    args = parser.parse_args()

    vp.enabled = args.verbose
    file_name = args.output_name if args.output_name else create_export_file_name()

    try:
        export_file = export(file_name, args.archive_format)
        if warnings:
            cprint(
                colors.WARN,
                "Successfully exported to '{}', but with warnings.".format(export_file))
        else:
            cprint(colors.SUCCESS,
                   "Successfully exported to '{}'.".format(export_file))
    except ExportFailureException as e:
        cprint(colors.FAIL, "Exporting failed: {}".format(e))
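# Example command-line usage of the exporter above (hedged: the exact invocation depends
# on how the TPS wrapper dispatches to this script; the flags and the 'zip' default are
# taken directly from the parser definition above):
#
#   tps export CMS                       # default output name, 'zip' archive
#   tps export CMS -v -o my-task         # verbose, explicit output name
#   tps export CMS --archive-format zip  # explicit archive format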
check_test_pattern_exists_in_list(test_name_list, SPECIFIED_TESTS_PATTERN)
test_name_list = filter(
    lambda test_name: test_name_matches_pattern(test_name, SPECIFIED_TESTS_PATTERN),
    test_name_list)

missing_tests = []
available_tests = []
for test_name in test_name_list:
    if test_exists(tests_dir, test_name):
        available_tests.append(test_name)
    else:
        missing_tests.append(test_name)

if missing_tests:
    cprint(colors.WARN, "Missing tests: " + (", ".join(missing_tests)))

for test_name in available_tests:
    command = [
        'bash',
        os.path.join(INTERNALS_DIR, 'invoke_test.sh'),
        tests_dir,
        test_name,
    ]
    wait_process_success(subprocess.Popen(command))

if missing_tests:
    cprint(
        colors.WARN,
        "Missing {} {}!".format(
            len(missing_tests),
            "tests" if len(missing_tests) != 1 else "test"))
if is_verdict_expected(subtask_result[0], subtask_result[1], expected_verdict):
    expected_verdict_args = ["match with expected"]
else:
    expected_verdict_args = ["expected: {}".format(expected_verdict)]

command = [
    'bash',
    os.path.join(INTERNALS_DIR, 'subtask_summary.sh'),
    subtask,
    str(len(tests)),
    str(testcases_run),
    '{:g}'.format(round(subtask_score, 2)),
    str(subtasks_data[subtask]['score']),
    subtask_result[1],
    subtask_result[2]
] + expected_verdict_args
wait_process_success(subprocess.Popen(command))

total_points += subtask_score
total_full_points += subtasks_data[subtask]['score']

color = colors.OK
if total_points == 0:
    color = colors.ERROR
elif total_points < total_full_points:
    color = colors.WARN
cprint(color, "{:g}/{} pts".format(round(total_points, 2), total_full_points))

if missing_tests:
    cprinterr(
        colors.WARN,
        "Missing {} {}!".format(
            len(missing_tests),
            "tests" if len(missing_tests) != 1 else "test"))
cprinterr(colors.WARN, "Missing tests: " + (", ".join(missing_tests)))

for test_name in available_tests:
    command = [
        'bash',
        os.path.join(INTERNALS_DIR, 'invoke_test.sh'),
        tests_dir,
        test_name,
    ]
    wait_process_success(subprocess.Popen(command))

print()
print("Subtask summary")
if solution_data is None:
    cprint(colors.WARN, "Solution does not exist in solutions.json. Skipped checking verdict")

subtasks_data = dict(load_json(SUBTASKS_JSON))['subtasks']
total_points = total_full_points = 0
unmatched_verdicts = []

for subtask_index, (subtask, tests) in enumerate(
        tu.get_subtasks_tests_dict_from_tests_dir(tests_dir).items()):
    subtask_result = None
    max_execution_time = None
    testcases_run = 0
    for test in tests:
        score = verdict = execution_time = None
        if not SKIP_CHECK:
            try:
                with open(os.path.join(LOGS_DIR, "{}.score".format(test)), 'r') as sf:
                    score = float(sf.readlines()[0].strip('\n'))
print(header_line)

subtasks_data = dict(load_json(SUBTASKS_JSON))['subtasks']
solutions_data = dict(load_json(SOLUTIONS_JSON))
unmatched_verdicts = []

for solution_filename, solution_data in solutions_data.items():
    command = [
        'bash',
        os.path.join(INTERNALS_DIR, 'compile_solution.sh'),
        os.path.join(SOLUTION_DIR, solution_filename)
    ]
    ret = subprocess.Popen(
        command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).wait()
    if ret != 0:
        cprint(colors.FAIL, "{} does not compile".format(solution_filename))
        unmatched_verdicts.append(
            (solution_filename, '-', 'Compilation Failed',
             solution_data.get("verdict", None)))
        continue

    for test_name in available_tests:
        command = [
            'bash',
            os.path.join(INTERNALS_DIR, 'invoke_test.sh'),
            tests_dir,
            test_name,
        ]
        wait_process_success(subprocess.Popen(command))

    total_points = 0
def warn(message):
    warnings.append(message)
    cprint(colors.WARN, message)
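# The snippets in this section rely on the TPS terminal-color helpers `colors`, `cprint`,
# and `cprinterr`, plus a module-level `warnings` list. A minimal stand-in for running the
# snippets in isolation (an assumption for illustration, not the actual TPS implementation):
import sys

warnings = []


class colors:
    # ANSI escape sequences; attribute names match the ones used above.
    OK = SUCCESS = "\033[32m"
    WARN = "\033[33m"
    ERROR = FAIL = "\033[31m"
    RESET = "\033[0m"


def cprint(color, message):
    # Colored message on stdout.
    print("{}{}{}".format(color, message, colors.RESET))


def cprinterr(color, message):
    # Colored message on stderr.
    print("{}{}{}".format(color, message, colors.RESET), file=sys.stderr)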