Example #1
def run_all_tests(exec_file, in_sample_file_list, out_sample_file_list,
                  timeout_sec: int, knock_out: bool) -> bool:
    if len(in_sample_file_list) != len(out_sample_file_list):
        logging.error("{0}{1}{2}".format(
            "The number of the sample inputs and outputs are different.\n",
            "# of sample inputs: {}\n".format(len(in_sample_file_list)),
            "# of sample outputs: {}\n".format(len(out_sample_file_list))))
        raise IrregularSampleFileError
    samples = []
    for in_sample_file, out_sample_file in zip(in_sample_file_list,
                                               out_sample_file_list):
        validate_sample_pair(in_sample_file, out_sample_file)
        samples.append((in_sample_file, out_sample_file))

    success_count = run_for_samples(exec_file, samples, timeout_sec, knock_out)

    if len(samples) == 0:
        print("No test cases")
        return False
    elif success_count != len(samples):
        print("{msg} (passed {success_count} of {total})".format(
            msg=with_color("Some cases FAILED", Fore.LIGHTRED_EX),
            success_count=success_count,
            total=len(samples),
        ))
        return False
    else:
        print(with_color("Passed all test cases!!!", Fore.LIGHTGREEN_EX))
        return True
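
Note: every snippet in this section assumes a with_color helper from the surrounding codebase. A minimal sketch, assuming it simply wraps the text in a colorama sequence and resets styling afterwards (the real atcoder-tools helper may differ):

from colorama import Style

def with_color(text: str, color_code: str) -> str:
    # Wrap the text in an ANSI color sequence, e.g. Fore.LIGHTGREEN_EX,
    # and reset all styling afterwards.
    return "{}{}{}".format(color_code, text, Style.RESET_ALL)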
Example #2
def run_all_tests(exec_file, in_sample_file_list, out_sample_file_list, timeout_sec: int, knock_out: bool,
                  skip_stderr_on_success: bool, judge_method, cwd) -> bool:
    if len(in_sample_file_list) != len(out_sample_file_list):
        logger.error("{0}{1}{2}".format(
            "The number of the sample inputs and outputs are different.\n",
            "# of sample inputs: {}\n".format(len(in_sample_file_list)),
            "# of sample outputs: {}\n".format(len(out_sample_file_list))))
        raise IrregularSampleFileError
    samples = []
    for in_sample_file, out_sample_file in zip(in_sample_file_list, out_sample_file_list):
        validate_sample_pair(in_sample_file, out_sample_file)
        samples.append((in_sample_file, out_sample_file))

    test_summary = run_for_samples(
        exec_file, samples, timeout_sec, judge_method, knock_out, skip_stderr_on_success, cwd=cwd)

    if len(samples) == 0:
        print("No test cases")
        return False
    elif test_summary.success_count != len(samples):
        print("{msg} (passed {success_count} of {total})".format(
            msg=with_color("Some cases FAILED", Fore.LIGHTRED_EX),
            success_count=test_summary.success_count,
            total=len(samples),
        ))
        return False
    elif test_summary.has_error_output:
        print(with_color(
            "Passed all test case but with stderr. (Please remove stderr!)", Fore.LIGHTYELLOW_EX))
        return False
    else:
        print(with_color("Passed all test cases!!!", Fore.LIGHTGREEN_EX))
        return True
Example #3
def build_details_str(exec_res: ExecResult, input_file: str, output_file: str) -> str:
    res = ""

    def append(text: str, end='\n'):
        nonlocal res
        res += text + end

    with open(output_file, "r") as f:
        expected_output = f.read()

    append(with_color("[Input]", Fore.LIGHTMAGENTA_EX))
    with open(input_file, "r") as f:
        append(f.read(), end='')

    append(with_color("[Expected]", Fore.LIGHTMAGENTA_EX))
    append(expected_output, end='')

    append(with_color("[Received]", Fore.LIGHTMAGENTA_EX))
    append(exec_res.output, end='')

    if exec_res.status != ExecStatus.NORMAL:
        append(with_color("Aborted ({})\n".format(
            exec_res.status.name), Fore.LIGHTYELLOW_EX))

    if exec_res.has_stderr():
        append(with_color("[Error]", Fore.LIGHTYELLOW_EX))
        append(exec_res.stderr, end='')
    return res
Example #4
def run_all_tests(exec_file, in_sample_file_list, out_sample_file_list, timeout_sec: int, knock_out: bool,
                  skip_stderr_on_success: bool) -> bool:
    if len(in_sample_file_list) != len(out_sample_file_list):
        logging.error("{0}{1}{2}".format(
            "The number of the sample inputs and outputs are different.\n",
            "# of sample inputs: {}\n".format(len(in_sample_file_list)),
            "# of sample outputs: {}\n".format(len(out_sample_file_list))))
        raise IrregularSampleFileError
    samples = []
    for in_sample_file, out_sample_file in zip(in_sample_file_list, out_sample_file_list):
        validate_sample_pair(in_sample_file, out_sample_file)
        samples.append((in_sample_file, out_sample_file))

    test_summary = run_for_samples(
        exec_file, samples, timeout_sec, knock_out, skip_stderr_on_success)

    if len(samples) == 0:
        print("No test cases")
        return False
    elif test_summary.success_count != len(samples):
        print("{msg} (passed {success_count} of {total})".format(
            msg=with_color("Some cases FAILED", Fore.LIGHTRED_EX),
            success_count=test_summary.success_count,
            total=len(samples),
        ))
        return False
    elif test_summary.has_error_output:
        print(with_color(
            "Passed all test case but with stderr. (Please remove stderr!)", Fore.LIGHTYELLOW_EX))
        return False
    else:
        print(with_color("Passed all test cases!!!", Fore.LIGHTGREEN_EX))
        return True
Example #5
def build_details_str(exec_res: ExecResult, input_file: str,
                      output_file: str) -> str:
    res = ""

    def append(text: str, end='\n'):
        nonlocal res
        res += text + end

    append(with_color("[Input]", Fore.LIGHTMAGENTA_EX))
    with open(input_file, "r") as f:
        append(f.read(), end='')

    append(with_color("[Expected]", Fore.LIGHTMAGENTA_EX))
    with open(output_file, "r") as f:
        append(f.read(), end='')

    append(with_color("[Received]", Fore.LIGHTMAGENTA_EX))
    append(exec_res.output, end='')
    if exec_res.status != ExecStatus.NORMAL:
        append(
            with_color("Aborted ({})\n".format(exec_res.status.name),
                       Fore.LIGHTYELLOW_EX))

    if exec_res.has_stderr():
        append(with_color("[Error]", Fore.LIGHTYELLOW_EX))
        append(exec_res.stderr, end='')
    return res
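
Note: ExecResult and ExecStatus are defined outside these snippets. A minimal sketch inferred from the call sites above -- positional constructor (status, output, stderr), an elapsed_ms field, has_stderr() and is_correct_output(); anything beyond that is an assumption, and later examples pass an extra judge_method:

from enum import Enum, auto

class ExecStatus(Enum):
    NORMAL = auto()   # appears in the examples
    RE = auto()       # appears in the examples
    TLE = auto()      # assumed extra member

class ExecResult:
    def __init__(self, status: ExecStatus, output: str = "", stderr: str = "",
                 elapsed_ms: int = 0):
        self.status = status
        self.output = output
        self.stderr = stderr
        self.elapsed_ms = elapsed_ms

    def has_stderr(self) -> bool:
        return len(self.stderr) > 0

    def is_correct_output(self, expected: str) -> bool:
        # Simplest judge: the run ended normally and stdout matches exactly.
        return self.status == ExecStatus.NORMAL and self.output == expected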
Example #6
def generate_code(atcoder_client: AtCoderClient,
                  problem_url: str,
                  config: Config,
                  output_file: IOBase):
    problem = get_problem_from_url(problem_url)
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    def emit_error(text):
        logging.error(with_color(text, Fore.RED))

    def emit_warning(text):
        logging.warning(text)

    def emit_info(text):
        logging.info(text)

    emit_info('{} is used as the template'.format(template_code_path))

    # Fetch problem data from the statement
    try:
        content = atcoder_client.download_problem_content(problem)
    except InputFormatDetectionError as e:
        emit_error("Failed to download input format.")
        raise e
    except SampleDetectionError as e:
        emit_error("Failed to download samples.")
        raise e

    try:
        prediction_result = predict_format(content)
        emit_info(
            with_color("Format prediction succeeded", Fore.LIGHTGREEN_EX))
    except (NoPredictionResultError, MultiplePredictionResultsError) as e:
        prediction_result = FormatPredictionResult.empty_result()
        if isinstance(e, NoPredictionResultError):
            msg = "No prediction -- Failed to understand the input format"
        else:
            msg = "Too many prediction -- Failed to understand the input format"
        emit_warning(with_color(msg, Fore.LIGHTRED_EX))

    constants = predict_constants(content.original_html)
    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    output_splitter()

    output_file.write(code_generator(
        CodeGenArgs(
            template,
            prediction_result.format,
            constants,
            config.code_style_config
        )))
Example #7
def run_for_samples(exec_file: str,
                    sample_pair_list: List[Tuple[str, str]],
                    timeout_sec: int,
                    judge_method: Judge = NormalJudge(),
                    knock_out: bool = False,
                    skip_io_on_success: bool = False) -> TestSummary:
    success_count = 0
    has_error_output = False
    for in_sample_file, out_sample_file in sample_pair_list:
        # Run program
        exec_res = run_program(exec_file,
                               in_sample_file,
                               timeout_sec=timeout_sec)

        # Output header
        with open(out_sample_file, 'r') as f:
            answer_text = f.read()

        is_correct = exec_res.is_correct_output(answer_text, judge_method)
        has_error_output = has_error_output or exec_res.has_stderr()

        if is_correct:
            if exec_res.has_stderr():
                message = with_color(
                    "CORRECT but with stderr (Please remove stderr!)",
                    Fore.LIGHTYELLOW_EX)
            else:
                message = "{} {elapsed} ms".format(with_color(
                    "PASSED", Fore.LIGHTGREEN_EX),
                                                   elapsed=exec_res.elapsed_ms)
            success_count += 1
        else:
            if exec_res.status == ExecStatus.NORMAL:
                message = with_color("WA", Fore.LIGHTRED_EX)
            else:
                message = with_color(exec_res.status.name, Fore.LIGHTYELLOW_EX)

        print("# {case_name} ... {message}".format(
            case_name=os.path.basename(in_sample_file),
            message=message,
        ))

        # Output details for incorrect results or when there is stderr.
        if not is_correct or (exec_res.has_stderr()
                              and not skip_io_on_success):
            print('{}\n'.format(
                build_details_str(exec_res, in_sample_file, out_sample_file)))

        if knock_out and not is_correct:
            print('Stop testing ...')
            break
    return TestSummary(success_count, has_error_output)
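
Note: TestSummary is constructed as TestSummary(success_count, has_error_output) and read back via those attribute names, so a NamedTuple along these lines satisfies every call site; a sketch, not the definitive definition:

from typing import NamedTuple

class TestSummary(NamedTuple):
    success_count: int       # number of sample cases that passed
    has_error_output: bool   # True if any run wrote to stderr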
Example #8
def generate_code(atcoder_client: AtCoderClient, problem_url: str,
                  config: Config, output_file: IOBase):
    problem = get_problem_from_url(problem_url)
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    def emit_error(text):
        logging.error(with_color(text, Fore.RED))

    def emit_warning(text):
        logging.warning(text)

    def emit_info(text):
        logging.info(text)

    emit_info('{} is used as the template'.format(template_code_path))

    # Fetch problem data from the statement
    try:
        content = atcoder_client.download_problem_content(problem)
    except InputFormatDetectionError as e:
        emit_error("Failed to download input format.")
        raise e
    except SampleDetectionError as e:
        emit_error("Failed to download samples.")
        raise e

    try:
        prediction_result = predict_format(content)
        emit_info(with_color("Format prediction succeeded",
                             Fore.LIGHTGREEN_EX))
    except (NoPredictionResultError, MultiplePredictionResultsError) as e:
        prediction_result = FormatPredictionResult.empty_result()
        if isinstance(e, NoPredictionResultError):
            msg = "No prediction -- Failed to understand the input format"
        else:
            msg = "Too many prediction -- Failed to understand the input format"
        emit_warning(with_color(msg, Fore.LIGHTRED_EX))

    constants = predict_constants(content.original_html)
    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    output_splitter()

    output_file.write(
        code_generator(
            CodeGenArgs(template, prediction_result.format, constants,
                        config.code_style_config)))
Example #9
    def test_build_details_str__show_testcase_if_there_is_stderr(self):
        in_out = 'correct\n'
        stderr = 'stderr\n'
        expected = (with_color('[Input]', Fore.LIGHTMAGENTA_EX) + '\n'
                    + in_out + with_color('[Expected]',
                                          Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Received]',
                                 Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Error]', Fore.LIGHTYELLOW_EX) + '\n' + stderr)
        io_mock = mock_open(read_data=in_out)

        with patch('atcodertools.tools.tester.open', io_mock):
            result = build_details_str(ExecResult(
                ExecStatus.NORMAL, in_out, stderr), 'in.txt', 'out.txt')
            self.assertEqual(expected, result)
Example #10
    def test_build_details_str__on_runtime_failure(self):
        in_out = 'correct\n'
        stderr = ''
        expected = (with_color('[Input]', Fore.LIGHTMAGENTA_EX) + '\n'
                    + in_out + with_color('[Expected]',
                                          Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Received]',
                                 Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('Aborted ({})\n'.format(ExecStatus.RE.name), Fore.LIGHTYELLOW_EX) + '\n')
        io_mock = mock_open(read_data=in_out)

        with patch('atcodertools.tools.tester.open', io_mock):
            result = build_details_str(ExecResult(
                ExecStatus.RE, in_out, stderr), 'in.txt', 'out.txt')
            self.assertEqual(expected, result)
Example #11
    def test_build_details_str__show_testcase_if_there_is_stderr(self):
        in_out = 'correct\n'
        stderr = 'stderr\n'
        expected = (with_color('[Input]', Fore.LIGHTMAGENTA_EX) + '\n'
                    + in_out + with_color('[Expected]',
                                          Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Received]',
                                 Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Error]', Fore.LIGHTYELLOW_EX) + '\n' + stderr)
        io_mock = mock_open(read_data=in_out)

        with patch('atcodertools.tools.tester.open', io_mock):
            result = build_details_str(ExecResult(
                ExecStatus.NORMAL, in_out, stderr), 'in.txt', 'out.txt')
            self.assertEqual(expected, result)
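
Note: the tests above patch the tester module's open with a single mock_open, so every file read -- sample input and expected output alike -- returns the same in_out string. That is why the [Input], [Expected] and [Received] sections of the expected string all carry identical text. A minimal sketch of the effect:

from unittest.mock import mock_open, patch

io_mock = mock_open(read_data='correct\n')
with patch('builtins.open', io_mock):
    with open('in.txt') as f:
        print(f.read())    # 'correct\n' -- any path yields the same data
    with open('out.txt') as f:
        print(f.read())    # 'correct\n' again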
Example #12
    def test_build_details_str__on_runtime_failure(self):
        in_out = 'correct\n'
        stderr = ''
        expected = (with_color('[Input]', Fore.LIGHTMAGENTA_EX) + '\n'
                    + in_out + with_color('[Expected]',
                                          Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('[Received]',
                                 Fore.LIGHTMAGENTA_EX) + '\n' + in_out
                    + with_color('Aborted ({})\n'.format(ExecStatus.RE.name), Fore.LIGHTYELLOW_EX) + '\n')
        io_mock = mock_open(read_data=in_out)

        with patch('atcodertools.tools.tester.open', io_mock):
            result = build_details_str(ExecResult(
                ExecStatus.RE, in_out, stderr), 'in.txt', 'out.txt')
            self.assertEqual(expected, result)
Example #13
def run_for_samples(exec_file: str, sample_pair_list: List[Tuple[str, str]], timeout_sec: int, knock_out: bool = False,
                    skip_io_on_success: bool = False) -> TestSummary:
    success_count = 0
    has_error_output = False
    for in_sample_file, out_sample_file in sample_pair_list:
        # Run program
        exec_res = run_program(exec_file, in_sample_file,
                               timeout_sec=timeout_sec)

        # Output header
        with open(out_sample_file, 'r') as f:
            answer_text = f.read()

        is_correct = exec_res.is_correct_output(answer_text)
        has_error_output = has_error_output or exec_res.has_stderr()

        if is_correct:
            if exec_res.has_stderr():
                message = with_color(
                    "CORRECT but with stderr (Please remove stderr!)", Fore.LIGHTYELLOW_EX)
            else:
                message = "{} {elapsed} ms".format(
                    with_color("PASSED", Fore.LIGHTGREEN_EX),
                    elapsed=exec_res.elapsed_ms)
            success_count += 1
        else:
            if exec_res.status == ExecStatus.NORMAL:
                message = with_color("WA", Fore.LIGHTRED_EX)
            else:
                message = with_color(
                    exec_res.status.name, Fore.LIGHTYELLOW_EX)

        print("# {case_name} ... {message}".format(
            case_name=os.path.basename(in_sample_file),
            message=message,
        ))

        # Output details for incorrect results or when there is stderr.
        if not is_correct or (exec_res.has_stderr() and not skip_io_on_success):
            print('{}\n'.format(build_details_str(
                exec_res, in_sample_file, out_sample_file)))

        if knock_out and not is_correct:
            print('Stop testing ...')
            break
    return TestSummary(success_count, has_error_output)
Example #14
def run_for_samples(exec_file: str,
                    sample_pair_list: List[Tuple[str, str]],
                    timeout_sec: int,
                    knock_out: bool = False):
    success_count = 0
    for in_sample_file, out_sample_file in sample_pair_list:
        # Run program
        exec_res = run_program(exec_file,
                               in_sample_file,
                               timeout_sec=timeout_sec)

        # Output header
        with open(out_sample_file, 'r') as f:
            answer_text = f.read()

        is_correct = exec_res.is_correct_output(answer_text)
        if is_correct:
            message = "{} {elapsed} ms".format(with_color(
                "PASSED", Fore.LIGHTGREEN_EX),
                                               elapsed=exec_res.elapsed_ms)
            success_count += 1
        else:
            if exec_res.status == ExecStatus.NORMAL:
                message = with_color("WA", Fore.LIGHTRED_EX)
            else:
                message = with_color(exec_res.status.name, Fore.LIGHTYELLOW_EX)

        print("# {case_name} ... {message}".format(
            case_name=os.path.basename(in_sample_file),
            message=message,
        ))

        # Output details for incorrect results.
        if not is_correct:
            print('{}\n'.format(
                build_details_str(exec_res, in_sample_file, out_sample_file)))
            if knock_out:
                print('Stop testing ...')
                break
    return success_count
Example #15
def main(prog, args):
    parser = argparse.ArgumentParser(
        prog=prog,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument("contest_id",
                        help="Contest ID (e.g. arc001)")

    parser.add_argument("--without-login",
                        action="store_true",
                        help="Download data without login")

    parser.add_argument("--workspace",
                        help="Path to workspace's root directory. This script will create files"
                             " in {{WORKSPACE}}/{{contest_name}}/{{alphabet}}/ e.g. ./your-workspace/arc001/A/\n"
                             "[Default] {}".format(DEFAULT_WORKSPACE_DIR_PATH))

    parser.add_argument("--lang",
                        help="Programming language of your template code, {}.\n"
                        .format(" or ".join([lang.name for lang in ALL_LANGUAGES])) + "[Default] {}".format(CPP.name))

    parser.add_argument("--template",
                        help="File path to your template code\n{}".format(
                            "\n".join(
                                ["[Default ({dname})] {path}".format(
                                    dname=lang.display_name,
                                    path=lang.default_template_path
                                ) for lang in ALL_LANGUAGES]
                            ))
                        )

    # Deleted functionality
    parser.add_argument('--replacement', help=argparse.SUPPRESS)

    parser.add_argument("--parallel",
                        action="store_true",
                        help="Prepare problem directories asynchronously using multi processors.",
                        default=None)

    parser.add_argument("--save-no-session-cache",
                        action="store_true",
                        help="Save no session cache to avoid security risk",
                        default=None)

    parser.add_argument("--config",
                        help="File path to your config file\n{0}{1}".format("[Default (Primary)] {}\n".format(
                            USER_CONFIG_PATH),
                            "[Default (Secondary)] {}\n".format(
                                get_default_config_path()))
                        )

    args = parser.parse_args(args)

    if args.replacement is not None:
        logging.error(with_color("Sorry! --replacement argument no longer exists"
                                 " and you can only use --template."
                                 " See the official document for details.", Fore.LIGHTRED_EX))
        raise DeletedFunctionalityError

    config = get_config(args)

    try:
        import AccountInformation  # noqa
        raise BannedFileDetectedError(
            "We abolished the logic with AccountInformation.py. Please delete the file.")
    except ImportError:
        pass

    client = AtCoderClient()
    if not config.etc_config.download_without_login:
        try:
            client.login(
                save_session_cache=not config.etc_config.save_no_session_cache)
            logging.info("Login successful.")
        except LoginError:
            logging.error(
                "Failed to login (maybe due to wrong username/password combination?)")
            sys.exit(-1)
    else:
        logging.info("Downloading data without login.")

    prepare_contest(client,
                    args.contest_id,
                    config)
Example #16
def run_for_samples(exec_file: str, sample_pair_list: List[Tuple[str, str]], timeout_sec: int,
                    judge_method: Judge = NormalJudge(), knock_out: bool = False,
                    skip_io_on_success: bool = False, cwd: str = "./") -> TestSummary:
    success_count = 0
    has_error_output = False
    for in_sample_file, out_sample_file in sample_pair_list:
        if judge_method.judge_type == JudgeType.Interactive:
            exec_res = run_interactive_program(exec_file,
                                               judge_method.judge_code_lang.get_test_command(
                                                   'judge', cwd),
                                               in_sample_file, out_sample_file,
                                               timeout_sec=timeout_sec,
                                               current_working_dir=cwd
                                               )
            is_correct = exec_res.is_correct_output(judge_method=judge_method)
        else:
            # Run program
            exec_res = run_program(exec_file, in_sample_file,
                                   timeout_sec=timeout_sec, current_working_dir=cwd)

            if judge_method.judge_type == JudgeType.MultiSolution:
                is_correct = exec_res.is_correct_output(
                    judge_method=judge_method, sample_input_file=in_sample_file, sample_output_file=out_sample_file, cwd=cwd)
            else:
                # Output header
                with open(out_sample_file, 'r') as f:
                    expected_answer_text = f.read()

                is_correct = exec_res.is_correct_output(
                    expected_answer_text, judge_method)

        if exec_res.output is None:
            exec_res.output = ""
        elif isinstance(exec_res.output, bytes):
            exec_res.output = exec_res.output.decode()
        if exec_res.stderr is None:
            exec_res.stderr = ""
        elif isinstance(exec_res.stderr, bytes):
            exec_res.stderr = exec_res.stderr.decode()

        has_error_output = has_error_output or exec_res.has_stderr()

        if is_correct:
            if exec_res.has_stderr():
                message = with_color(
                    "CORRECT but with stderr (Please remove stderr!)", Fore.LIGHTYELLOW_EX)
            else:
                message = "{} {elapsed} ms".format(
                    with_color("PASSED", Fore.LIGHTGREEN_EX),
                    elapsed=exec_res.elapsed_ms)
            success_count += 1
        else:
            if exec_res.status == ExecStatus.NORMAL:
                message = with_color("WA", Fore.LIGHTRED_EX)
            else:
                message = with_color(
                    exec_res.status.name, Fore.LIGHTYELLOW_EX)

        print("# {case_name} ... {message}".format(
            case_name=os.path.basename(in_sample_file),
            message=message,
        ))

        # Output details for incorrect results or when there is stderr.
        if not is_correct or (exec_res.has_stderr() and not skip_io_on_success):
            print('{}\n'.format(build_details_str(
                exec_res, in_sample_file, out_sample_file)))

        if knock_out and not is_correct:
            print('Stop testing ...')
            break
    return TestSummary(success_count, has_error_output)
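
Note: the judge plumbing is also external. A sketch consistent with the call sites above -- JudgeType.Interactive and JudgeType.MultiSolution comparisons plus a default-constructible NormalJudge; the enum values are assumptions, and the interactive judge additionally carries a judge_code_lang that is omitted here:

from enum import Enum

class JudgeType(Enum):
    Normal = "normal"                # assumed value
    MultiSolution = "multisolution"  # assumed value
    Interactive = "interactive"      # assumed value

class Judge:
    judge_type: JudgeType

class NormalJudge(Judge):
    def __init__(self):
        self.judge_type = JudgeType.Normal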
Example #17
def emit_error(text):
    logging.error(with_color(text, Fore.RED))
Example #18
def main(prog, args):
    parser = argparse.ArgumentParser(
        prog=prog, formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument("contest_id", help="Contest ID (e.g. arc001)")

    parser.add_argument("--without-login",
                        action="store_true",
                        help="Download data without login")

    parser.add_argument(
        "--workspace",
        help="Path to workspace's root directory. This script will create files"
        " in {{WORKSPACE}}/{{contest_name}}/{{alphabet}}/ e.g. ./your-workspace/arc001/A/\n"
        "[Default] {}".format(DEFAULT_WORKSPACE_DIR_PATH))

    parser.add_argument(
        "--lang",
        help="Programming language of your template code, {}.\n".format(
            " or ".join([lang.name for lang in ALL_LANGUAGES])) +
        "[Default] {}".format(CPP.name))

    parser.add_argument("--template",
                        help="File path to your template code\n{}".format(
                            "\n".join([
                                "[Default ({dname})] {path}".format(
                                    dname=lang.display_name,
                                    path=lang.default_template_path)
                                for lang in ALL_LANGUAGES
                            ])))

    # Deleted functionality
    parser.add_argument('--replacement', help=argparse.SUPPRESS)

    parser.add_argument(
        "--parallel",
        action="store_true",
        help=
        "Prepare problem directories asynchronously using multi processors.",
        default=None)

    parser.add_argument("--save-no-session-cache",
                        action="store_true",
                        help="Save no session cache to avoid security risk",
                        default=None)

    parser.add_argument(
        "--config",
        help="File path to your config file\n{0}{1}".format(
            "[Default (Primary)] {}\n".format(USER_CONFIG_PATH),
            "[Default (Secondary)] {}\n".format(get_default_config_path())))

    args = parser.parse_args(args)

    if args.replacement is not None:
        logger.error(
            with_color(
                "Sorry! --replacement argument no longer exists"
                " and you can only use --template."
                " See the official document for details.", Fore.LIGHTRED_EX))
        raise DeletedFunctionalityError

    config = get_config(args)

    try:
        import AccountInformation  # noqa
        raise BannedFileDetectedError(
            "We abolished the logic with AccountInformation.py. Please delete the file."
        )
    except ImportError:
        pass

    client = AtCoderClient()
    if not config.etc_config.download_without_login:
        try:
            client.login(
                save_session_cache=not config.etc_config.save_no_session_cache)
            logger.info("Login successful.")
        except LoginError:
            logger.error(
                "Failed to login (maybe due to wrong username/password combination?)"
            )
            sys.exit(-1)
    else:
        logger.info("Downloading data without login.")

    prepare_contest(client, args.contest_id, config)
Example #19
def emit_error(text):
    logger.error(with_color("Problem {}: {}".format(pid, text), Fore.RED))
Example #20
def main(prog,
         args,
         credential_supplier=None,
         use_local_session_cache=True) -> bool:
    parser = argparse.ArgumentParser(
        prog=prog, formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument(
        "--exec",
        '-e',
        help=
        "File path to the execution target. [Default] Automatically detected exec file",
        default=None)

    parser.add_argument(
        "--dir",
        '-d',
        help="Target directory to test. [Default] Current directory",
        default=".")

    parser.add_argument("--timeout",
                        '-t',
                        help="Timeout for each test cases (sec) [Default] 1",
                        type=int,
                        default=1)

    parser.add_argument(
        "--code",
        '-c',
        help=
        "Path to the source code to submit [Default] Code path written in metadata.json",
        type=str,
        default=None)

    parser.add_argument(
        "--force",
        "-f",
        action="store_true",
        help=
        "Submit the code regardless of the local test result [Default] False",
        default=False)

    parser.add_argument("--save-no-session-cache",
                        action="store_true",
                        help="Save no session cache to avoid security risk",
                        default=False)

    parser.add_argument(
        "--unlock-safety",
        "-u",
        action="store_true",
        help=
        "By default, this script only submits the first code per problem. However, you can remove"
        " the safety by this option in order to submit codes twice or more.",
        default=False)

    args = parser.parse_args(args)

    metadata_file = os.path.join(args.dir, "metadata.json")
    try:
        metadata = Metadata.load_from(metadata_file)
    except IOError:
        logger.error(
            "{0} is not found! You need {0} to use this submission functionality."
            .format(metadata_file))
        return False

    try:
        client = AtCoderClient()
        client.login(
            # --save-no-session-cache disables caching, so invert the flag here
            save_session_cache=not args.save_no_session_cache,
            credential_supplier=credential_supplier,
            use_local_session_cache=use_local_session_cache,
        )
    except LoginError:
        logger.error("Login failed. Try again.")
        return False

    tester_args = []
    if args.exec:
        tester_args += ["-e", args.exec]
    if args.dir:
        tester_args += ["-d", args.dir]
    if args.timeout:
        tester_args += ["-t", str(args.timeout)]

    if args.force or tester.main("", tester_args):
        submissions = client.download_submission_list(metadata.problem.contest)
        if not args.unlock_safety:
            for submission in submissions:
                if submission.problem_id == metadata.problem.problem_id:
                    logger.error(
                        with_color(
                            "Cancel submitting because you already sent some code to the problem. Please "
                            "specify -u to send the code. {}".format(
                                metadata.problem.contest.get_submissions_url(
                                    submission)), Fore.LIGHTRED_EX))
                    return False

        code_path = args.code or os.path.join(args.dir, metadata.code_filename)
        with open(code_path, 'r') as f:
            source = f.read()
        logger.info("Submitting {} as {}".format(code_path,
                                                 metadata.lang.name))
        submission = client.submit_source_code(metadata.problem.contest,
                                               metadata.problem, metadata.lang,
                                               source)
        logger.info("{} {}".format(
            with_color("Done!", Fore.LIGHTGREEN_EX),
            metadata.problem.contest.get_submissions_url(submission)))
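
Note: a hedged usage sketch for the submit entry point above; the workspace path is hypothetical, and the real CLI wires prog and args from sys.argv:

import sys

# Run the local tests first (via tester.main) and submit only on success;
# --force and --unlock-safety keep their safe defaults, so a problem that
# already has a submission is refused.
ok = main("atcoder-tools submit", ["--dir", "./workspace/arc001/A", "--timeout", "2"])
sys.exit(0 if ok else 1)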
Example #21
def prepare_procedure(atcoder_client: AtCoderClient, problem: Problem,
                      config: Config):
    workspace_root_path = config.code_style_config.workspace_dir
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    pid = problem.get_alphabet()
    problem_dir_path = os.path.join(workspace_root_path,
                                    problem.get_contest().get_id(), pid)

    def emit_error(text):
        logger.error(with_color("Problem {}: {}".format(pid, text), Fore.RED))

    def emit_warning(text):
        logger.warning("Problem {}: {}".format(pid, text))

    def emit_info(text):
        logger.info("Problem {}: {}".format(pid, text))

    emit_info('{} is used as the template'.format(template_code_path))

    original_html = atcoder_client.download_problem_content_raw_html(problem)
    constants = predict_constants(original_html)

    if constants.judge_method.judge_type != JudgeType.Interactive:
        # Fetch problem data from the statement
        try:
            content = get_problem_content(original_html)
        except InputFormatDetectionError as e:
            emit_error("Failed to download input format.")
            raise e
        except SampleDetectionError as e:
            emit_error("Failed to download samples.")
            raise e

        # Store examples to the directory path
        if len(content.get_samples()) == 0:
            emit_info("No samples.")
        else:
            os.makedirs(problem_dir_path, exist_ok=True)
            create_examples(content.get_samples(), problem_dir_path,
                            config.etc_config.in_example_format,
                            config.etc_config.out_example_format)
            emit_info("Created examples.")

    code_file_path = os.path.join(problem_dir_path,
                                  "main.{}".format(lang.extension))

    # If there is existing code, just create a backup
    if os.path.exists(code_file_path):
        backup_id = 1
        while True:
            backup_name = "{}.{}".format(code_file_path, backup_id)
            if not os.path.exists(backup_name):
                new_path = backup_name
                shutil.copy(code_file_path, backup_name)
                break
            backup_id += 1
        emit_info("Backup for existing code '{}' -> '{}'".format(
            code_file_path, new_path))

    if constants.judge_method.judge_type != JudgeType.Interactive:
        try:
            prediction_result = predict_format(content)
            emit_info(
                with_color("Format prediction succeeded", Fore.LIGHTGREEN_EX))
        except (NoPredictionResultError, MultiplePredictionResultsError) as e:
            prediction_result = FormatPredictionResult.empty_result()
            if isinstance(e, NoPredictionResultError):
                msg = "No prediction -- Failed to understand the input format"
            else:
                msg = "Too many prediction -- Failed to understand the input format"
            emit_warning(with_color(msg, Fore.LIGHTRED_EX))
    else:
        prediction_result = FormatPredictionResult.empty_result()

    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    create_code(
        code_generator(
            CodeGenArgs(template, prediction_result.format, constants,
                        config.code_style_config)), code_file_path)
    emit_info("Saved code to {}".format(code_file_path))

    # Save metadata
    metadata_path = os.path.join(problem_dir_path, "metadata.json")
    Metadata(
        problem,
        os.path.basename(code_file_path),
        config.etc_config.in_example_format.replace("{}", "*"),
        config.etc_config.out_example_format.replace("{}", "*"),
        lang,
        constants.judge_method,
    ).save_to(metadata_path)
    emit_info("Saved metadata to {}".format(metadata_path))

    if config.postprocess_config.exec_cmd_on_problem_dir is not None:
        emit_info(
            _message_on_execution(
                problem_dir_path,
                config.postprocess_config.exec_cmd_on_problem_dir))
        config.postprocess_config.execute_on_problem_dir(problem_dir_path)

    output_splitter()
Example #22
def prepare_procedure(atcoder_client: AtCoderClient,
                      problem: Problem,
                      config: Config):
    workspace_root_path = config.code_style_config.workspace_dir
    template_code_path = config.code_style_config.template_file
    lang = config.code_style_config.lang

    pid = problem.get_alphabet()
    problem_dir_path = os.path.join(
        workspace_root_path,
        problem.get_contest().get_id(),
        pid)

    def emit_error(text):
        logging.error(with_color("Problem {}: {}".format(pid, text), Fore.RED))

    def emit_warning(text):
        logging.warning("Problem {}: {}".format(pid, text))

    def emit_info(text):
        logging.info("Problem {}: {}".format(pid, text))

    emit_info('{} is used as the template'.format(template_code_path))

    # Fetch problem data from the statement
    try:
        content = atcoder_client.download_problem_content(problem)
    except InputFormatDetectionError as e:
        emit_error("Failed to download input format.")
        raise e
    except SampleDetectionError as e:
        emit_error("Failed to download samples.")
        raise e

    # Store examples to the directory path
    if len(content.get_samples()) == 0:
        emit_info("No samples.")
    else:
        os.makedirs(problem_dir_path, exist_ok=True)
        create_examples(content.get_samples(), problem_dir_path,
                        IN_EXAMPLE_FORMAT, OUT_EXAMPLE_FORMAT)
        emit_info("Created examples.")

    code_file_path = os.path.join(
        problem_dir_path,
        "main.{}".format(lang.extension))

    # If there is existing code, just create a backup
    if os.path.exists(code_file_path):
        backup_id = 1
        while True:
            backup_name = "{}.{}".format(code_file_path, backup_id)
            if not os.path.exists(backup_name):
                new_path = backup_name
                shutil.copy(code_file_path, backup_name)
                break
            backup_id += 1
        emit_info(
            "Backup for existing code '{}' -> '{}'".format(
                code_file_path,
                new_path))

    try:
        prediction_result = predict_format(content)
        emit_info(
            with_color("Format prediction succeeded", Fore.LIGHTGREEN_EX))
    except (NoPredictionResultError, MultiplePredictionResultsError) as e:
        prediction_result = FormatPredictionResult.empty_result()
        if isinstance(e, NoPredictionResultError):
            msg = "No prediction -- Failed to understand the input format"
        else:
            msg = "Too many prediction -- Failed to understand the input format"
        emit_warning(with_color(msg, Fore.LIGHTRED_EX))

    constants = predict_constants(content.original_html)
    code_generator = config.code_style_config.code_generator
    with open(template_code_path, "r") as f:
        template = f.read()

    create_code(code_generator(
        CodeGenArgs(
            template,
            prediction_result.format,
            constants,
            config.code_style_config
        )),
        code_file_path)
    emit_info("Saved code to {}".format(code_file_path))

    # Save metadata
    metadata_path = os.path.join(problem_dir_path, "metadata.json")
    Metadata(problem,
             os.path.basename(code_file_path),
             IN_EXAMPLE_FORMAT.replace("{}", "*"),
             OUT_EXAMPLE_FORMAT.replace("{}", "*"),
             lang,
             ).save_to(metadata_path)
    emit_info("Saved metadata to {}".format(metadata_path))

    if config.postprocess_config.exec_cmd_on_problem_dir is not None:
        emit_info(_message_on_execution(problem_dir_path,
                                        config.postprocess_config.exec_cmd_on_problem_dir))
        config.postprocess_config.execute_on_problem_dir(
            problem_dir_path)

    output_splitter()
Example #23
def emit_error(text):
    logger.error(with_color(text, Fore.RED))
Example #24
def emit_error(text):
    logging.error(with_color("Problem {}: {}".format(pid, text), Fore.RED))
Example #25
    def load(cls, fp: TextIO, args: Optional[ProgramArgs] = None):
        """
        :param fp: .toml file's file pointer
        :param args: command line arguments
        :return: Config instance
        """
        config_dic = toml.load(fp)
        # Root 'codestyle' is common code style
        common_code_style_config_dic = config_dic.get(
            _CODE_STYLE_CONFIG_KEY, {})

        postprocess_config_dic = config_dic.get(_POST_PROCESS_CONFIG_KEY, {})
        etc_config_dic = config_dic.get('etc', {})
        run_config_dic = config_dic.get(_RUN_CONFIG_KEY, {})
        code_style_config_dic = {**common_code_style_config_dic}

        # Handle config override strategy in the following code
        # (Most preferred) program arguments > lang-specific > common config (Least preferred)
        lang = (args and args.lang) or common_code_style_config_dic.get(
            "lang", DEFAULT_LANGUAGE)
        code_style_config_dic = _update_config_dict(
            code_style_config_dic, dict(lang=lang))

        if lang in config_dic:
            lang_specific_config_dic = config_dic[lang]  # e.g. [cpp.codestyle]
            if _CODE_STYLE_CONFIG_KEY in lang_specific_config_dic:
                lang_code_style = lang_specific_config_dic[_CODE_STYLE_CONFIG_KEY]
                if "lang" in lang_code_style:
                    logger.warning(
                        with_color("'lang' is only valid in common code style config, "
                                   "but detected in language-specific code style config. It will be ignored.",
                                   Fore.RED))
                    del lang_code_style["lang"]

                code_style_config_dic = _update_config_dict(code_style_config_dic,
                                                            lang_code_style)

            # e.g. [cpp.postprocess]
            if _POST_PROCESS_CONFIG_KEY in lang_specific_config_dic:
                postprocess_config_dic = _update_config_dict(postprocess_config_dic,
                                                             lang_specific_config_dic[_POST_PROCESS_CONFIG_KEY])

            if _RUN_CONFIG_KEY in lang_specific_config_dic:  # e.g. [cpp.run]
                run_config_dic = _update_config_dict(run_config_dic,
                                                     lang_specific_config_dic[_RUN_CONFIG_KEY])

        if args:
            code_style_config_dic = _update_config_dict(
                code_style_config_dic,
                dict(template_file=args.template,
                     workspace_dir=args.workspace)
            )
            etc_config_dic = _update_config_dict(
                etc_config_dic,
                dict(
                    download_without_login=args.without_login,
                    parallel_download=args.parallel,
                    save_no_session_cache=args.save_no_session_cache,
                    compile_before_testing=args.compile_before_testing,
                    compile_only_when_diff_detected=args.compile_only_when_diff_detected
                )
            )

        return Config(
            code_style_config=CodeStyleConfig(**code_style_config_dic),
            postprocess_config=PostprocessConfig(**postprocess_config_dic),
            etc_config=EtcConfig(**etc_config_dic),
            run_config=RunConfig(**run_config_dic)
        )
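
Note: _update_config_dict is never shown. For the override chain above ("program arguments > lang-specific > common config") to work, it plausibly merges the update into the base while skipping None values, so an unspecified command-line argument never clobbers a configured one. A minimal sketch under that assumption:

def _update_config_dict(base_dict: dict, update_dict: dict) -> dict:
    # Keep every base entry; apply only overrides that are actually set.
    # (Assumption: None means "not specified" and must not overwrite config.)
    return {**base_dict,
            **{k: v for k, v in update_dict.items() if v is not None}}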
Example #26
def main(prog, args, credential_supplier=None, use_local_session_cache=True) -> bool:
    parser = argparse.ArgumentParser(
        prog=prog,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument("--exec", '-e',
                        help="File path to the execution target. [Default] Automatically detected exec file",
                        default=None)

    parser.add_argument("--dir", '-d',
                        help="Target directory to test. [Default] Current directory",
                        default=".")

    parser.add_argument("--timeout", '-t',
                        help="Timeout for each test cases (sec) [Default] 1",
                        type=int,
                        default=1)

    parser.add_argument("--code", '-c',
                        help="Path to the source code to submit [Default] Code path written in metadata.json",
                        type=str,
                        default=None)

    parser.add_argument("--force", "-f",
                        action="store_true",
                        help="Submit the code regardless of the local test result [Default] False",
                        default=False)

    parser.add_argument("--save-no-session-cache",
                        action="store_true",
                        help="Save no session cache to avoid security risk",
                        default=False)

    parser.add_argument("--unlock-safety", "-u",
                        action="store_true",
                        help="By default, this script only submits the first code per problem. However, you can remove"
                             " the safety by this option in order to submit codes twice or more.",
                        default=False)

    args = parser.parse_args(args)

    metadata_file = os.path.join(args.dir, "metadata.json")
    try:
        metadata = Metadata.load_from(metadata_file)
    except IOError:
        logging.error(
            "{0} is not found! You need {0} to use this submission functionality.".format(metadata_file))
        return False

    try:
        client = AtCoderClient()
        # --save-no-session-cache disables caching, so invert the flag here
        client.login(save_session_cache=not args.save_no_session_cache,
                     credential_supplier=credential_supplier,
                     use_local_session_cache=use_local_session_cache,
                     )
    except LoginError:
        logging.error("Login failed. Try again.")
        return False

    tester_args = []
    if args.exec:
        tester_args += ["-e", args.exec]
    if args.dir:
        tester_args += ["-d", args.dir]
    if args.timeout:
        tester_args += ["-t", str(args.timeout)]

    if args.force or tester.main("", tester_args):
        submissions = client.download_submission_list(metadata.problem.contest)
        if not args.unlock_safety:
            for submission in submissions:
                if submission.problem_id == metadata.problem.problem_id:
                    logging.error(with_color("Cancel submitting because you already sent some code to the problem. Please "
                                             "specify -u to send the code. {}".format(
                                                 metadata.problem.contest.get_submissions_url(submission)), Fore.LIGHTRED_EX))
                    return False

        code_path = args.code or os.path.join(args.dir, metadata.code_filename)
        with open(code_path, 'r') as f:
            source = f.read()
        logging.info(
            "Submitting {} as {}".format(code_path, metadata.lang.name))
        submission = client.submit_source_code(
            metadata.problem.contest, metadata.problem, metadata.lang, source)
        logging.info("{} {}".format(
            with_color("Done!", Fore.LIGHTGREEN_EX),
            metadata.problem.contest.get_submissions_url(submission)))