Example #1
def _main():
    if len(sys.argv) < 5:
        simple_usage_message(
            "<soft-time-limit> <hard-time-limit> <output-file> <command...>")

    soft_time_limit = float(sys.argv[1])
    hard_time_limit = float(sys.argv[2])
    output_file = sys.argv[3]

    command = sys.argv[4:]

    data = timer(hard_time_limit, command)

    del data.process
    with open(output_file, 'w') as f:
        f.write("duration %.3f\n" % data.duration)
        f.write("terminated %s\n" % data.terminated_str)
        f.write("ret %d\n" % data.ret)

    if data.terminated or data.duration > soft_time_limit:
        sys.exit(124)
    else:
        sys.exit(data.ret)
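
Every example in this collection guards its argument parsing with simple_usage_message from util. The helper itself is never shown; here is a minimal sketch of what it presumably does (hypothetical implementation; the real util.simple_usage_message in TPS may differ, including the exit code):

import os
import sys

def simple_usage_message(args_usage):
    # Print a usage line derived from the current script name, then exit
    # with a non-zero status (exit code assumed).
    sys.stderr.write("Usage: python {} {}\n".format(
        os.path.basename(sys.argv[0]), args_usage))
    sys.exit(2)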
Example #2
File: gen.py Project: ioi-2022/tps
def _main():
    if len(sys.argv) != 3:
        simple_usage_message("<gen-data-file> <tests-dir>")
    gen_data_file = sys.argv[1]
    tests_dir = sys.argv[2]

    task_data = load_json(PROBLEM_JSON)
    with open(gen_data_file, 'r') as gdf:
        gen_data = gdf.readlines()

    if SPECIFIC_TESTS:
        tu.check_pattern_exists_in_test_names(
            SPECIFIED_TESTS_PATTERN,
            tu.get_test_names_by_gen_data(gen_data, task_data))

    summary_visitor = SummaryVisitor()
    parse_data(gen_data, task_data, summary_visitor)
    summary_visitor.make_gen_summary_file(tests_dir)

    mapping_visitor = MappingVisitor()
    parse_data(gen_data, task_data, mapping_visitor)
    mapping_visitor.make_mapping_file(tests_dir)

    parse_data(gen_data, task_data, GeneratingVisitor(tests_dir))
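
Example #2 parses the same gen data three times, each pass feeding a different visitor (summary, mapping, generating). Neither parse_data nor the visitor interface is shown; a hypothetical sketch of the pattern this implies, with the hook name invented for illustration:

class BaseGenDataVisitor:
    # Hypothetical base class; the real TPS visitors may expose other hooks.
    def on_test(self, test_name, gen_command):
        pass

class SummaryVisitor(BaseGenDataVisitor):
    def __init__(self):
        self.summary_lines = []

    def on_test(self, test_name, gen_command):
        # Collect one line per test for the gen summary file.
        self.summary_lines.append(
            "{} {}".format(test_name, " ".join(gen_command)))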
Example #3
import sys
import os
import subprocess

from util import get_bool_environ, simple_usage_message, wait_process_success
from color_util import cprinterr, colors
import tests_util as tu

INTERNALS_DIR = os.environ.get('INTERNALS')
SPECIFIC_TESTS = get_bool_environ('SPECIFIC_TESTS')
SPECIFIED_TESTS_PATTERN = os.environ.get('SPECIFIED_TESTS_PATTERN')

if __name__ == '__main__':
    if len(sys.argv) != 2:
        simple_usage_message("<tests-dir>")
    tests_dir = sys.argv[1]

    try:
        test_name_list = tu.get_test_names_from_tests_dir(tests_dir)
    except tu.MalformedTestsException as e:
        cprinterr(colors.ERROR, "Error:")
        sys.stderr.write("{}\n".format(e))
        sys.exit(4)

    if SPECIFIC_TESTS:
        tu.check_pattern_exists_in_test_names(SPECIFIED_TESTS_PATTERN,
                                              test_name_list)
        test_name_list = tu.filter_test_names_by_pattern(
            test_name_list, SPECIFIED_TESTS_PATTERN)

    available_tests, missing_tests = tu.divide_tests_by_availability(
        tests_dir, test_name_list)  # arguments assumed (the source snippet is truncated here)
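
The snippet breaks off at the divide_tests_by_availability call. A hedged sketch of what such a helper plausibly does, reusing the test_exists helper that Examples #5 and #6 import (a sketch of test_exists itself follows Example #6):

from test_exists import test_exists

def divide_tests_by_availability(tests_dir, test_name_list):
    # Hypothetical sketch: partition the test names by whether the test's
    # files are actually present in tests_dir.
    available, missing = [], []
    for name in test_name_list:
        (available if test_exists(tests_dir, name) else missing).append(name)
    return available, missing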
Example #4
import sys
import os
from color_util import colored, colors, InvalidColorNameException
'''
This script takes a color name as an argument and prints its standard input in that color.
For example, "echo hello | python colored_cat red" prints "hello" in red.
'''
if __name__ == '__main__':
    if len(sys.argv) != 2:
        from util import simple_usage_message
        simple_usage_message("<color-name>")

    color_name = sys.argv[1].upper()
    try:
        for line in sys.stdin:
            sys.stdout.write(colored(colors.get(color_name), line))
    except InvalidColorNameException:
        sys.stderr.write('Invalid color name: {}\n'.format(color_name))
        exit(4)
    except KeyboardInterrupt:
        sys.stdout.write(colors.RESET)
        exit(1)
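
color_util is not included in this collection. A minimal sketch of the pieces colored_cat relies on, assuming plain ANSI escape codes and a small set of illustrative color names (the real color_util may differ):

class InvalidColorNameException(Exception):
    pass

class colors:
    # A few ANSI foreground codes; the real module likely defines more.
    RED = '\033[31m'
    GREEN = '\033[32m'
    ERROR = RED
    RESET = '\033[0m'

    @classmethod
    def get(cls, name):
        try:
            return getattr(cls, name)
        except AttributeError:
            raise InvalidColorNameException(name)

def colored(color, text):
    # Wrap the text in the color code and reset afterwards.
    return "{}{}{}".format(color, text, colors.RESET)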
Example #5
import sys
import os
import subprocess

from util import check_file_exists, wait_process_success
from color_util import cprint, colors
from gen_data_parser import check_test_pattern_exists_in_list, test_name_matches_pattern
from test_exists import test_exists

INTERNALS_DIR = os.environ.get('INTERNALS')
SPECIFIC_TESTS = os.environ.get('SPECIFIC_TESTS')
SPECIFIED_TESTS_PATTERN = os.environ.get('SPECIFIED_TESTS_PATTERN')

if __name__ == '__main__':

    if len(sys.argv) != 3:
        from util import simple_usage_message
        simple_usage_message("<tests-dir> <gen-summary-file>")

    tests_dir = sys.argv[1]
    gen_summary_file = sys.argv[2]

    if not os.path.isdir(tests_dir):
        sys.stderr.write(
            "The tests directory was not found or is not a valid directory: {}.\n"
            .format(tests_dir))
        exit(4)
    check_file_exists(gen_summary_file,
                      "The tests are not correctly generated.\n")

    with open(gen_summary_file) as gsf:
        test_name_list = [
            line.split()[0]
            for line in map(str.strip, gsf.readlines())
            if line  # blank-line filter assumed (the source snippet is truncated here)
        ]
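
The gen summary file is assumed here to hold one test per line, with the test name as the first whitespace-separated token. A tiny self-contained illustration of the parsing above (file contents hypothetical):

lines = ["0-01 OK", "1-01 OK", "1-02 FAILED", ""]
test_name_list = [
    line.split()[0] for line in map(str.strip, lines) if line
]
print(test_name_list)  # ['0-01', '1-01', '1-02']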
Example #6
"""This script is mainly used for testing purposes.
"""

import sys
from util import simple_usage_message
from test_exists import test_exists

if __name__ == '__main__':
    if len(sys.argv) != 3:
        simple_usage_message("<tests-dir> <test-name>")

    tests_dir = sys.argv[1]
    test_name = sys.argv[2]

    sys.exit(0 if test_exists(tests_dir, test_name) else 1)
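
test_exists itself is not part of this collection. A minimal sketch, assuming a test counts as present when its ".in" file exists (the real check may also require the expected-output file):

import os

def test_exists(tests_dir, test_name):
    # Hypothetical: a test is considered present when its input file is on disk.
    return os.path.isfile(os.path.join(tests_dir, test_name + ".in"))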
Example #7
        return verdict == "Wrong Answer"
    elif expected_verdict == "runtime_error":
        return verdict == "Runtime Error"
    elif expected_verdict == "failed":
        return verdict != "Correct" or score == 0
    elif expected_verdict == "time_limit_and_runtime_error":
        return verdict in ["Time Limit Exceeded", "Runtime Error"]
    elif expected_verdict == "partially_correct":
        return 0 < score < 1
    else:
        raise ValueError("Invalid expected verdict: {}".format(expected_verdict))


if __name__ == '__main__':
    if len(sys.argv) != 3:
        simple_usage_message("<tests-dir> <solution-path>")
    tests_dir = sys.argv[1]
    solution_filename = os.path.basename(sys.argv[2])

    solutions_data = dict(load_json(SOLUTIONS_JSON))
    solution_data = solutions_data.get(solution_filename, None)

    try:
        test_name_list = tu.get_test_names_from_tests_dir(tests_dir)
    except tu.MalformedTestsException as e:
        cprinterr(colors.ERROR, "Error:")
        sys.stderr.write("{}\n".format(e))
        sys.exit(4)

    if SPECIFIC_TESTS:
        tu.check_pattern_exists_in_test_names(SPECIFIED_TESTS_PATTERN, test_name_list)
Example #8
        test_validators += [
            validator.format(subtask=subtask)
            for validator in subtask_sensitive_validators
        ]
        test_validators += navigate_json(data, 'subtasks/%s' % subtask,
                                         SUBTASKS_JSON).get('validators', [])

    if len(test_validators) == 0:
        log_warning("There is no validator for test {}.".format(test_name))

    def unify_list(lst):
        # Remove duplicates while preserving the original order.
        seen = []
        for e in lst:
            if e not in seen:
                seen.append(e)
        return seen

    return unify_list(test_validators)


if __name__ == '__main__':
    if len(sys.argv) != 3:
        from util import simple_usage_message
        simple_usage_message("<test-name> <mapping-file>")

    test_validators = get_test_validators(test_name=sys.argv[1],
                                          mapping_file=sys.argv[2])

    for validator in test_validators:
        print(validator)
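
A note on unify_list above: the membership test on a list makes it quadratic in the number of validators. The entries here are strings (hashable), so an equivalent order-preserving version can track seen items in a set:

def unify_list(lst):
    # Same result as the list-based version for hashable items,
    # but O(n) instead of O(n^2).
    seen = set()
    result = []
    for e in lst:
        if e not in seen:
            seen.add(e)
            result.append(e)
    return result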
Example #9
            else:
                raise KeyError
        except (KeyError, IndexError):
            sys.stderr.write("Requested key '%s' not found in '%s'\n" % (path, os.path.basename(json_file_name)))
            exit(4)
    return data


def navigate_json_file(file, path):
    return navigate_json(load_json(file), path, file)


if __name__ == '__main__':
    if len(sys.argv) != 3:
        from util import simple_usage_message
        simple_usage_message("<json-file> <json-path>")

    json_file = sys.argv[1]
    json_path = sys.argv[2]

    result = navigate_json_file(json_file, json_path)

    if isinstance(result, dict):
        for key in result.keys():
            print(key)
    elif isinstance(result, list):
        for item in result:
            print(item)
    elif isinstance(result, bool):
        print("true" if result else "false")
    else:
        print(result)  # assumed completion: print scalar values directly
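
Only the tail of navigate_json is visible above. As an illustration of the slash-separated traversal it appears to implement, a hedged standalone sketch (hypothetical reconstruction; error handling reduced to the visible behavior):

import os
import sys

def navigate_json_sketch(data, path, json_file_name):
    # Walk the slash-separated path, indexing lists by position and dicts
    # by key; report missing keys the same way the visible tail does.
    try:
        for part in path.split('/'):
            if isinstance(data, list):
                data = data[int(part)]
            elif isinstance(data, dict) and part in data:
                data = data[part]
            else:
                raise KeyError
    except (KeyError, IndexError):
        sys.stderr.write("Requested key '%s' not found in '%s'\n" %
                         (path, os.path.basename(json_file_name)))
        exit(4)
    return data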
Example #10
"""This script is mainly used for testing purposes.
"""

import sys
from util import simple_usage_message, load_json
from test_name import get_test_name

if __name__ == '__main__':
    if len(sys.argv) < 8:
        simple_usage_message(
            "<task_data file (problem.json)> <testset_name> <testset_index> " +
            "<subtask_index> <test_index> <test_offset> <gen_arguments...>")

    test_name = get_test_name(
        task_data=load_json(sys.argv[1]),
        testset_name=sys.argv[2],
        testset_index=int(sys.argv[3]),
        subtask_index=int(sys.argv[4]),
        test_index=int(sys.argv[5]),
        test_offset=int(sys.argv[6]),
        gen_line=sys.argv[7:],
    )

    print(test_name)
Example #11
    try:
        data.ret = p.wait()
    except KeyboardInterrupt:
        t.cancel()
        exit(130)
    t.cancel()

    end_time = datetime.datetime.now()
    data.duration = (end_time - start_time).total_seconds()

    return data


if __name__ == '__main__':
    if len(sys.argv) < 5:
        from util import simple_usage_message
        simple_usage_message(
            "<soft-time-limit> <hard-time-limit> <output-file> <command...>")

    soft_time_limit = float(sys.argv[1])
    hard_time_limit = float(sys.argv[2])
    output_file = sys.argv[3]

    command = sys.argv[4:]

    data = timer(hard_time_limit, command)

    del data.process
    with open(output_file, 'w') as f:
        f.write("duration %.3f\n" % data.duration)
        f.write("terminated %s\n" % ("true" if data.terminated else "false"))
        f.write("ret %d\n" % data.ret)
Example #12
import sys
import os

from util import simple_usage_message, load_json
from tests_util import get_test_names_by_gen_data

if __name__ == '__main__':
    if len(sys.argv) != 2:
        simple_usage_message("<gen-data-file>")
    gen_data_file = sys.argv[1]

    PROBLEM_JSON = os.environ.get('PROBLEM_JSON')
    task_data = load_json(PROBLEM_JSON)
    with open(gen_data_file, 'r') as gdf:
        gen_data = gdf.readlines()
    tests = get_test_names_by_gen_data(gen_data, task_data)

    for test in tests:
        print(test)