Example #1
def test_create_dummy_results(tmpdir, max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.output_dir = tmpdir.strpath
    num_of_input_files = len(os.listdir(base_context.input_dir))

    assert len(os.listdir(base_context.output_dir)) == 0

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

    assert len(os.listdir(base_context.output_dir)) == num_of_input_files + 1
    assert log_filename in os.listdir(base_context.output_dir)

    with open(f'{base_context.output_dir}/{log_filename}', "r") as log_file:
        # Count the log lines; sum() also copes with an empty file.
        num_of_logs = sum(1 for _ in log_file)

    assert num_of_logs != 0

    if max_instances_done_per_file is not None:
        assert num_of_logs == num_of_input_files * max_instances_done_per_file
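
All examples on this page rely on shared test helpers (create_dummy_parser, create_dummy_algorithm, create_dummy_context, create_dummy_results) whose definitions are not shown. As a rough guide, here is a minimal sketch of what create_dummy_parser plausibly returns, inferred solely from the calls the examples make; the real helper may differ:

class _SketchedDummyParser:
    # Hypothetical stand-in exposing only what the examples exercise.
    def __init__(self, write_result: bool = False):
        self._write_result = write_result

    def get_name(self) -> str:
        return "DummyParser"

    def write_result_to_file(self, output_file, data) -> None:
        # Persist results only when created with write_result=True,
        # mirroring create_dummy_parser(write_result=True) in Example #1.
        if self._write_result:
            output_file.write(f'{data}\n')

def create_dummy_parser_sketch(write_result: bool = False) -> _SketchedDummyParser:
    return _SketchedDummyParser(write_result)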
Example #2
def test_run_tester(tmpdir):
    parser: Parser = create_dummy_parser()
    algorithms = ["Alg1", "Alg2"]
    communicators = ["Slack"]
    ctx: AlgTesterContext = create_dummy_context(
        parser=parser, algorithms=algorithms, communicators=communicators)
    ctx.output_dir = f'{tmpdir.strpath}/Test'

    input_files = list()
    for _, _, files in os.walk(ctx.input_dir):
        for filename in files:
            input_files.append(filename)

    flexmock(Plugins)
    (Plugins.should_receive("get_parser").and_return(parser))

    flexmock(BaseRunner)
    (BaseRunner.should_receive("compute_results")
        .with_args(object, input_files)
        .and_return(None)
        .once())

    assert not os.path.isdir(ctx.output_dir)
    
    tester_logic.run_tester(ctx.algorithm_names, ctx.concurrency_runner_name,
                            ctx.check_time, ctx.time_retries, ctx.parser_name,
                            ctx.communicator_names, ctx.max_files_to_check,
                            ctx.is_forced, ctx.min_time_between_communications,
                            ctx.input_dir, ctx.output_dir, ctx.extra_options)

    assert os.path.isdir(ctx.output_dir)

Example #3
def test_get_solution_for_instance(algorithm: Algorithm):
    base_context = create_dummy_context(algorithms=[algorithm.get_name()])
    base_data = get_base_parsed_data(base_context, algorithm)
    base_data.update({"id": 0, "item_count": 0})

    res: Dict[str, object] = _runner.get_solution_for_instance(
        base_context, algorithm, base_data)

    assert "algorithm" in res
    assert "algorithm_name" in res
    assert "output_filename" in res

Example #4
def test_run_tester_for_file(algorithms: List[Algorithm], tmpdir):
    output_dir = tmpdir
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(
        algorithms=[alg.get_name() for alg in algorithms],
        parser=parser.get_name())
    base_context.num_of_instances = 500 * len(algorithms)
    base_context.output_dir = output_dir.strpath

    notification_vars = {
        "last_comm_time": 0,
        "instances_done": 0,
        "instances_failed": 0
    }

    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)
    create_path(base_context.output_dir)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)

    for algorithm in algorithms:
        (Plugins.should_receive("get_algorithm").with_args(
            algorithm.get_name()).and_return(algorithm))

    flexmock(BaseRunner)
    BaseRunner.should_receive("notify_communicators").times(
        base_context.num_of_instances + 1)

    flexmock(parser).should_receive("write_result_to_file").times(
        base_context.num_of_instances)

    _runner.init(instances_logger)
    _runner.run_tester_for_file(base_context,
                                f'{base_context.input_dir}/4_inst.dat',
                                notification_vars)

    assert notification_vars["instances_done"] == base_context.num_of_instances

    assert not instances_logger._instance_log.closed
    instances_logger.close_log()
    assert instances_logger._instance_log.closed

    instances_logger.load_instances()
    assert (instances_logger.get_num_of_done_instances() ==
            base_context.num_of_instances)

Example #5
def test_count_instances():
    parser: Parser = create_dummy_parser()
    ctx: AlgTesterContext = create_dummy_context(parser=parser,
                                                 algorithms=["Alg1", "Alg2"])

    input_files = list()
    for _, _, files in os.walk(ctx.input_dir):
        for filename in files:
            input_files.append(filename)

    flexmock(Plugins)
    (Plugins.should_receive("get_parser").and_return(parser))

    tester_logic.count_instances(ctx, input_files)

    assert ctx.num_of_instances == 500 * 2 * len(ctx.algorithm_names)
Example #6
def test_get_parsed_instances_data(algorithm: Algorithm):
    parser = create_dummy_parser()
    base_context = create_dummy_context(algorithms=[algorithm.get_name()],
                                        parser=parser.get_name())
    base_data = get_base_parsed_data(base_context, algorithm)

    with open(f'{base_context.input_dir}/4_inst.dat', "r") as input_file:
        res: List[Dict[str, object]] = _runner.get_parsed_instances_data(
            base_context, input_file, parser, algorithm)

    assert len(res) > 0
    assert "algorithm" in res[0]
    assert "algorithm_name" in res[0]
    assert "output_filename" in res[0]

Example #7
def test_get_base_instances():
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.num_of_instances = 500 * 2
    input_files = _get_input_files(base_context.input_dir)

    instance_cnt = 0
    for (instance, file) in _runner.get_base_instances(input_files, parser):
        assert instance is not None
        assert file is not None
        assert not file.closed
        instance_cnt += 1

    assert instance_cnt == base_context.num_of_instances

    _runner.close_all_files(input_files)
Example #8
def test_run_tester_for_file_exceptions(tmpdir):
    output_dir = tmpdir
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(),
        create_dummy_algorithm(name="AlgFailure",
                               perform_func=_dummy_failing_func)
    ]
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(),
        algorithms=[alg.get_name() for alg in algorithms])
    base_context.num_of_instances = 500
    base_context.output_dir = output_dir.strpath

    notification_vars = {
        "last_comm_time": 0,
        "instances_done": 0,
        "instances_failed": 0
    }

    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)
    create_path(base_context.output_dir)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)

    for algorithm in algorithms:
        (Plugins.should_receive("get_algorithm").with_args(
            algorithm.get_name()).and_return(algorithm))

    flexmock(BaseRunner)
    BaseRunner.should_receive("notify_communicators").times(
        base_context.num_of_instances + 1)

    flexmock(parser).should_receive("write_result_to_file").times(
        base_context.num_of_instances)

    _runner.init(instances_logger)
    _runner.run_tester_for_file(base_context,
                                f'{base_context.input_dir}/4_inst.dat',
                                notification_vars)

    assert notification_vars["instances_done"] == base_context.num_of_instances
    assert (notification_vars["instances_failed"] ==
            base_context.num_of_instances)
Example #9
def prepare_objects(output_dir, is_change_forced: bool,
                    max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.is_forced = is_change_forced
    base_context.output_dir = output_dir

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

    instances_logger = InstancesLogger(base_context.output_dir,
                                       base_context.is_forced)

    return base_context, algorithm, parser, instances_logger
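
A hypothetical caller of prepare_objects, assuming pytest's tmpdir fixture; it merely unpacks the tuple returned above:

def test_with_prepared_objects(tmpdir):
    # Hypothetical test body illustrating prepare_objects' return tuple.
    base_context, algorithm, parser, instances_logger = prepare_objects(
        tmpdir.strpath, is_change_forced=False, max_instances_done_per_file=10)
    assert base_context.output_dir == tmpdir.strpath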
Example #10
def test_write_result(tmpdir):
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.output_dir = tmpdir.strpath
    output_files = {
        'output_1.dat': open(f'{base_context.output_dir}/output_1.dat', "w")
    }

    flexmock(parser).should_receive("write_result_to_file").times(3)

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_1.dat"})
    assert len(output_files.keys()) == 1

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_2.dat"})
    assert len(output_files.keys()) == 2

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_1.dat"})
    assert len(output_files.keys()) == 2

    _runner.close_all_files(output_files)
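
The assertions in test_write_result suggest that _runner.write_result opens one handle per output filename on first use and caches it in output_files. A minimal sketch of that behavior, inferred from the test rather than from the real implementation:

def write_result_sketch(context, parser, output_files, res):
    # Hypothetical re-implementation; the actual _runner.write_result
    # may differ in detail.
    filename = res["output_filename"]
    if filename not in output_files:
        # First result for this file: open the handle once and cache it.
        output_files[filename] = open(f'{context.output_dir}/{filename}', "w")
    parser.write_result_to_file(output_files[filename], res)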


# def tests_run_tester_for_data(tmpdir):
#     parser = create_dummy_parser()
#     algorithms = [create_dummy_algorithm(name="DummyAlg1"), create_dummy_algorithm(name="DummyAlg2")]
#     base_context: AlgTesterContext = create_dummy_context(parser=parser)
#     base_context.num_of_instances = 500*2*len(algorithms)
#     base_context.output_dir = tmpdir.strpath
#     input_files_dict = _get_input_files(base_context.input_dir)
#     output_files_dict = dict()

#     (flexmock(_runner._base_runner).should_receive("notify_communicators")
#         .and_return(None)
#         .times(base_context.num_of_instances + 1))

#     (flexmock(_runner).should_receive("write_result")
#         .and_return(None)
#         .times(base_context.num_of_instances)
#         )

#     _runner.run_tester_for_data(base_context, algorithms, parser, list(), input_files_dict, output_files_dict)
#     print
Example #11
def test_get_data_for_executor():
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(name="DummyAlg1"),
        create_dummy_algorithm(name="DummyAlg2")
    ]
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.num_of_instances = 500 * 2 * len(algorithms)
    input_files = _get_input_files(base_context.input_dir)

    instance_cnt = 0
    for algorithm, data in _runner.get_data_for_executor(
            base_context, input_files, parser, algorithms):
        assert algorithm in algorithms
        instance_cnt += 1

    assert instance_cnt == base_context.num_of_instances

    _runner.close_all_files(input_files)
Example #12
def test_create_columns_description_file(tmpdir):
    base_context = create_dummy_context()
    output_dir = tmpdir
    base_context.output_dir = output_dir.strpath

    algorithm = create_dummy_algorithm()

    concurrency_runners.create_columns_description_file(
        base_context, algorithm)

    contents = output_dir.listdir()
    assert len(contents) == 1
    assert algorithm.get_name() in contents[0].basename

    with open(contents[0].strpath) as columns_file:
        line: str = columns_file.read().strip()

    assert line is not None
    assert ' '.join(algorithm.get_columns()) == line

Example #13
def test_compute_results(is_change_forced: bool):
    base_context: AlgTesterContext = create_dummy_context()
    base_context.max_files_to_check = None
    base_context.is_forced = is_change_forced
    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)

    input_files = list()
    for root, _, files in os.walk(base_context.input_dir):
        for filename in files:
            input_files.append(f'{root}/{filename}')

    flexmock(BaseRunner)
    (BaseRunner.should_receive("run_tester_for_file").with_args(
        base_context, re.compile(f'{base_context.input_dir}/.*'),
        object).and_return(None).times(len(input_files)))

    _runner.init(instances_logger)
    _runner.compute_results(base_context, input_files)

Example #14
def test_notify_communicators_timing():
    base_context = create_dummy_context()
    notification_vars = {"last_comm_time": 0, "instances_done": 0}

    res = _runner.notify_communicators(base_context, [], {}, notification_vars)
    new_last_comm_time = notification_vars["last_comm_time"]
    assert notification_vars["last_comm_time"] != 0
    assert res is True

    res = _runner.notify_communicators(base_context, [], {}, notification_vars)
    assert res is False
    assert notification_vars["last_comm_time"] == new_last_comm_time

    notification_vars["last_comm_time"] -= (
        base_context.min_time_between_communications + 1)
    new_last_comm_time = notification_vars["last_comm_time"]
    res = _runner.notify_communicators(base_context, [], {}, notification_vars)
    assert res is True
    assert notification_vars["last_comm_time"] != new_last_comm_time

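
The timing assertions above imply that notify_communicators fires at most once per min_time_between_communications seconds. A sketch of that gate, inferred from test_notify_communicators_timing; the real BaseRunner method may differ:

import time

def notify_communicators_sketch(context, communicators, results,
                                notification_vars):
    # Hypothetical rate limiter inferred from the test above.
    now = time.time()
    elapsed = now - notification_vars["last_comm_time"]
    if elapsed < context.min_time_between_communications:
        return False  # too soon since the last notification round
    notification_vars["last_comm_time"] = now
    # ... forward `results` to each communicator here ...
    return True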