Example #1
def test_create_dummy_results(tmpdir, max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.output_dir = tmpdir.strpath
    num_of_input_files = len(os.listdir(base_context.input_dir))

    assert len(os.listdir(base_context.output_dir)) == 0

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

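    # One result file is expected per input file, plus the instances log file.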
    assert len(os.listdir(base_context.output_dir)) == num_of_input_files + 1
    assert log_filename in os.listdir(base_context.output_dir)

    with open(f'{base_context.output_dir}/{log_filename}', "r") as log_file:
        # Count the log entries; an empty log yields zero.
        num_of_logs = sum(1 for _ in log_file)

    assert num_of_logs != 0

    if max_instances_done_per_file is not None:
        assert num_of_logs == num_of_input_files * max_instances_done_per_file


def test_run_tester_for_file_exceptions(tmpdir):
    output_dir = tmpdir
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(),
        create_dummy_algorithm(name="AlgFailure",
                               perform_func=_dummy_failing_func)
    ]
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(),
        algorithms=[alg.get_name() for alg in algorithms])
    base_context.num_of_instances = 500
    base_context.output_dir = output_dir.strpath

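    # Counters that run_tester_for_file updates as instances finish or fail.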
    notification_vars = {
        "last_comm_time": 0,
        "instances_done": 0,
        "instances_failed": 0
    }

    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)
    create_path(base_context.output_dir)

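    # Stub the plugin registry so the runner resolves the dummy parser and algorithms.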
    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)

    for algorithm in algorithms:
        (Plugins.should_receive("get_algorithm").with_args(
            algorithm.get_name()).and_return(algorithm))

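    # notify_communicators is expected once per instance, plus one extra call.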
    flexmock(BaseRunner)
    BaseRunner.should_receive("notify_communicators").times(
        base_context.num_of_instances + 1)

    flexmock(parser).should_receive("write_result_to_file").times(
        base_context.num_of_instances)

    _runner.init(instances_logger)
    _runner.run_tester_for_file(base_context,
                                f'{base_context.input_dir}/4_inst.dat',
                                notification_vars)

    assert notification_vars["instances_done"] == base_context.num_of_instances
    assert notification_vars[
        "instances_failed"] == base_context.num_of_instances
Example #3
def test_get_data_for_executor():
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(name="DummyAlg1"),
        create_dummy_algorithm(name="DummyAlg2")
    ]
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.num_of_instances = 500 * 2 * len(algorithms)
    input_files = _get_input_files(base_context.input_dir)

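    # Each yielded (algorithm, data) pair corresponds to one instance to solve.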
    instance_cnt = 0
    for (algorithm,
         data) in _runner.get_data_for_executor(base_context, input_files,
                                                parser, algorithms):
        assert algorithm in algorithms
        instance_cnt += 1

    assert instance_cnt == base_context.num_of_instances

    _runner.close_all_files(input_files)
Example #4
def prepare_objects(output_dir, is_change_forced: bool,
                    max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.is_forced = is_change_forced
    base_context.output_dir = output_dir

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

    instances_logger = InstancesLogger(base_context.output_dir,
                                       base_context.is_forced)

    return base_context, algorithm, parser, instances_logger


def test_create_columns_description_file(tmpdir):
    base_context = create_dummy_context()
    output_dir = tmpdir
    base_context.output_dir = output_dir.strpath

    algorithm = create_dummy_algorithm()

    concurrency_runners.create_columns_description_file(
        base_context, algorithm)

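    # A single description file should be created, named after the algorithm.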
    contents = output_dir.listdir()
    assert len(contents) == 1
    assert algorithm.get_name() in contents[0].basename

    with open(contents[0].strpath) as columns_file:
        line: str = columns_file.read().strip()

    assert line is not None
    assert ' '.join(algorithm.get_columns()) == line


def _removing_perform(context: AlgTesterContext,
                      parsed_data: Dict[str, object]):
    """
    A function that does not return input data that is needed later in the algorithm.
    
    Arguments:
        context {AlgTesterContext} -- Used context.
        parsed_data {Dict[str, object]} -- Input data.
    
    Returns:
        Dict[str, object] -- Modified input data.
    """
    return {"id": parsed_data["id"], "item_count": parsed_data["item_count"]}


@pytest.mark.parametrize(
    'algorithm', (create_dummy_algorithm(),
                  create_dummy_algorithm(name="DummyRemovingAlgorithm",
                                         perform_func=_removing_perform)))
def test_get_solution_for_instance(algorithm: Algorithm):
    base_context = create_dummy_context(algorithms=[algorithm.get_name()])
    base_data = get_base_parsed_data(base_context, algorithm)
    base_data.update({"id": 0, "item_count": 0})

    res: Dict[str, object] = _runner.get_solution_for_instance(
        base_context, algorithm, base_data)

    assert "algorithm" in res
    assert "algorithm_name" in res
    assert "output_filename" in res
