import os
from typing import Dict, List

import pytest
from flexmock import flexmock

# NOTE: the import paths below are assumptions inferred from the names used in
# these tests; adjust them to the actual package and test-helper layout.
from algorithm_tester import tester_logic
from algorithm_tester.concurrency_runners import BaseRunner
from algorithm_tester.helpers import create_path
from algorithm_tester.instances_logger import InstancesLogger
from algorithm_tester.plugins import Plugins
from algorithm_tester_common.tester_dataclasses import (AlgTesterContext,
                                                        Algorithm, Parser)
from tests.test_common import (_dummy_failing_func, _get_input_files,
                               create_dummy_algorithm, create_dummy_context,
                               create_dummy_parser, create_dummy_results,
                               get_base_parsed_data, log_filename)

# Assumed: the runner instance exercised by the per-file tests below.
_runner: BaseRunner = BaseRunner()


def test_run_tester(tmpdir):
    parser: Parser = create_dummy_parser()
    algorithms = ["Alg1", "Alg2"]
    communicators = ["Slack"]
    ctx: AlgTesterContext = create_dummy_context(parser=parser,
                                                 algorithms=algorithms,
                                                 communicators=communicators)
    ctx.output_dir = f'{tmpdir.strpath}/Test'

    input_files = list()
    for _, _, files in os.walk(ctx.input_dir):
        for filename in files:
            input_files.append(filename)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)

    flexmock(BaseRunner)
    (BaseRunner.should_receive("compute_results")
        .with_args(object, input_files)
        .and_return(None)
        .once())

    assert not os.path.isdir(ctx.output_dir)

    tester_logic.run_tester(ctx.algorithm_names, ctx.concurrency_runner_name,
                            ctx.check_time, ctx.time_retries, ctx.parser_name,
                            ctx.communicator_names, ctx.max_files_to_check,
                            ctx.is_forced, ctx.min_time_between_communications,
                            ctx.input_dir, ctx.output_dir, ctx.extra_options)

    assert os.path.isdir(ctx.output_dir)
# Assumed parametrization: the original decorator is not shown in this
# section, so the values below are placeholders.
@pytest.mark.parametrize("max_instances_done_per_file", (None, 10))
def test_create_dummy_results(tmpdir, max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.output_dir = tmpdir.strpath

    num_of_input_files = len(os.listdir(base_context.input_dir))
    assert len(os.listdir(base_context.output_dir)) == 0

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

    assert len(os.listdir(base_context.output_dir)) == num_of_input_files + 1
    assert log_filename in os.listdir(base_context.output_dir)

    # Count the log lines; start at -1 so an empty log yields 0 after the
    # final increment instead of leaving the variable undefined.
    num_of_logs = -1
    with open(f'{base_context.output_dir}/{log_filename}', "r") as log_file:
        for num_of_logs, _ in enumerate(log_file):
            pass
    num_of_logs += 1

    assert num_of_logs != 0
    if max_instances_done_per_file is not None:
        assert num_of_logs == num_of_input_files * max_instances_done_per_file
# Assumed parametrization: the original decorator is not shown in this
# section, so these algorithm sets are placeholders.
@pytest.mark.parametrize("algorithms", (
    [create_dummy_algorithm()],
    [create_dummy_algorithm(name="Alg1"), create_dummy_algorithm(name="Alg2")],
))
def test_run_tester_for_file(algorithms: List[Algorithm], tmpdir):
    output_dir = tmpdir
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(
        algorithms=[alg.get_name() for alg in algorithms],
        parser=parser.get_name())
    base_context.num_of_instances = 500 * len(algorithms)
    base_context.output_dir = output_dir.strpath

    notification_vars = {
        "last_comm_time": 0,
        "instances_done": 0,
        "instances_failed": 0
    }

    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)

    create_path(base_context.output_dir)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)
    for algorithm in algorithms:
        (Plugins.should_receive("get_algorithm")
            .with_args(algorithm.get_name())
            .and_return(algorithm))

    flexmock(BaseRunner)
    BaseRunner.should_receive("notify_communicators").times(
        base_context.num_of_instances + 1)
    flexmock(parser).should_receive("write_result_to_file").times(
        base_context.num_of_instances)

    _runner.init(instances_logger)
    _runner.run_tester_for_file(base_context,
                                f'{base_context.input_dir}/4_inst.dat',
                                notification_vars)

    assert notification_vars["instances_done"] == base_context.num_of_instances

    assert not instances_logger._instance_log.closed
    instances_logger.close_log()
    assert instances_logger._instance_log.closed

    instances_logger.load_instances()
    assert (instances_logger.get_num_of_done_instances()
            == base_context.num_of_instances)
def test_count_instances():
    parser: Parser = create_dummy_parser()
    ctx: AlgTesterContext = create_dummy_context(parser=parser,
                                                 algorithms=["Alg1", "Alg2"])

    input_files = list()
    for _, _, files in os.walk(ctx.input_dir):
        for filename in files:
            input_files.append(filename)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)

    tester_logic.count_instances(ctx, input_files)

    assert ctx.num_of_instances == 500 * 2 * len(ctx.algorithm_names)
# Assumed parametrization: placeholder value; the original decorator is not
# shown in this section.
@pytest.mark.parametrize("algorithm", (create_dummy_algorithm(),))
def test_get_parsed_instances_data(algorithm: Algorithm):
    parser = create_dummy_parser()
    base_context = create_dummy_context(algorithms=[algorithm.get_name()],
                                        parser=parser.get_name())
    base_data = get_base_parsed_data(base_context, algorithm)

    with open(f'{base_context.input_dir}/4_inst.dat', "r") as input_file:
        res: List[Dict[str, object]] = _runner.get_parsed_instances_data(
            base_context, input_file, parser, algorithm)

    assert len(res) > 0
    assert "algorithm" in res[0]
    assert "algorithm_name" in res[0]
    assert "output_filename" in res[0]
def test_get_base_instances():
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.num_of_instances = 500 * 2

    input_files = _get_input_files(base_context.input_dir)

    instance_cnt = 0
    for (instance, file) in _runner.get_base_instances(input_files, parser):
        assert instance is not None
        assert file is not None
        assert not file.closed
        instance_cnt += 1

    assert instance_cnt == base_context.num_of_instances

    _runner.close_all_files(input_files)
def test_run_tester_for_file_exceptions(tmpdir):
    output_dir = tmpdir
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(),
        create_dummy_algorithm(name="AlgFailure",
                               perform_func=_dummy_failing_func)
    ]
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(),
        algorithms=[alg.get_name() for alg in algorithms])
    base_context.num_of_instances = 500
    base_context.output_dir = output_dir.strpath

    notification_vars = {
        "last_comm_time": 0,
        "instances_done": 0,
        "instances_failed": 0
    }

    instances_logger: InstancesLogger = InstancesLogger(
        base_context.output_dir, base_context.is_forced)

    create_path(base_context.output_dir)

    flexmock(Plugins)
    Plugins.should_receive("get_parser").and_return(parser)
    for algorithm in algorithms:
        (Plugins.should_receive("get_algorithm")
            .with_args(algorithm.get_name())
            .and_return(algorithm))

    flexmock(BaseRunner)
    BaseRunner.should_receive("notify_communicators").times(
        base_context.num_of_instances + 1)
    flexmock(parser).should_receive("write_result_to_file").times(
        base_context.num_of_instances)

    _runner.init(instances_logger)
    _runner.run_tester_for_file(base_context,
                                f'{base_context.input_dir}/4_inst.dat',
                                notification_vars)

    assert notification_vars["instances_done"] == base_context.num_of_instances
    assert notification_vars["instances_failed"] == base_context.num_of_instances
def prepare_objects(output_dir, is_change_forced: bool,
                    max_instances_done_per_file: int):
    parser: Parser = create_dummy_parser(write_result=True)
    algorithm: Algorithm = create_dummy_algorithm()
    base_context: AlgTesterContext = create_dummy_context(
        parser=parser.get_name(), algorithms=[algorithm.get_name()])
    base_context.max_files_to_check = None
    base_context.is_forced = is_change_forced
    base_context.output_dir = output_dir

    create_dummy_results(base_context, algorithm, parser, log_filename,
                         max_instances_done_per_file)

    instances_logger = InstancesLogger(base_context.output_dir,
                                       base_context.is_forced)

    return base_context, algorithm, parser, instances_logger
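# Usage sketch for prepare_objects (a hypothetical test, not part of the
# suite): the helper pre-populates the output directory with dummy results
# and returns the wired-up objects, so tests can assert on a "partially
# completed" run directly. The test name and values below are illustrative.
#
# def test_prepare_objects_sketch(tmpdir):
#     base_context, algorithm, parser, instances_logger = prepare_objects(
#         tmpdir.strpath, is_change_forced=False,
#         max_instances_done_per_file=10)
#     assert log_filename in os.listdir(base_context.output_dir)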
def test_write_result(tmpdir):
    parser = create_dummy_parser()
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.output_dir = tmpdir.strpath

    output_files = {
        'output_1.dat': open(f'{base_context.output_dir}/output_1.dat', "w")
    }

    flexmock(parser).should_receive("write_result_to_file").times(3)

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_1.dat"})
    assert len(output_files.keys()) == 1

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_2.dat"})
    assert len(output_files.keys()) == 2

    _runner.write_result(base_context, parser, output_files,
                         {"output_filename": "output_1.dat"})
    assert len(output_files.keys()) == 2

    _runner.close_all_files(output_files)


# def tests_run_tester_for_data(tmpdir):
#     parser = create_dummy_parser()
#     algorithms = [create_dummy_algorithm(name="DummyAlg1"),
#                   create_dummy_algorithm(name="DummyAlg2")]
#     base_context: AlgTesterContext = create_dummy_context(parser=parser)
#     base_context.num_of_instances = 500*2*len(algorithms)
#     base_context.output_dir = tmpdir.strpath
#
#     input_files_dict = _get_input_files(base_context.input_dir)
#     output_files_dict = dict()
#
#     (flexmock(_runner._base_runner).should_receive("notify_communicators")
#         .and_return(None)
#         .times(base_context.num_of_instances + 1))
#     (flexmock(_runner).should_receive("write_result")
#         .and_return(None)
#         .times(base_context.num_of_instances))
#
#     _runner.run_tester_for_data(base_context, algorithms, parser, list(),
#                                 input_files_dict, output_files_dict)
def test_get_data_for_executor():
    parser = create_dummy_parser()
    algorithms = [
        create_dummy_algorithm(name="DummyAlg1"),
        create_dummy_algorithm(name="DummyAlg2")
    ]
    base_context: AlgTesterContext = create_dummy_context(parser=parser)
    base_context.num_of_instances = 500 * 2 * len(algorithms)

    input_files = _get_input_files(base_context.input_dir)

    instance_cnt = 0
    for (algorithm, data) in _runner.get_data_for_executor(
            base_context, input_files, parser, algorithms):
        assert algorithm in algorithms
        instance_cnt += 1

    assert instance_cnt == base_context.num_of_instances

    _runner.close_all_files(input_files)
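# For reference, a minimal sketch of the _get_input_files helper these tests
# assume: judging by how close_all_files and get_base_instances consume its
# result, it returns a dict of open file handles keyed by filename. This is
# an assumption about the shared test utilities, not their actual code
# (Dict and TextIO come from typing if the sketch is used verbatim).
#
# def _get_input_files(input_dir: str) -> Dict[str, TextIO]:
#     return {
#         filename: open(f'{input_dir}/{filename}', "r")
#         for filename in sorted(os.listdir(input_dir))
#     }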