Example 1
    def get_solution_for_instance(
            self, context: AlgTesterContext, algorithm: Algorithm,
            parsed_instance_data: Dict[str, object]) -> Dict[str, object]:
        """
        Invoke the selected algorithm's perform_algorithm method on the parsed instance data.
        
        Arguments:
            context {AlgTesterContext} -- Current application context.
            algorithm {Algorithm} -- Currently tested algorithm.
            parsed_instance_data {Dict[str, object]} -- Instance data of 1 instance parsed from the instance file.
        
        Returns:
            Dict[str, object] -- Contains result data of the current instance. Used keys are column names specified by the algorithm.
        """

        if context.check_time:
            # Time the call with timeit; the unpacking (elapsed_time, solution)
            # below assumes timeit.template has been customized to also return
            # the callable's result (the stock Timer.timeit returns only a float)
            t = timeit.Timer(lambda: algorithm._run_perform_algorithm_func(
                context, parsed_instance_data))
            elapsed_time, solution = t.timeit(number=context.time_retries)
            solution["elapsed_time"] = round(
                (elapsed_time * 1000) / context.time_retries,
                10)  # Average time per retry, stored in milliseconds
        else:
            solution = algorithm._run_perform_algorithm_func(
                context, parsed_instance_data)

        return solution
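Note that the tuple unpacking above does not work with the stock standard library: timeit.Timer.timeit() returns only the elapsed time as a float. The project this snippet comes from presumably overrides timeit.template so that the timed callable's return value is handed back as well; a minimal sketch of such an override (an assumption, not shown in the original source):

import timeit

# Assumed module-level patch: make Timer.timeit() return
# (elapsed_seconds, last_return_value) instead of just the elapsed time,
# which is what get_solution_for_instance unpacks.
timeit.template = """
def inner(_it, _timer{init}):
    {setup}
    _t0 = _timer()
    for _i in _it:
        retval = {stmt}
    _t1 = _timer()
    return _t1 - _t0, retval
"""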
Example 2
    def get_parsed_instances_data(self, context: AlgTesterContext,
                                  input_file: IO, parser: Parser,
                                  algorithm: Algorithm) -> List[Dict[str, object]]:
        """
        Parses all instances from the input file.
        
        Arguments:
            context {AlgTesterContext} -- Current application context.
            input_file {IO} -- The opened input file.
            parser {Parser} -- Currently used instances parser.
            algorithm {Algorithm} -- Currently tested algorithm.
        
        Returns:
            List[Dict[str, object]] -- All instances parsed from the input file, one dictionary per instance.
        """
        output: List[Dict[str, object]] = []
        parsed_instance_data = parser.get_next_instance(input_file)

        click_options: Dict[str,
                            object] = get_click_options(context, algorithm)

        output_filename: str = parser.get_output_file_name(
            context, input_file, click_options)

        while parsed_instance_data is not None:
            parsed_instance_data["output_filename"] = output_filename
            parsed_instance_data["algorithm_name"] = algorithm.get_name()
            parsed_instance_data["algorithm"] = algorithm
            parsed_instance_data.update(context.extra_options)

            output.append(parsed_instance_data)

            parsed_instance_data = parser.get_next_instance(input_file)

        return output
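Taken together, the two runner methods above support a simple parse-then-solve loop. A minimal sketch, reusing the dummy helpers, the _runner object and the 4_inst.dat input file that appear in the tests further down (everything else, including the algorithm instance, is an assumption):

# Sketch of a parse-then-solve loop over one input file.
parser = create_dummy_parser()
context = create_dummy_context(algorithms=[algorithm.get_name()],
                               parser=parser.get_name())

with open(f'{context.input_dir}/4_inst.dat', "r") as input_file:
    instances = _runner.get_parsed_instances_data(context, input_file,
                                                  parser, algorithm)

for parsed_instance_data in instances:
    solution = _runner.get_solution_for_instance(context, algorithm,
                                                 parsed_instance_data)
    print(solution["algorithm_name"], solution.get("elapsed_time"))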
Example 3
def get_base_parsed_data(base_context: AlgTesterContext, algorithm: Algorithm) -> Dict[str, object]:
    dummy_data = dict()
    dummy_data["output_filename"] = "output_filename"
    dummy_data["algorithm_name"] = algorithm.get_name()
    dummy_data["algorithm"] = algorithm

    return dummy_data
def test_get_solution_for_instance(algorithm: Algorithm):
    base_context = create_dummy_context(algorithms=[algorithm.get_name()])
    base_data = get_base_parsed_data(base_context, algorithm)
    base_data.update({"id": 0, "item_count": 0})

    res: Dict[str, object] = _runner.get_solution_for_instance(
        base_context, algorithm, base_data)

    assert "algorithm" in res
    assert "algorithm_name" in res
    assert "output_filename" in res
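This test touches only a small slice of the Algorithm interface: get_name, get_columns and _run_perform_algorithm_func are the only members used across these snippets. A hypothetical stand-in that would satisfy the assertions (names and behavior here are assumptions, not the project's real base class):

from typing import Dict, List

# Hypothetical Algorithm stand-in; the real base class presumably builds
# _run_perform_algorithm_func on top of a perform_algorithm method, here it
# is stubbed directly and simply echoes the parsed data back as the solution.
class EchoAlgorithm:

    def get_name(self) -> str:
        return "EchoAlgorithm"

    def get_columns(self) -> List[str]:
        return ["id", "item_count", "elapsed_time"]

    def _run_perform_algorithm_func(
            self, context,
            parsed_instance_data: Dict[str, object]) -> Dict[str, object]:
        return dict(parsed_instance_data)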
Example 5
def create_dummy_results(context: AlgTesterContext,
                         algorithm: Algorithm,
                         parser: Parser,
                         log_filename: str,
                         max_instances_done_per_file: int = None):
    """
    Generates test results and log.
    
    Arguments:
        context {AlgTesterContext} -- [description]
        algorithm {Algorithm} -- [description]
        parser {Parser} -- [description]
        log_filename {str} -- [description]
    
    Keyword Arguments:
        max_instances_done_per_file {int} -- [description] (default: {None})
    """
    with open(f'{context.output_dir}/{log_filename}', "w") as log_file:
        for root, _, files in os.walk(context.input_dir):
            for filename in files:
                with open(f'{root}/{filename}', "r") as input_file:
                    real_num_of_instances: int = parser.get_num_of_instances(
                        context, input_file)
                    input_file.seek(0)
                    if max_instances_done_per_file is None:
                        max_instances_done_per_file = real_num_of_instances

                    click_options = get_click_options(context, algorithm)
                    output_filename: str = parser.get_output_file_name(
                        context, input_file, click_options)
                    with open(f'{context.output_dir}/{output_filename}',
                              "w") as output_file:
                        for _ in range(
                                min(real_num_of_instances,
                                    max_instances_done_per_file)):
                            # Create results
                            instance_data = parser.get_next_instance(
                                input_file)
                            instance_data["output_filename"] = output_filename
                            instance_data[
                                "algorithm_name"] = algorithm.get_name()
                            instance_data["algorithm"] = algorithm
                            instance_identifier: str = parser._get_complete_instance_identifier(
                                algorithm, instance_data)
                            log_file.write(f'{instance_identifier}\n')
                            parser.write_result_to_file(
                                output_file, instance_data)
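A hypothetical invocation of this helper, reusing the dummy factories referenced by the tests in these examples (the algorithm instance and the log file name "dummy.log" are assumptions):

# For every file in context.input_dir this writes one result file in
# context.output_dir (named by parser.get_output_file_name) holding at most
# two instance records, plus dummy.log listing their instance identifiers.
parser = create_dummy_parser()
context = create_dummy_context(algorithms=[algorithm.get_name()],
                               parser=parser.get_name())
create_dummy_results(context, algorithm, parser,
                     log_filename="dummy.log",
                     max_instances_done_per_file=2)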
Example 6
def create_columns_description_file(context: AlgTesterContext,
                                    algorithm: Algorithm):
    """
    Create an output file with names of columns for a specific algorithm.
    
    Args:
        algorithm (str): Name of the algorithm whose columns are persisted.
        output_dir (str): Output directory.
    """
    column_descriptions = algorithm.get_columns()

    with open(
            f'{context.output_dir}/column_description_{algorithm.get_name()}.dat',
            "w") as f:
        f.write(f'{" ".join(column_descriptions)}\n')
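For illustration, assuming an algorithm whose get_name() returns "Brute" and whose get_columns() returns ["id", "item_count", "elapsed_time"] (both assumed values, as are the context and algorithm objects):

# Hypothetical example: the call below would write
# <output_dir>/column_description_Brute.dat containing the single line
# "id item_count elapsed_time".
create_columns_description_file(context, algorithm)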
def test_get_parsed_instances_data(algorithm: Algorithm):
    parser = create_dummy_parser()
    base_context = create_dummy_context(algorithms=[algorithm.get_name()],
                                        parser=parser.get_name())
    base_data = get_base_parsed_data(base_context, algorithm)

    with open(f'{base_context.input_dir}/4_inst.dat', "r") as input_file:
        res: List[Dict[str, object]] = _runner.get_parsed_instances_data(
            base_context, input_file, parser, algorithm)

    assert len(res) > 0
    assert "algorithm" in res[0]
    assert "algorithm_name" in res[0]
    assert "output_filename" in res[0]
Example 8
def get_click_options(context: AlgTesterContext,
                      algorithm: Algorithm) -> Dict[str, object]:
    """
    Create a dictionary of the options and arguments given to the Click CLI.
    Used by the parser to generate the name of the output file.
    
    Arguments:
        context {AlgTesterContext} -- Current application context.
        algorithm {Algorithm} -- Currently tested algorithm.
    
    Returns:
        Dict[str, object] -- The CLI options extended with the algorithm data that the parser needs to build the output file name.
    """
    click_options: Dict[str, object] = context.get_options()
    click_options["algorithm_name"] = algorithm.get_name()
    click_options["algorithm"] = algorithm

    return click_options
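A sketch of how a parser's get_output_file_name might consume this dictionary. Only the "algorithm_name" key is guaranteed by get_click_options; the ExampleParser class and its naming scheme are assumptions:

import os
from typing import Dict

class ExampleParser:

    def get_output_file_name(self, context, input_file,
                             click_options: Dict[str, object]) -> str:
        # Derive the output name from the input file name plus the
        # algorithm name carried in the click options.
        input_stem = os.path.basename(input_file.name).rsplit(".", 1)[0]
        return f'{input_stem}_{click_options["algorithm_name"]}.dat'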