def test_configuration_file_correct_types(correct_arguments, correct_types, tmpdir):
    """Verify that the saved configuration file round-trips the chosen types."""
    # parse the command-line-style arguments supplied by the fixture
    parsed = arguments.parse(correct_arguments)
    prefix = str(tmpdir) + "/"
    # persist the parsed arguments as the configuration file in the temp dir
    configuration.save(prefix + constants.CONFIGURATION, vars(parsed))
    # exactly one file (the configuration) should now exist in the directory
    assert len(tmpdir.listdir()) == 1
    # read the configuration back and confirm the stored types survived the trip
    config_dict = configuration.read(prefix + constants.CONFIGURATION)
    assert configuration.get_types(config_dict) == correct_types
analyzed_module = importlib.import_module( configuration.get_module(tada_configuration_dict)) # reflectively access the chosen function analyzed_function = getattr( analyzed_module, configuration.get_function(tada_configuration_dict)) # read the chosen_size chosen_size = read.read_experiment_size() # configure perf runner = pyperf.Runner() # give a by-configuration name to the experiment current_experiment_name = configuration.get_experiment_name( tada_configuration_dict, chosen_size) # set the name of the experiment for perf runner.metadata[constants.DESCRIPTION_METANAME] = current_experiment_name # read the chosen types func_type = configuration.get_types(tada_configuration_dict) # initialize path for schema path = None gen_func = None # using hypothesis to generate experiment data if func_type[0] == "hypothesis": # read path from arguments path = configuration.get_schema_path(tada_configuration_dict) if func_type[0] == "custom": data_directory = configuration.get_data_directory( tada_configuration_dict) if data_directory != "": package.add_data_sys_path( configuration.get_data_directory(tada_configuration_dict)) data_module = importlib.import_module( configuration.get_data_module(tada_configuration_dict))
if __name__ == "__main__":
    # load the configuration dictionary from the well-known configuration file
    config_dict = configuration.read(constants.CONFIGURATION)
    # make the configured directory importable
    package.add_sys_path(configuration.get_directory(config_dict))
    # reflectively import the configured module, then look up the function in it
    module_under_test = importlib.import_module(
        configuration.get_module(config_dict))
    function_under_test = getattr(
        module_under_test, configuration.get_function(config_dict))
    # determine the experiment size for this run
    size = read.read_experiment_size()
    # NOTE(review): this block uses perf.Runner() while a sibling block in this
    # file uses pyperf.Runner(); the `perf` package was renamed to `pyperf` --
    # confirm which name this file actually imports before unifying them.
    runner = perf.Runner()
    # derive a per-configuration experiment name and record it in perf metadata
    experiment_name = configuration.get_experiment_name(config_dict, size)
    runner.metadata[constants.DESCRIPTION_METANAME] = experiment_name
    # generate input data for the chosen types, then benchmark the function
    benchmark = runner.bench_func(
        experiment_name,
        run.run_benchmark,
        function_under_test,
        *generate.generate_data(configuration.get_types(config_dict), size),
    )
    # persist the perf results from running the benchmark
    save.save_benchmark_results(benchmark, experiment_name)