# the tests below assume the project's generate module is importable,
# e.g. via: import generate


def test_generate_int_list_makes_size_default():
    """Checks that requesting a generated list of ints returns one"""
    # request a single tuple with a list of ints in it
    requested_types = ["int_list"]
    # assume the doubling experiment is at 100
    current_size = 100
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data is not None


def test_generate_char_list_makes_letter_default():
    """Checks that requesting a generated list of chars returns one"""
    # request a single tuple with a list of chars in it
    requested_types = ["char_list"]
    # assume the doubling experiment is at 100; not needed for this test
    current_size = 100
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data is not None


def test_generate_floats_makes_size_default():
    """Checks that requesting generated floats returns them"""
    # request a single tuple with two floats in it
    requested_types = ["float", "float"]
    # assume the doubling experiment is at 100
    current_size = 100
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data is not None


def test_generate_string_makes_string_default():
    """Checks that requesting a generated string returns one"""
    # request a single tuple with a string in it
    requested_types = ["string"]
    # assume the doubling experiment is at 100
    current_size = 100
    # the default generator will return a tuple with the default string in it
    expected_tuple = (generate.generate_string(current_size),)
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data == expected_tuple


def test_generate_ints_makes_size_default():
    """Checks that requesting generated ints returns them"""
    # request a single tuple with two ints in it
    requested_types = ["int", "int"]
    # assume the doubling experiment is at 100
    current_size = 100
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data is not None


def test_generate_float_makes_size_default():
    """Checks that requesting a generated float returns one"""
    # request a single tuple with a float in it
    requested_types = ["float"]
    # assume the doubling experiment is at 100
    current_size = 100
    # the default generator will return a tuple with 100 in it
    expected_tuple = (current_size,)
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data == expected_tuple


def test_generate_boolean_makes_boolean_default():
    """Checks that requesting a generated boolean returns one"""
    # request a single tuple with a boolean in it
    requested_types = ["boolean"]
    # assume the doubling experiment is at 100; not needed for this test
    current_size = 100
    # the default generator will return a tuple with the default boolean in it
    expected_tuple = (generate.DEFAULT_VALUE_BOOLEAN,)
    # generate the data for the requested_types and the current_size
    generated_data = generate.generate_data(requested_types, current_size)
    assert generated_data == expected_tuple


def test_generate_data_with_gen_func(generate_int_test):
    """Checks that requesting data from a custom generation function returns it"""
    # assume the doubling experiment is at 100
    current_size = 100
    level = 1
    position = [0]
    requested_types = ["custom"]
    path = None
    gen_func = generate_int_test
    generated_data = generate.generate_data(
        requested_types, current_size, level, position, path, gen_func
    )
    assert generated_data is not None


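# a minimal sketch of the generate_int_test fixture that the test above
# relies on, presumably defined in conftest.py; the fixture name is from
# the test, but the generation function's signature is an assumption,
# not the project's actual API
import random

import pytest


@pytest.fixture
def generate_int_test():
    """Provide a hypothetical custom generation function for the test"""

    def _generate(size):
        # return a single random int bounded by the current size
        return random.randint(0, size)

    return _generate

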
def test_generate_data_with_hypothesis(tmpdir):
    """Checks that requesting generated hypothesis data returns some"""
    # write a JSON schema file describing an array of integers
    path = tmpdir.mkdir("sub").join("hello.txt")
    path.write('[{"type": "array", "items": {"type": "integer"}}]')
    # assume the doubling experiment is at 100
    current_size = 100
    level = 1
    position = [0]
    requested_types = ["hypothesis"]
    requested_path = str(path)
    generated_data = generate.generate_data(
        requested_types, current_size, level, position, requested_path
    )
    assert generated_data is not None


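# the "hypothesis" type presumably turns the JSON schema in the file into
# generated data; a minimal sketch of that idea using the
# hypothesis-jsonschema package (from_schema and example are real calls,
# but how tada's generate module actually uses them is an assumption)
import json

from hypothesis_jsonschema import from_schema


def sketch_generate_from_schema(schema_path):
    """Draw one value from a strategy built from the first schema in the file"""
    with open(schema_path) as schema_file:
        schemas = json.load(schema_file)
    # build a Hypothesis strategy from the schema and draw an example value
    strategy = from_schema(schemas[0])
    return strategy.example()

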
if func_type[0] == "custom":
    # make the user's data generation module importable
    data_directory = configuration.get_data_directory(tada_configuration_dict)
    if data_directory != "":
        package.add_data_sys_path(data_directory)
    # reflectively import the chosen data generation module and function
    data_module = importlib.import_module(
        configuration.get_data_module(tada_configuration_dict)
    )
    data_function = getattr(
        data_module, configuration.get_data_function(tada_configuration_dict)
    )
    gen_func = data_function
level = configuration.get_level(tada_configuration_dict)
position = configuration.get_position(tada_configuration_dict)
# generate the data for the requested types and the chosen size
data = generate.generate_data(func_type, chosen_size, level, position, path, gen_func)
# when configured, sort any generated lists before benchmarking
if configuration.get_sortinput(tada_configuration_dict):
    for argument in data:
        if isinstance(argument, list):
            argument.sort()
# run the benchmark using the bench_func from perf
current_benchmark = runner.bench_func(
    current_experiment_name,
    run.run_benchmark,
    analyzed_function,
    *data,
)
# save the perf results from running the benchmark
save.save_benchmark_results(current_benchmark, current_experiment_name)


if __name__ == "__main__":
    # read the configuration file to access the configuration dictionary
    tada_configuration_dict = configuration.read(constants.CONFIGURATION)
    # add the specified directory to the system path
    package.add_sys_path(configuration.get_directory(tada_configuration_dict))
    # reflectively import the chosen module
    analyzed_module = importlib.import_module(
        configuration.get_module(tada_configuration_dict)
    )
    # reflectively access the chosen function
    analyzed_function = getattr(
        analyzed_module, configuration.get_function(tada_configuration_dict)
    )
    # read the chosen_size
    chosen_size = read.read_experiment_size()
    # configure perf
    runner = perf.Runner()
    # give a by-configuration name to the experiment
    current_experiment_name = configuration.get_experiment_name(
        tada_configuration_dict, chosen_size
    )
    # set the name of the experiment for perf
    runner.metadata[constants.DESCRIPTION_METANAME] = current_experiment_name
    # run the benchmark using the bench_func from perf
    current_benchmark = runner.bench_func(
        current_experiment_name,
        run.run_benchmark,
        analyzed_function,
        *generate.generate_data(
            configuration.get_types(tada_configuration_dict), chosen_size
        ),
    )
    # save the perf results from running the benchmark
    save.save_benchmark_results(current_benchmark, current_experiment_name)
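
# for orientation, tada_configuration_dict plausibly resolves to a mapping
# like the sketch below; the key names are assumptions inferred from the
# getters used above (get_directory, get_module, get_function, get_types),
# not tada's documented configuration schema
SKETCH_TADA_CONFIGURATION_DICT = {
    "directory": "/path/to/project/under/analysis",
    "module": "sorting",
    "function": "bubble_sort",
    "types": ["int_list"],
}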