def test_general(self):
    """Model one_parameter_6 with the refining modeler and check every metric's model.

    met1 is expected to yield a constant hypothesis; met2..met4 are each
    expected to yield a single quadratic polynomial term. The original
    repeated the polynomial assertions verbatim for met2/met3/met4; they
    are factored into ``_assert_quadratic_polynomial``.
    """
    experiment = read_text_file('data/text/one_parameter_6.txt')
    # initialize model generator
    model_generator = ModelGenerator(experiment, RefiningModeler())
    # create models from data
    model_generator.model_all()
    models = experiment.modelers[0].models

    # met1: constant model with the known coefficient
    cp0 = Callpath('met1'), Metric('')
    self.assertIsInstance(models[cp0].hypothesis, ConstantHypothesis)
    self.assertAlmostEqual(models[cp0].hypothesis.function.constant_coefficient, 4.068)

    # met2..met4: identical expectations, checked by the shared helper
    for callpath_name in ('met2', 'met3', 'met4'):
        self._assert_quadratic_polynomial(models[Callpath(callpath_name), Metric('')])

def _assert_quadratic_polynomial(self, model):
    """Assert *model* is a single-parameter hypothesis with exactly one
    compound term holding one simple polynomial term of exponent 2."""
    self.assertIsInstance(model.hypothesis, SingleParameterHypothesis)
    function = model.hypothesis.function
    self.assertEqual(len(function.compound_terms), 1)
    compound_term = function.compound_terms[0]
    self.assertEqual(len(compound_term.simple_terms), 1)
    simple_term = compound_term.simple_terms[0]
    self.assertEqual(simple_term.term_type, 'polynomial')
    self.assertAlmostEqual(simple_term.exponent, 2.0)
def test_3parameters_reversed(self):
    """The best measurement points must be found even when the measurement
    list is traversed in reverse order."""
    experiment = read_text_file('data/text/three_parameter_1.txt')
    modeler = MultiParameterModeler()
    original = experiment.measurements[(Callpath('reg'), Metric('metr'))]
    best_points = modeler.find_best_measurement_points(list(reversed(original)))
    self.assertEqual(3, len(best_points))
    # Expected coordinate sequences per parameter, in discovery order.
    expected_per_parameter = [
        [Coordinate(value) for value in (60, 50, 40, 30, 20)],
        [Coordinate(value) for value in (5, 4, 3, 2, 1)],
        [Coordinate(value) for value in (500, 400, 300, 200, 100)],
    ]
    for expected_coordinates, found in zip(expected_per_parameter, best_points):
        self.assertListEqual(expected_coordinates,
                             [measurement.coordinate for measurement in found])
def test_2parameters_random(self):
    """The first measurement points must be found regardless of the order
    in which the measurements appear.

    NOTE: the original shuffled the list ``len(measurements)`` times;
    ``random.shuffle`` already produces a uniform random permutation in a
    single call, so the extra passes added no randomness — only wasted work.
    """
    experiment = read_text_file('data/text/two_parameter_1.txt')
    modeler = MultiParameterModeler()
    measurements = experiment.measurements[(Callpath('reg'), Metric('metr'))]
    # one shuffle is sufficient to randomize the order
    shuffle(measurements)
    f_msm = modeler.find_first_measurement_points(measurements)
    self.assertEqual(len(f_msm), 2)
    # Order is irrelevant after shuffling, so compare as sets.
    self.assertSetEqual(
        set(m.coordinate for m in f_msm[0]),
        {Coordinate(20), Coordinate(30), Coordinate(40), Coordinate(50), Coordinate(60)})
    self.assertSetEqual(
        set(m.coordinate for m in f_msm[1]),
        {Coordinate(1), Coordinate(2), Coordinate(3), Coordinate(4), Coordinate(5)})
def setUpClass(cls) -> None:
    """Model one_parameter_1 once and round-trip it through the JSON schema."""
    cls.experiment = read_text_file("data/text/one_parameter_1.txt")
    ModelGenerator(cls.experiment).model_all()
    schema = ExperimentSchema()
    serialized = schema.dumps(cls.experiment)
    cls.reconstructed: Experiment = schema.loads(serialized)
def test_2parameters_reversed(self):
    """The best measurement points must be found even when the measurement
    list is traversed in reverse order."""
    experiment = read_text_file('data/text/two_parameter_1.txt')
    modeler = MultiParameterModeler()
    original = experiment.measurements[(Callpath('reg'), Metric('metr'))]
    best_points = modeler.find_best_measurement_points(list(reversed(original)))
    self.assertEqual(2, len(best_points))
    # Expected single-value coordinate sequences per parameter.
    expected_per_parameter = [
        [Coordinate((value,)) for value in (60, 50, 40, 30, 20)],
        [Coordinate((value,)) for value in (5, 4, 3, 2, 1)],
    ]
    for expected_coordinates, found in zip(expected_per_parameter, best_points):
        self.assertListEqual(expected_coordinates,
                             [measurement.coordinate for measurement in found])
def test_additional_keys_in_measurements(self):
    """Loading must reject unknown keys inside the measurements section."""
    experiment = read_text_file("data/text/one_parameter_1.txt")
    schema = ExperimentSchema()
    serialized = schema.dump(experiment)
    serialized['measurements']['TEST_ATTRIBUTE'] = 'TEST_ATTRIBUTE'
    with self.assertRaises(ValidationError):
        schema.load(serialized)
def test_additional_keys_in_experiment(self):
    """Unknown top-level keys must not end up as attributes on the loaded experiment."""
    experiment = read_text_file("data/text/one_parameter_1.txt")
    schema = ExperimentSchema()
    serialized = schema.dump(experiment)
    serialized['TEST_ATTRIBUTE'] = 'TEST_ATTRIBUTE'
    reconstructed: Experiment = schema.load(serialized)
    self.assertFalse(hasattr(reconstructed, 'TEST_ATTRIBUTE'))
def test_default_multi_parameter_modeling(self):
    """The default modeler must model two- and three-parameter data sets
    without raising."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    for input_file in ('data/text/two_parameter_1.txt',
                       'data/text/three_parameter_1.txt',
                       'data/text/three_parameter_2.txt',
                       'data/text/three_parameter_3.txt'):
        experiment = read_text_file(input_file)
        # modeling every callpath/metric must succeed
        ModelGenerator(experiment).model_all()
def test_two_parameter_modeling(self):
    """All models produced for two_parameter_3 must be equal to each other."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    experiment = read_text_file("data/text/two_parameter_3.txt")
    # generate models for all callpath/metric combinations
    ModelGenerator(experiment).model_all()
    models = experiment.modelers[0].models.values()
    reference = next(iter(models))
    for current in models:
        self.assertApproxFunction(reference.hypothesis.function,
                                  current.hypothesis.function)
        self.assertEqual(reference.hypothesis, current.hypothesis)
def main():
    """Build a GPU-direct multi-parameter model for the 'reg'/'metr' data."""
    experiment = text_file_reader.read_text_file(
        "tests/data/text/two_parameter_1.txt")
    measurements = experiment.measurements[(Callpath('reg'), Metric('metr'))]
    modeller = GPUDirectMultiParameterModeler()
    model = modeller.create_model(measurements)
def setUpClass(cls) -> None:
    """Model two_parameter_3 once and round-trip it through a temporary file."""
    cls.experiment = read_text_file("data/text/two_parameter_3.txt")
    ModelGenerator(cls.experiment).model_all()
    with tempfile.TemporaryFile() as storage:
        write_experiment(cls.experiment, storage)
        # NOTE(review): assumes write_experiment/read_experiment manage the
        # file position themselves (no explicit seek(0) here) — confirm.
        cls.reconstructed = read_experiment(storage)
def main(args=None, prog=None):
    """Command-line entry point of Extra-P.

    Parses the arguments, loads an experiment from one of the supported
    input formats, generates performance models with the selected modeler,
    optionally saves the experiment, and prints/saves the formatted output.

    :param args: argument list to parse (defaults to ``sys.argv[1:]``).
    :param prog: program name shown in help output.
    """
    # argparse
    # collect the names of all registered single- and multi-parameter modelers
    modelers_list = list(set(k.lower() for k in chain(single_parameter.all_modelers.keys(),
                                                      multi_parameter.all_modelers.keys())))
    parser = argparse.ArgumentParser(prog=prog, description=extrap.__description__, add_help=False)
    positional_arguments = parser.add_argument_group("Positional arguments")
    basic_arguments = parser.add_argument_group("Optional arguments")
    basic_arguments.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                                 help='Show this help message and exit')
    basic_arguments.add_argument("--version", action="version",
                                 version=extrap.__title__ + " " + extrap.__version__,
                                 help="Show program's version number and exit")
    basic_arguments.add_argument("--log", action="store", dest="log_level", type=str.lower,
                                 default='warning',
                                 choices=['debug', 'info', 'warning', 'error', 'critical'],
                                 help="Set program's log level (default: warning)")

    # input options: exactly one input format must be selected
    input_options = parser.add_argument_group("Input options")
    group = input_options.add_mutually_exclusive_group(required=True)
    group.add_argument("--cube", action="store_true", default=False, dest="cube",
                       help="Load data from CUBE files")
    group.add_argument("--text", action="store_true", default=False, dest="text",
                       help="Load data from text files")
    group.add_argument("--talpas", action="store_true", default=False, dest="talpas",
                       help="Load data from Talpas data format")
    group.add_argument("--json", action="store_true", default=False, dest="json",
                       help="Load data from JSON or JSON Lines file")
    group.add_argument("--extra-p-3", action="store_true", default=False, dest="extrap3",
                       help="Load data from Extra-P 3 experiment")
    input_options.add_argument("--scaling", action="store", dest="scaling_type", default="weak",
                               type=str.lower, choices=["weak", "strong"],
                               help="Set weak or strong scaling when loading data from CUBE files "
                                    "(default: weak)")

    # modeling options
    modeling_options = parser.add_argument_group("Modeling options")
    modeling_options.add_argument("--median", action="store_true", dest="median",
                                  help="Use median values for computation instead of mean values")
    modeling_options.add_argument("--modeler", action="store", dest="modeler", default='default',
                                  type=str.lower, choices=modelers_list,
                                  help="Selects the modeler for generating the performance models")
    modeling_options.add_argument("--options", dest="modeler_options", default={}, nargs='+',
                                  metavar="KEY=VALUE", action=ModelerOptionsAction,
                                  help="Options for the selected modeler")
    modeling_options.add_argument("--help-modeler", choices=modelers_list, type=str.lower,
                                  help="Show help for modeler options and exit",
                                  action=ModelerHelpAction)

    # output options
    output_options = parser.add_argument_group("Output options")
    output_options.add_argument("--out", action="store", metavar="OUTPUT_PATH", dest="out",
                                help="Specify the output path for Extra-P results")
    output_options.add_argument("--print", action="store", dest="print_type", default="all",
                                choices=["all", "callpaths", "metrics", "parameters", "functions"],
                                help="Set which information should be displayed after modeling "
                                     "(default: all)")
    output_options.add_argument("--save-experiment", action="store", metavar="EXPERIMENT_PATH",
                                dest="save_experiment",
                                help="Saves the experiment including all models as Extra-P experiment "
                                     "(if no extension is specified, '.extra-p' is appended)")
    positional_arguments.add_argument("path", metavar="FILEPATH", type=str, action="store",
                                      help="Specify a file path for Extra-P to work with")
    arguments = parser.parse_args(args)

    # set log level
    loglevel = logging.getLevelName(arguments.log_level.upper())
    # set output print type
    printtype = arguments.print_type.upper()

    # set log format location etc.
    if loglevel == logging.DEBUG:
        # import warnings
        # warnings.simplefilter('always', DeprecationWarning)
        # check if log file exists and create it if necessary
        # if not os.path.exists("../temp/extrap.log"):
        #     log_file = open("../temp/extrap.log","w")
        #     log_file.close()
        # logging.basicConfig(format="%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s - %(funcName)10s(): %(message)s",
        #                     level=loglevel, datefmt="%m/%d/%Y %I:%M:%S %p", filename="../temp/extrap.log", filemode="w")
        logging.basicConfig(
            format="%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s - %(funcName)10s(): %(message)s",
            level=loglevel, datefmt="%m/%d/%Y %I:%M:%S %p")
    else:
        logging.basicConfig(
            format="%(levelname)s: %(message)s", level=loglevel)

    # check scaling type
    scaling_type = arguments.scaling_type

    # set use mean or median for computation
    use_median = arguments.median

    # save modeler output to file?
    print_path = None
    if arguments.out is not None:
        print_output = True
        print_path = arguments.out
    else:
        print_output = False

    if arguments.path is not None:
        with ProgressBar(desc='Loading file') as pbar:
            if arguments.cube:
                # load data from cube files
                if os.path.isdir(arguments.path):
                    experiment = read_cube_file(arguments.path, scaling_type)
                else:
                    logging.error("The given path is not valid. It must point to a directory.")
                    sys.exit(1)
            elif os.path.isfile(arguments.path):
                if arguments.text:
                    # load data from text files
                    experiment = read_text_file(arguments.path, pbar)
                elif arguments.talpas:
                    # load data from talpas format
                    experiment = read_talpas_file(arguments.path, pbar)
                elif arguments.json:
                    # load data from json file
                    experiment = read_json_file(arguments.path, pbar)
                elif arguments.extrap3:
                    # load data from Extra-P 3 file
                    experiment = read_extrap3_experiment(arguments.path, pbar)
                else:
                    logging.error("The file format specifier is missing.")
                    sys.exit(1)
            else:
                logging.error("The given file path is not valid.")
                sys.exit(1)

        experiment.debug()

        # initialize model generator
        model_generator = ModelGenerator(
            experiment, modeler=arguments.modeler, use_median=use_median)

        # apply modeler options
        modeler = model_generator.modeler
        if isinstance(modeler, MultiParameterModeler) and arguments.modeler_options:
            # set single-parameter modeler of multi-parameter modeler
            single_modeler = arguments.modeler_options[SINGLE_PARAMETER_MODELER_KEY]
            if single_modeler is not None:
                modeler.single_parameter_modeler = single_parameter.all_modelers[single_modeler]()
            # apply options of single-parameter modeler
            if modeler.single_parameter_modeler is not None:
                for name, value in arguments.modeler_options[SINGLE_PARAMETER_OPTIONS_KEY].items():
                    if value is not None:
                        setattr(modeler.single_parameter_modeler, name, value)

        # apply remaining options directly to the selected modeler
        for name, value in arguments.modeler_options.items():
            if value is not None:
                setattr(modeler, name, value)

        with ProgressBar(desc='Generating models') as pbar:
            # create models from data
            model_generator.model_all(pbar)

        if arguments.save_experiment:
            try:
                with ProgressBar(desc='Saving experiment') as pbar:
                    # append the default extension when none was given
                    if not os.path.splitext(arguments.save_experiment)[1]:
                        arguments.save_experiment += '.extra-p'
                    experiment_io.write_experiment(experiment, arguments.save_experiment, pbar)
            except RecoverableError as err:
                logging.error('Saving experiment: ' + str(err))
                sys.exit(1)

        # format modeler output into text
        text = format_output(experiment, printtype)

        # print formatted output to command line
        print(text)

        # save formatted output to text file
        if print_output:
            save_output(text, print_path)
    else:
        logging.error("No file path given to load files.")
        sys.exit(1)
def setUpClass(cls) -> None:
    """Serialize the unmodeled two_parameter_3 experiment to JSON and load it back."""
    cls.experiment = read_text_file("data/text/two_parameter_3.txt")
    schema = ExperimentSchema()
    serialized = schema.dumps(cls.experiment)
    cls.reconstructed: Experiment = schema.loads(serialized)
def test_validation(self):
    """A dumped experiment must validate against the schema without errors."""
    experiment = read_text_file("data/text/one_parameter_1.txt")
    schema = ExperimentSchema()
    serialized = schema.dump(experiment)
    validation_errors = schema.validate(serialized)
    self.assertDictEqual({}, validation_errors)
def setUpClass(cls) -> None:
    """Build the modeled two_parameter_3 experiment shared by this class's tests."""
    cls.experiment = read_text_file("data/text/two_parameter_3.txt")
    ModelGenerator(cls.experiment).model_all()