Example 1
    def model_experiment(self, experiment):
        # initialize model generator
        model_generator = ModelGenerator(experiment, use_median=self.median, name="Default Model")
        with ProgressWindow(self, 'Modeling') as pbar:
            # create models from data
            model_generator.model_all(pbar)
        self.setExperiment(experiment)
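
All of the examples on this page follow the same basic pattern: read an experiment from an input file, construct a ModelGenerator for it, and call model_all() to create the models, which are then available through experiment.modelers. The following is a minimal, self-contained sketch of that pattern; the import paths are assumptions about the Extra-P package layout and may need adjusting for your installation.

# Minimal sketch of the shared pattern; the module paths in these imports
# are assumptions and may differ between Extra-P versions.
from extrap.fileio.text_file_reader import read_text_file
from extrap.modelers.model_generator import ModelGenerator

experiment = read_text_file('data/text/one_parameter_6.txt')

# initialize model generator (use_median=False selects mean values)
model_generator = ModelGenerator(experiment, name="Default Model", use_median=False)

# create models from data
model_generator.model_all()

# the generated models are attached to the experiment
models = experiment.modelers[0].models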
Example 2
    def test_general(self):
        experiment = read_text_file('data/text/one_parameter_6.txt')
        # initialize model generator
        model_generator = ModelGenerator(experiment, RefiningModeler())

        # create models from data
        model_generator.model_all()

        models = experiment.modelers[0].models
        cp0 = Callpath('met1'), Metric('')
        self.assertIsInstance(models[cp0].hypothesis, ConstantHypothesis)
        self.assertAlmostEqual(models[cp0].hypothesis.function.constant_coefficient, 4.068)

        cp1 = Callpath('met2'), Metric('')
        self.assertIsInstance(models[cp1].hypothesis, SingleParameterHypothesis)
        self.assertEqual(len(models[cp1].hypothesis.function.compound_terms), 1)
        self.assertEqual(len(models[cp1].hypothesis.function.compound_terms[0].simple_terms), 1)
        self.assertEqual(models[cp1].hypothesis.function.compound_terms[0].simple_terms[0].term_type, 'polynomial')
        self.assertAlmostEqual(models[cp1].hypothesis.function.compound_terms[0].simple_terms[0].exponent, 2.0)

        cp2 = Callpath('met3'), Metric('')
        self.assertIsInstance(models[cp2].hypothesis, SingleParameterHypothesis)
        self.assertEqual(len(models[cp2].hypothesis.function.compound_terms), 1)
        self.assertEqual(len(models[cp2].hypothesis.function.compound_terms[0].simple_terms), 1)
        self.assertEqual(models[cp2].hypothesis.function.compound_terms[0].simple_terms[0].term_type, 'polynomial')
        self.assertAlmostEqual(models[cp2].hypothesis.function.compound_terms[0].simple_terms[0].exponent, 2.0)

        cp3 = Callpath('met4'), Metric('')
        self.assertIsInstance(models[cp3].hypothesis, SingleParameterHypothesis)
        self.assertEqual(len(models[cp3].hypothesis.function.compound_terms), 1)
        self.assertEqual(len(models[cp3].hypothesis.function.compound_terms[0].simple_terms), 1)
        self.assertEqual(models[cp3].hypothesis.function.compound_terms[0].simple_terms[0].term_type, 'polynomial')
        self.assertAlmostEqual(models[cp3].hypothesis.function.compound_terms[0].simple_terms[0].exponent, 2.0)
Example 3
    def remodel(self):
        # set the modeler options
        if self.model_mean_radio.isChecked():
            use_median = False
        elif self.model_median_radio.isChecked():
            use_median = True
        else:
            # neither radio button is checked; fall back to mean values
            use_median = False

        # initialize model generator
        experiment = self.main_widget.getExperiment()
        if experiment is None:
            return

        model_generator = ModelGenerator(experiment, use_median=use_median, modeler=self._modeler)

        model_generator.name = self.model_name_edit.text()
        # print(QCoreApplication.hasPendingEvents())
        with ProgressWindow(self.main_widget, 'Generating models') as pbar:
            # create models from data
            model_generator.model_all(pbar)

            self.main_widget.selector_widget.updateModelList()
            self.main_widget.selector_widget.selectLastModel()
            self.main_widget.updateMinMaxValue()

            # must happen before 'valuesChanged' to update the color boxes
            self.main_widget.selector_widget.tree_model.valuesChanged()
            self.main_widget.update()
Example 4
    def test_complete_matrix_2p(self):
        experiment = read_jsonlines_file('data/jsonlines/complete_matrix_2p.jsonl')
        modeler = MultiParameterModeler()
        modeler.single_parameter_point_selection = 'all'
        # initialize model generator
        model_generator = ModelGenerator(experiment, modeler)
        # create models from data
        model_generator.model_all()
Example 5
    def test_default_multi_parameter_modeling(self):
        import logging
        logging.basicConfig(level=logging.DEBUG)
        files = [
            'data/text/two_parameter_1.txt', "data/text/three_parameter_1.txt",
            "data/text/three_parameter_2.txt",
            "data/text/three_parameter_3.txt"
        ]
        for f in files:
            experiment = read_text_file(f)

            # initialize model generator
            model_generator = ModelGenerator(experiment)
            # create models from data
            model_generator.model_all()
Example 6
    def test_two_parameter_modeling(self):
        import logging
        logging.basicConfig(level=logging.DEBUG)
        experiment = read_text_file("data/text/two_parameter_3.txt")

        # initialize model generator
        model_generator = ModelGenerator(experiment)
        # create models from data
        model_generator.model_all()

        first = next(iter(experiment.modelers[0].models.values()))

        for model in experiment.modelers[0].models.values():
            self.assertApproxFunction(first.hypothesis.function,
                                      model.hypothesis.function)
            self.assertEqual(first.hypothesis, model.hypothesis)
Example 7
def deserialize_MultiParameterModelGenerator(exp, supports_sparse, ioHelper):
    userName = ioHelper.readString()

    # read the options
    generate_strategy = ioHelper.readString()

    use_median = False
    # convert generate model options to enum
    if generate_strategy == "GENERATE_MODEL_MEAN":
        use_median = False
    elif generate_strategy == "GENERATE_MODEL_MEDIAN":
        use_median = True
    else:
        logging.error("Invalid ModelOptions found in File.")

    with ioHelper.begin_transaction():

        min_number_points = ioHelper.readInt()
        single_strategy = ioHelper.readString()
        add_points = ioHelper.readInt()
        number_add_points = ioHelper.readInt()
        multi_strategy = ioHelper.readString()

        # convert ints to bool values
        use_add_points = bool(add_points)

        decode_strategy(multi_strategy, single_strategy, supports_sparse)

    return ModelGenerator(exp, MultiParameterModeler(), userName, use_median)
Example 8
    @classmethod
    def setUpClass(cls) -> None:
        cls.experiment = read_text_file("data/text/one_parameter_1.txt")
        ModelGenerator(cls.experiment).model_all()
        schema = ExperimentSchema()
        # print(json.dumps(schema.dump(cls.experiment), indent=1))
        exp_str = schema.dumps(cls.experiment)
        cls.reconstructed: Experiment = schema.loads(exp_str)
Example 9
    def test_matrix_3p(self):
        experiment = read_jsonlines_file('data/jsonlines/matrix_3p.jsonl')
        modeler = MultiParameterModeler()
        modeler.single_parameter_point_selection = 'all'
        # initialize model generator
        model_generator = ModelGenerator(experiment, modeler)
        # create models from data
        self.assertWarns(UserWarning, model_generator.model_all)
Example 10
def deserialize_SingleParameterModelGenerator(exp, supports_sparse, ioHelper):
    userName = ioHelper.readString()
    cvMethod = ioHelper.readString()
    if cvMethod == "CROSSVALIDATION_NONE":
        pass
    elif cvMethod == "CROSSVALIDATION_LEAVE_ONE_OUT":
        pass
    elif cvMethod == "CROSSVALIDATION_LEAVE_P_OUT":
        pass
    elif cvMethod == "CROSSVALIDATION_K_FOLD":
        pass
    elif cvMethod == "CROSSVALIDATION_TWO_FOLD":
        pass
    else:
        logging.error(
            "Invalid Crossvalidation Method found in File. Defaulting to No crossvalidation."
        )
    eps = ioHelper.readValue()

    # read the options
    generate_strategy = ioHelper.readString()
    # convert generate model options to enum
    if generate_strategy == "GENERATE_MODEL_MEAN":
        use_median = False
    elif generate_strategy == "GENERATE_MODEL_MEDIAN":
        use_median = True
    else:
        use_median = False
        logging.error("Invalid ModelOptions found in File.")

    with ioHelper.begin_transaction():
        min_number_points = ioHelper.readInt()
        single_strategy = ioHelper.readString()
        add_points = ioHelper.readInt()
        number_add_points = ioHelper.readInt()
        multi_strategy = ioHelper.readString()
        # convert ints to bool values
        use_add_points = bool(add_points)

        decode_strategy(multi_strategy, single_strategy, supports_sparse)

    return ModelGenerator(exp, SingleParameterModeler(), userName, use_median)
Example 11
    @classmethod
    def setUpClass(cls) -> None:
        cls.experiment = read_text_file("data/text/two_parameter_3.txt")
        ModelGenerator(cls.experiment).model_all()
        with tempfile.TemporaryFile() as tmp:
            write_experiment(cls.experiment, tmp)
            cls.reconstructed = read_experiment(tmp)
Example 12
def main(args=None, prog=None):
    # argparse
    modelers_list = list(set(k.lower() for k in
                             chain(single_parameter.all_modelers.keys(), multi_parameter.all_modelers.keys())))
    parser = argparse.ArgumentParser(prog=prog, description=extrap.__description__, add_help=False)
    positional_arguments = parser.add_argument_group("Positional arguments")
    basic_arguments = parser.add_argument_group("Optional arguments")
    basic_arguments.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                                 help='Show this help message and exit')

    basic_arguments.add_argument("--version", action="version", version=extrap.__title__ + " " + extrap.__version__,
                                 help="Show program's version number and exit")
    basic_arguments.add_argument("--log", action="store", dest="log_level", type=str.lower, default='warning',
                                 choices=['debug', 'info', 'warning', 'error', 'critical'],
                                 help="Set program's log level (default: warning)")

    input_options = parser.add_argument_group("Input options")
    group = input_options.add_mutually_exclusive_group(required=True)
    group.add_argument("--cube", action="store_true", default=False, dest="cube", help="Load data from CUBE files")
    group.add_argument("--text", action="store_true", default=False, dest="text", help="Load data from text files")
    group.add_argument("--talpas", action="store_true", default=False, dest="talpas",
                       help="Load data from Talpas data format")
    group.add_argument("--json", action="store_true", default=False, dest="json",
                       help="Load data from JSON or JSON Lines file")
    group.add_argument("--extra-p-3", action="store_true", default=False, dest="extrap3",
                       help="Load data from Extra-P 3 experiment")
    input_options.add_argument("--scaling", action="store", dest="scaling_type", default="weak", type=str.lower,
                               choices=["weak", "strong"],
                               help="Set weak or strong scaling when loading data from CUBE files (default: weak)")

    modeling_options = parser.add_argument_group("Modeling options")
    modeling_options.add_argument("--median", action="store_true", dest="median",
                                  help="Use median values for computation instead of mean values")
    modeling_options.add_argument("--modeler", action="store", dest="modeler", default='default', type=str.lower,
                                  choices=modelers_list,
                                  help="Selects the modeler for generating the performance models")
    modeling_options.add_argument("--options", dest="modeler_options", default={}, nargs='+', metavar="KEY=VALUE",
                                  action=ModelerOptionsAction,
                                  help="Options for the selected modeler")
    modeling_options.add_argument("--help-modeler", choices=modelers_list, type=str.lower,
                                  help="Show help for modeler options and exit",
                                  action=ModelerHelpAction)

    output_options = parser.add_argument_group("Output options")
    output_options.add_argument("--out", action="store", metavar="OUTPUT_PATH", dest="out",
                                help="Specify the output path for Extra-P results")
    output_options.add_argument("--print", action="store", dest="print_type", default="all",
                                choices=["all", "callpaths", "metrics", "parameters", "functions"],
                                help="Set which information should be displayed after modeling "
                                     "(default: all)")
    output_options.add_argument("--save-experiment", action="store", metavar="EXPERIMENT_PATH", dest="save_experiment",
                                help="Saves the experiment including all models as Extra-P experiment "
                                     "(if no extension is specified, '.extra-p' is appended)")

    positional_arguments.add_argument("path", metavar="FILEPATH", type=str, action="store",
                                      help="Specify a file path for Extra-P to work with")
    arguments = parser.parse_args(args)

    # set log level
    loglevel = logging.getLevelName(arguments.log_level.upper())
    # set output print type
    printtype = arguments.print_type.upper()

    # set log format location etc.
    if loglevel == logging.DEBUG:
        # import warnings
        # warnings.simplefilter('always', DeprecationWarning)
        # check if log file exists and create it if necessary
        # if not os.path.exists("../temp/extrap.log"):
        #    log_file = open("../temp/extrap.log","w")
        #    log_file.close()
        # logging.basicConfig(format="%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s - %(funcName)10s():
        # %(message)s", level=loglevel, datefmt="%m/%d/%Y %I:%M:%S %p", filename="../temp/extrap.log", filemode="w")
        logging.basicConfig(
            format="%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s - %(funcName)10s(): %(message)s",
            level=loglevel, datefmt="%m/%d/%Y %I:%M:%S %p")
    else:
        logging.basicConfig(
            format="%(levelname)s: %(message)s", level=loglevel)

    # check scaling type
    scaling_type = arguments.scaling_type

    # set use mean or median for computation
    use_median = arguments.median

    # save modeler output to file?
    print_path = None
    if arguments.out is not None:
        print_output = True
        print_path = arguments.out
    else:
        print_output = False

    if arguments.path is not None:
        with ProgressBar(desc='Loading file') as pbar:
            if arguments.cube:
                # load data from cube files
                if os.path.isdir(arguments.path):
                    experiment = read_cube_file(arguments.path, scaling_type)
                else:
                    logging.error("The given path is not valid. It must point to a directory.")
                    sys.exit(1)
            elif os.path.isfile(arguments.path):
                if arguments.text:
                    # load data from text files
                    experiment = read_text_file(arguments.path, pbar)
                elif arguments.talpas:
                    # load data from talpas format
                    experiment = read_talpas_file(arguments.path, pbar)
                elif arguments.json:
                    # load data from json file
                    experiment = read_json_file(arguments.path, pbar)
                elif arguments.extrap3:
                    # load data from Extra-P 3 file
                    experiment = read_extrap3_experiment(arguments.path, pbar)
                else:
                    logging.error("The file format specifier is missing.")
                    sys.exit(1)
            else:
                logging.error("The given file path is not valid.")
                sys.exit(1)

        experiment.debug()

        # initialize model generator
        model_generator = ModelGenerator(
            experiment, modeler=arguments.modeler, use_median=use_median)

        # apply modeler options
        modeler = model_generator.modeler
        if isinstance(modeler, MultiParameterModeler) and arguments.modeler_options:
            # set single-parameter modeler of multi-parameter modeler
            single_modeler = arguments.modeler_options[SINGLE_PARAMETER_MODELER_KEY]
            if single_modeler is not None:
                modeler.single_parameter_modeler = single_parameter.all_modelers[single_modeler]()
            # apply options of single-parameter modeler
            if modeler.single_parameter_modeler is not None:
                for name, value in arguments.modeler_options[SINGLE_PARAMETER_OPTIONS_KEY].items():
                    if value is not None:
                        setattr(modeler.single_parameter_modeler, name, value)

        for name, value in arguments.modeler_options.items():
            if value is not None:
                setattr(modeler, name, value)

        with ProgressBar(desc='Generating models') as pbar:
            # create models from data
            model_generator.model_all(pbar)

        if arguments.save_experiment:
            try:
                with ProgressBar(desc='Saving experiment') as pbar:
                    if not os.path.splitext(arguments.save_experiment)[1]:
                        arguments.save_experiment += '.extra-p'
                    experiment_io.write_experiment(experiment, arguments.save_experiment, pbar)
            except RecoverableError as err:
                logging.error('Saving experiment: ' + str(err))
                sys.exit(1)

        # format modeler output into text
        text = format_output(experiment, printtype)

        # print formatted output to command line
        print(text)

        # save formatted output to text file
        if print_output:
            save_output(text, print_path)

    else:
        logging.error("No file path given to load files.")
        sys.exit(1)
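
Because main() receives the argument list directly, the command-line interface defined above can also be driven programmatically. The call below is a hypothetical invocation; the input file path is taken from the earlier examples and only serves as an illustration.

# Hypothetical programmatic invocation of the CLI entry point defined above;
# the argument list mirrors what would otherwise come from sys.argv[1:].
main(['--text',                        # load data from a text file
      '--modeler', 'default',          # use the default modeler
      '--median',                      # model with median instead of mean values
      '--print', 'all',                # print all information after modeling
      'data/text/one_parameter_6.txt'])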
Example 13
    def test_input_1(self):
        experiment = read_jsonlines_file('data/jsonlines/input_1.jsonl')
        # initialize model generator
        model_generator = ModelGenerator(experiment)
        # create models from data
        model_generator.model_all()
Example 14
    @classmethod
    def setUpClass(cls) -> None:
        cls.experiment = read_text_file("data/text/two_parameter_3.txt")
        ModelGenerator(cls.experiment).model_all()