Example #1
    def evaluation_by_type(self, current_configuration: Configuration):
        """
        Forwards the call to the specific Repeater Type to evaluate whether the Configuration was measured precisely.
        :param current_configuration: instance of the Configuration class.
        :return: int - the number of measurements of the Configuration that still need to be performed.
        """

        if self._type is None:
            raise TypeError("Repeater evaluation Type was not selected!")
        else:
            if current_configuration.status == Configuration.Status.NEW:
                number_of_measurements = self._type.evaluate(
                    current_configuration=current_configuration,
                    experiment=self.experiment)
                current_configuration.status = Configuration.Status.EVALUATED
                # The Default Configuration is always measured, even if it is already known to the Experiment.
                if self.experiment.try_add_configuration(current_configuration) or \
                        current_configuration.type == Configuration.Type.DEFAULT:
                    return number_of_measurements
                else:
                    # Duplicate Configuration: its status is already EVALUATED, so no tasks are requested.
                    return 0
            else:
                if current_configuration.results:
                    number_of_measurements = self._type.evaluate(
                        current_configuration=current_configuration,
                        experiment=self.experiment)
                    if number_of_measurements > 0:
                        current_configuration.status = Configuration.Status.REPEATED_MEASURING
                        return number_of_measurements
                    else:
                        current_configuration.status = Configuration.Status.MEASURED
                        return 0
                else:
                    current_configuration.status = Configuration.Status.EVALUATED
                    return 0
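
The status transitions in evaluation_by_type are easier to see in isolation. Below is a minimal, self-contained sketch of the same branching logic; the Status enum and the decide() helper are stand-ins invented for illustration, not part of the project:

from enum import Enum, auto

class Status(Enum):
    NEW = auto()
    EVALUATED = auto()
    REPEATED_MEASURING = auto()
    MEASURED = auto()

def decide(status: Status, has_results: bool, needed: int) -> tuple:
    """Mirrors evaluation_by_type's branching: returns (new_status, tasks_to_run)."""
    if status == Status.NEW:
        # A fresh Configuration is evaluated and its measurements are requested.
        return Status.EVALUATED, needed
    if has_results:
        # Already-evaluated Configuration: keep repeating until the evaluator asks for 0 tasks.
        if needed > 0:
            return Status.REPEATED_MEASURING, needed
        return Status.MEASURED, 0
    # No results arrived yet: stay EVALUATED and request nothing.
    return Status.EVALUATED, 0

print(decide(Status.EVALUATED, has_results=True, needed=2))
# -> (<Status.REPEATED_MEASURING: 3>, 2)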
Example #2
def measure_task(configurations_sample: list, tasks_sample: list,
                 experiment_description: dict, search_space: Hyperparameter,
                 measured_tasks: int, config_type: Configuration.Type,
                 config_status: Configuration.Status):
    """
    Test function for the Repeater module.
    Main steps:
    0. Take default tasks sample.
    1. Create instances of Repeater, Experiment, Default Configuration according to test requirements.
    2. Create instance of current measurement.
    3. Call Repeater function.

    :param configurations_sample: default sample of measured configurations.
    :param tasks_sample: default sample of measured tasks.
    :param experiment_description: Experiment Description sample in JSON format.
    :param search_space: Search Space sample.
    :param measured_tasks: number of already measured tasks in the current measurement.
    :param config_type: configuration type of the current measurement.
    :param config_status: configuration status of the current measurement.

    :return: list with the configuration status and the number of tasks to measure.
    """
    experiment = Experiment(experiment_description, search_space)
    Configuration.set_task_config(experiment.description["TaskConfiguration"])
    configuration = Configuration(configurations_sample[1]["Params"],
                                  config_type, experiment.unique_id)
    configuration.status = config_status
    for i in range(measured_tasks):
        configuration.add_task(tasks_sample[i])
    orchestrator = RepeaterOrchestration(experiment)
    if config_type == Configuration.Type.DEFAULT:
        orchestrator._type = orchestrator.get_repeater(True)
    else:
        orchestrator._type = orchestrator.get_repeater()
        default_configuration = Configuration(
            configurations_sample[0]["Params"], Configuration.Type.DEFAULT,
            experiment.unique_id)
        default_configuration.status = Configuration.Status.MEASURED
        default_configuration._task_number = configurations_sample[0]["Tasks"]
        default_configuration.results = configurations_sample[0]["Results"]
        default_configuration._standard_deviation = configurations_sample[0][
            "STD"]
        experiment.default_configuration = default_configuration
    task = json.dumps({"configuration": configuration.to_json()})

    # measure_configurations() is normally a messaging callback; the channel, method,
    # and properties are unused in this direct call, so None stubs are sufficient.
    dummy_channel = None
    dummy_method = None
    dummy_properties = None

    results_measurement = orchestrator.measure_configurations(
        dummy_channel, dummy_method, dummy_properties, task)

    return results_measurement
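
For context, here is a sketch of how measure_task might be driven from a parametrized test. It assumes pytest, the same imports as above, and project-provided fixtures (configurations_sample, tasks_sample, experiment_description, search_space); the parameter values and the PREDICTED type are illustrative assumptions, not taken from the real test suite:

import pytest

# Hypothetical parametrization of measure_task; the fixtures are assumed
# to come from the project's conftest, and the expectations are placeholders.
@pytest.mark.parametrize("measured_tasks, config_type, config_status", [
    (0, Configuration.Type.DEFAULT, Configuration.Status.NEW),
    (2, Configuration.Type.PREDICTED, Configuration.Status.EVALUATED),  # PREDICTED is assumed
])
def test_measure(configurations_sample, tasks_sample, experiment_description,
                 search_space, measured_tasks, config_type, config_status):
    results = measure_task(configurations_sample, tasks_sample, experiment_description,
                           search_space, measured_tasks, config_type, config_status)
    # measure_task returns the configuration status and the number of tasks to measure.
    assert results is not None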
Example #3
    def test_1_write_exp_state_record(self):
        # Test #1. Format and write an Experiment state (ES) record to the database.
        # Expected result: the record can be read back from the database and contains
        # all required ES fields; the Experiment id matches.
        experiment, _ = self.initialize_experiment()
        c1 = Configuration(OrderedDict({
            "frequency": 2900.0,
            "threads": 32
        }), Configuration.Type.DEFAULT, experiment.unique_id)
        c1.status = c1.Status.MEASURED
        experiment.default_configuration = c1
        database.write_one_record("Experiment_state",
                                  experiment.get_experiment_state_record())
        written_record = database.get_all_records("Experiment_state")[0]
        assert experiment.unique_id == written_record["Exp_unique_ID"]
        assert experiment.get_number_of_measured_configurations() == \
            written_record["Number_of_measured_configs"]
        assert experiment.get_bad_configuration_number() == \
            written_record["Number_of_bad_configs"]
        assert experiment.get_current_solution().get_configuration_record() == \
            written_record["Current_solution"]
        assert experiment.get_model_state() == written_record["is_model_valid"]
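
For reference, the assertions above imply the following shape for an "Experiment_state" record. The field names come from the test; the values below are placeholders, not real data:

# Implied shape of an "Experiment_state" record (values are placeholders).
experiment_state_record = {
    "Exp_unique_ID": "...",            # experiment.unique_id
    "Number_of_measured_configs": 1,   # experiment.get_number_of_measured_configurations()
    "Number_of_bad_configs": 0,        # experiment.get_bad_configuration_number()
    "Current_solution": {},            # experiment.get_current_solution().get_configuration_record()
    "is_model_valid": False,           # experiment.get_model_state()
}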