Example #1
    def evaluation_by_type(self, current_configuration: Configuration):
        """
        Forwards a call to the specific Repeater Type to evaluate whether the Configuration was measured precisely.
        :param current_configuration: instance of Configuration class.
        :returns: int, the number of measurements of the Configuration that need to be performed.
        """

        if self._type is None:
            raise TypeError("Repeater evaluation Type was not selected!")
        else:
            if current_configuration.status == Configuration.Status.NEW:
                number_of_measurements = self._type.evaluate(
                    current_configuration=current_configuration,
                    experiment=self.experiment)
                current_configuration.status = Configuration.Status.EVALUATED
                if self.experiment.try_add_configuration(current_configuration) or \
                        current_configuration.type == Configuration.Type.DEFAULT:  # for repeating default configuration
                    return number_of_measurements
                else:
                    current_configuration.status = Configuration.Status.EVALUATED
                    return 0
            else:
                if current_configuration.results:
                    number_of_measurements = self._type.evaluate(
                        current_configuration=current_configuration,
                        experiment=self.experiment)
                    if number_of_measurements > 0:
                        current_configuration.status = Configuration.Status.REPEATED_MEASURING
                        return number_of_measurements
                    else:
                        current_configuration.status = Configuration.Status.MEASURED
                        return 0
                else:
                    current_configuration.status = Configuration.Status.EVALUATED
                    return 0
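
The dispatch above only requires that self._type exposes an evaluate(current_configuration, experiment) method, as the strategies in Examples #8, #9 and #23 do. A minimal sketch of that contract, assuming a duck-typed Configuration with get_tasks(); FixedCountRepeaterStub is an illustrative name, not a class from the project:

# Hypothetical sketch of the strategy contract used by evaluation_by_type().
class FixedCountRepeaterStub:
    def __init__(self, max_tasks_per_configuration: int = 10):
        self.max_tasks_per_configuration = max_tasks_per_configuration

    def evaluate(self, current_configuration, experiment):
        # Number of Tasks still missing for this Configuration, never negative.
        measured = len(current_configuration.get_tasks())
        return max(self.max_tasks_per_configuration - measured, 0)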
Example #2
 def test_3_write_measured_configurations_records(self):
     # Test #3. Format and write multiple Configuration records to the database
     # Expected result: records can be read from the database.
     # They belong to the expected experiment and contain expected configuration IDs
     experiment, _ = self.initialize_exeriment()
     c1 = Configuration(OrderedDict({
         "frequency": 2900.0,
         "threads": 32
     }), Configuration.Type.DEFAULT, experiment.unique_id)
     c2 = Configuration(OrderedDict({
         "frequency": 2200.0,
         "threads": 8
     }), Configuration.Type.FROM_SELECTOR, experiment.unique_id)
     experiment.measured_configurations.append(c1)
     experiment.measured_configurations.append(c2)
     records = []
     for config in experiment.measured_configurations:
         records.append(config.get_configuration_record())
     database.write_many_records("Measured_configurations", records)
     written_records = database.get_all_records("Measured_configurations")
     configuration_ids = []
     experiment_ids = []
     for record in written_records:
         configuration_ids.append(record["Configuration_ID"])
         experiment_ids.append(record["Exp_unique_ID"])
     assert c1.unique_id in configuration_ids
     assert c2.unique_id in configuration_ids
     assert experiment.unique_id in experiment_ids
Example #3
    def _add_measured_configuration_to_experiment(
            self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if configuration.is_better(self.get_objectives_minimization(),
                                   self.get_objectives_priorities(),
                                   self.current_best_configurations[0]):
            # we do not need warm_startup_info anymore, since a better configuration was found
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
            self.database.update_record(
                collection_name="warm_startup_info",
                query={"Exp_unique_ID": self.unique_id},
                new_val={"wsi": configuration.warm_startup_info})
        else:
            # this configuration did not improve the previous solution, no need to keep track of its solutions.
            configuration.warm_startup_info = {}

        self.database.write_one_record(
            "Measured_configurations",
            configuration.get_configuration_record())
        self.send_state_to_db()
        self.api.send("new",
                      "configuration",
                      configurations=[configuration.parameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)
Example #4
 def _add_configuration_to_experiment(self, configuration: Configuration) -> None:
     """
     Save configuration after passing all checks.
     This method also sends an update to API (front-end).
     :param configuration: Configuration object.
     :return: None
     """
     self.all_configurations.append(configuration)
     self.api.send("new", "configuration",
                   configurations=[configuration.get_parameters()],
                   results=[configuration.get_average_result()])
     self.logger.info("Adding to Experiment: %s" % configuration)
Example #5
 def put_default_configuration(self, default_configuration: Configuration):
     if self._is_valid_configuration_instance(default_configuration):
         if not self.default_configuration:
             self.default_configuration = default_configuration
             self.api.send("default", "configuration",
                           configurations=[default_configuration.get_parameters()],
                           results=[default_configuration.get_average_result()])
             if default_configuration not in self.all_configurations:
                 self.all_configurations.append(default_configuration)
                 self._calculate_current_best_configurations()
         else:
             raise ValueError("The default Configuration was registered already.")
Example #6
    def get_default_configurations_results(self, ch, method, properties, body):
        """
        Callback function for the result of default configuration
        :param ch: pika.Channel
        :param method:  pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring default configuration in bytes format
        """
        default_configuration = Configuration.from_json(body.decode())
        if default_configuration.status == Configuration.Status.BAD:
            new_default_values = self.default_config_handler.get_new_default_config()
            if new_default_values:
                config = Configuration(new_default_values,
                                       Configuration.Type.FROM_SELECTOR,
                                       self.experiment.unique_id)
                temp_msg = "New default configuration sampled."
                self.logger.info(temp_msg)
                self.sub.send('log', 'info', message=temp_msg)
                self.consume_channel.basic_publish(
                    exchange='',
                    routing_key='measure_new_configuration_queue',
                    body=json.dumps({"configuration": config.to_json()}))
            else:
                self.logger.error(
                    "The specified default configuration is broken.")
                self.stop()
                self.sub.send(
                    'log',
                    'info',
                    message="The specified default configuration is broken.")
                return
        if self.experiment.is_configuration_evaluated(default_configuration):
            self.experiment.default_configuration = default_configuration
            self.database.update_record(
                "Search_space", {"Exp_unique_ID": self.experiment.unique_id}, {
                    "Default_configuration":
                    default_configuration.get_configuration_record()
                })
            self.database.update_record(
                "Search_space", {"Exp_unique_ID": self.experiment.unique_id}, {
                    "SearchspaceObject": pickle.dumps(
                        self.experiment.search_space)
                })

            temp_msg = f"Evaluated Default Configuration: {default_configuration}"
            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)

            # starting main work: building model and choosing configuration for measuring
            self.consume_channel.basic_publish(
                exchange='', routing_key='get_worker_capacity_queue', body='')
Example #7
    def send_new_configurations_to_measure(self, ch, method, properties, body):
        """
        This callback function will be triggered on arrival of ONE measured Configuration.
        When a new measured Configuration arrives, the following steps should be done:

            -   update and validate models;

            -   pick new Configuration(s) for evaluation, either by the model or by the selection algorithm;
                Note: The number of new Configurations is:
                - 0 if the number of available Worker nodes decreased;
                - 1 if the number of available Workers did not change;
                - N + 1 if the number of available Workers increased by N;

            -   send new Configuration to Repeater for evaluation.
        """

        needed_configs = json.loads(body.decode()).get("worker_capacity", 1)
        for _ in range(needed_configs):
            config = self.predictor.predict(
                self.experiment.measured_configurations)
            if config not in self.experiment.evaluated_configurations:
                temp_msg = f"Model predicted {config}."
            else:
                while config in self.experiment.evaluated_configurations and not self._is_interrupted:
                    if len(self.experiment.evaluated_configurations) >= self.experiment.search_space.get_size():
                        msg = "Entire Search Space was evaluated. Shutting down."
                        self.logger.warning(msg)
                        self.consume_channel.basic_publish(
                            exchange='',
                            routing_key='stop_experiment_queue',
                            body=msg)
                        break

                    new_parameter_values = OrderedDict()
                    while not self.experiment.search_space.validate(
                            new_parameter_values, is_recursive=True):
                        self.experiment.search_space.generate(
                            new_parameter_values)
                    config = Configuration(new_parameter_values,
                                           Configuration.Type.FROM_SELECTOR,
                                           self.experiment.unique_id)
                temp_msg = f"Fully randomly sampled {config}."

            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)
            self.consume_channel.basic_publish(
                exchange='',
                routing_key='measure_new_configuration_queue',
                body=json.dumps({"configuration": config.to_json()}))
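
The body consumed above is assumed to be a small JSON message carrying the number of free Workers; when the field is missing, a single new Configuration is requested. A standalone sketch of that message contract with a made-up payload:

import json

# Illustrative payload as a worker-capacity publisher might send it.
body = json.dumps({"worker_capacity": 2}).encode()
# Same parsing as in send_new_configurations_to_measure(): default to 1 if the field is absent.
needed_configs = json.loads(body.decode()).get("worker_capacity", 1)
print(needed_configs)  # -> 2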
Example #8
    def evaluate(self, current_configuration: Configuration,
                 experiment: Experiment):
        """
        Return the number of measurements needed to reach max_tasks_per_configuration for the default Configuration, or 0.
        :param current_configuration: instance of Configuration class.
        :param experiment: instance of 'experiment' is required for model-awareness.
        :return: max_tasks_per_configuration or 0
        """

        if len(current_configuration.get_tasks()) < self.max_tasks_per_configuration:
            return self.max_tasks_per_configuration - len(current_configuration.get_tasks())
        else:
            return 0
Example #9
    def evaluate(self, current_configuration: Configuration,
                 experiment: Experiment):
        """
        Return the number of measurements needed to reach max_tasks_per_configuration, or 0.
        :param current_configuration: instance of Configuration class.
        :param experiment: instance of 'experiment' required by the abstract class. Not used in this strategy.
        :return: max_tasks_per_configuration or 0
        """

        if len(current_configuration.get_tasks()) < self.max_tasks_per_configuration:
            return self.max_tasks_per_configuration - len(current_configuration.get_tasks())
        else:
            return 0
Example #10
 def _send_measurement(self, id_measurement, measurement):
     with pika.BlockingConnection(
             pika.ConnectionParameters(self.event_host,
                                       self.event_port)) as connection:
         with connection.channel() as channel:
             number_ready_task = len(measurement['tasks_results'])
             for i, task_parameter in enumerate(
                     measurement['tasks_to_send']):
                 if i >= number_ready_task:
                     self.logger.info("Sending task: %s" % task_parameter)
                     task_description = dict()
                     task_description["id_measurement"] = id_measurement
                     task_description["task_id"] = str(uuid.uuid4())
                     config = Configuration.from_json(
                         measurement["configuration"])
                     task_description[
                         "experiment_id"] = config.experiment_id
                     task_description["task_name"] = self._task_name
                     task_description[
                         "time_for_run"] = self._time_for_one_task_running
                     task_description["Scenario"] = self._scenario
                     task_description["result_structure"] = self._objectives
                     task_description["parameters"] = task_parameter
                     try:
                         channel.basic_publish(
                             exchange='',
                             routing_key='task_queue',
                             body=json.dumps(task_description))
                     except pika.exceptions.ChannelWrongStateError as err:
                         if not channel.is_open:
                             self.logger.warning(
                                 "Attempt to send a message after closing the connection"
                             )
                         else:
                             raise err
Example #11
    def predict_next_configurations(self, amount):
        """
        Takes features and, using the previously created model, performs regression to find labels; returns the Configurations with the lowest predicted values.
        :param amount: int, number of Configurations which will be returned
        :return: list of Configurations that need to be measured.
        """
        # 1. get model's predictions
        predicted_results = []
        for index, predicted_result in sorted(enumerate(
                self.model.predict(self.experiment.search_space)),
                                              key=lambda c: c[1]):
            conf = self.experiment.search_space[index]
            predicted_results.append((predicted_result, conf))

        # Only for DEMO
        # self.sub.send('predictions', 'configurations',
        #               configurations=[self.experiment.search_space[index] for (predicted_result, index) in predicted_results],
        #               results=[[round(predicted_result[0], 2)] for (predicted_result, index) in predicted_results])

        # 2. Update predicted results for already evaluated Configurations.
        for config in self.all_configurations:
            for pred_tuple in predicted_results:
                if (pred_tuple[1] == config.get_parameters()):
                    config.add_predicted_result(pred_tuple[1], pred_tuple[0])

        # 3. Pick up the required amount of configs
        all_config = [
            conf.get_parameters() for conf in self.all_configurations
        ]
        result = []
        for best in predicted_results[:amount]:
            if best[1] in all_config:
                select = [
                    conf for conf in self.all_configurations
                    if conf.get_parameters() == best[1]
                ]
                result.append(select[0])
            else:
                new_conf = Configuration(best[1])
                new_conf.add_predicted_result(best[1], best[0])
                result.append(new_conf)

        # 4. return configs
        return result
Example #12
 def test_1_write_exp_state_record(self):
     # Test #1. Format and write Experiment state (ES) record to the database
     # Expected result: record can be read from the database and contains all required ES fields. The Experiment id matches
     experiment, _ = self.initialize_exeriment()
     c1 = Configuration(OrderedDict({
         "frequency": 2900.0,
         "threads": 32
     }), Configuration.Type.DEFAULT, experiment.unique_id)
     c1.status = c1.Status.MEASURED
     experiment.default_configuration = c1
     database.write_one_record("Experiment_state",
                               experiment.get_experiment_state_record())
     written_record = database.get_all_records("Experiment_state")[0]
     assert experiment.unique_id == written_record["Exp_unique_ID"]
     assert experiment.get_number_of_measured_configurations() == written_record["Number_of_measured_configs"]
     assert experiment.get_bad_configuration_number() == written_record["Number_of_bad_configs"]
     assert experiment.get_current_solution().get_configuration_record() == written_record["Current_solution"]
     assert experiment.get_model_state() == written_record["is_model_valid"]
Example #13
    def _add_measured_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if not self.current_best_configurations:
            # first solution found
            self.current_best_configurations = [configuration]
        elif configuration.is_better_configuration(self.is_minimization(), self.current_best_configurations[0]):
            # new solution found
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
        else:
            # this configuration did not improve the previous solution
            configuration.warm_startup_info = {}

        self.api.send("new", "configuration",
                      configurations=[configuration.hyperparameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)
Example #14
 def test_4_write_task_record(self):
     # Test #4. Format and write Task record to the database
     # Expected result: record can be read from the database.
     # Task belongs to the expected configuration and has expected task ID
     c1 = Configuration(
         OrderedDict({
             "frequency": "dummy",
             "threads": "dummy"
         }), Configuration.Type.DEFAULT, "DummyID")
     task = {
         'task id': 'id',
         'worker': 'worker',
         'result': {
             'energy': 0.9
         },
         'ResultValidityCheckMark': 'OK'
     }
     c1.add_task(task)
     database.write_one_record("Tasks", c1.get_task_record(task))
     written_record = database.get_all_records("Tasks")[0]
     assert c1.unique_id == written_record["Configuration_ID"]
     assert task['task id'] == written_record["Task_ID"]
     assert task == written_record["Task"]
Example #15
    def compute_avg_results_over_configurations(
            configurations: List[Configuration]) -> List[float]:

        # Make sure that all Configurations represent the same point in the search space:
        assert all([
            config.parameters == configurations[0].parameters
            for config in configurations
        ])
        tasks = BRISEBenchmarkAnalyser.collect_tasks_from_configurations(
            configurations)

        previous_task_configuration = Configuration.TaskConfiguration
        Configuration.set_task_config(configurations[0].TaskConfiguration)

        tmp_configuration = Configuration(configurations[0].parameters,
                                          Configuration.Type.TEST)
        tmp_configuration.add_tasks(task=tasks)

        result = tmp_configuration.get_average_result()
        Configuration.set_task_config(previous_task_configuration)

        return result
Example #16
 def _put(self, configuration_instance: Configuration):
     """
     Takes an instance of the Configuration class and appends it to the list of all configuration instances.
     :param configuration_instance: Configuration class instance.
     """
     if self._is_valid_configuration_instance(configuration_instance):
         if not self.all_configurations:
             self._add_configuration_to_experiment(configuration_instance)
         else:
             is_exists = False
             for value in self.all_configurations:
                 if value.get_parameters() == configuration_instance.get_parameters():
                     is_exists = True
             if not is_exists:
                 self._add_configuration_to_experiment(configuration_instance)
             else:
                 self.logger.warning("Attempt of adding Configuration that is already in Experiment: %s" %
                                     configuration_instance)
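
The duplicate scan above can also be expressed with any(), which stops at the first match. A small sketch assuming get_parameters() returns values comparable with ==; contains_parameters is a hypothetical helper, not a project function:

def contains_parameters(configurations, candidate):
    # True if any stored Configuration has the same parameters as the candidate.
    return any(stored.get_parameters() == candidate.get_parameters()
               for stored in configurations)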
Example #17
 def get_configurations_results(self, ch, method, properties, body):
     """
     Callback function for the result of all Configurations except Default
     :param ch: pika.Channel
     :param method:  pika.spec.Basic.GetOk
     :param properties: pika.spec.BasicProperties
     :param body: result of measuring any configuration except default in bytes format
     """
     with self.conf_lock:  # To be sure, that no Configuration will be added after satisfying all Stop Conditions.
         configuration = Configuration.from_json(body.decode())
         if not self._is_interrupted and self.experiment.is_configuration_evaluated(
                 configuration):
             self.experiment.try_add_configuration(configuration)
             temp_msg = "-- New Configuration was evaluated. Building Target System model."
             self.logger.info(temp_msg)
             self.sub.send('log', 'info', message=temp_msg)
             self.consume_channel.basic_publish(
                 exchange='',
                 routing_key='get_worker_capacity_queue',
                 body='')
Example #18
 def default_configuration(self, default_configuration: Configuration):
     if self._is_valid_configuration_instance(default_configuration):
         if not self._default_configuration:
             self._default_configuration = default_configuration
             self.api.send(
                 "default",
                 "configuration",
                 configurations=[default_configuration.parameters],
                 results=[default_configuration.results])
             self.measured_configurations.append(default_configuration)
             if not self.current_best_configurations:
                 self.current_best_configurations = [default_configuration]
             self.database.write_one_record(
                 "Measured_configurations",
                 default_configuration.get_configuration_record())
             self.database.write_one_record(
                 collection_name="warm_startup_info",
                 record={
                     "Exp_unique_ID": self.unique_id,
                     "wsi": default_configuration.warm_startup_info
                 })
         else:
             raise ValueError(
                 "The default Configuration was registered already.")
Example #19
def measure_task(configurations_sample: list, tasks_sample: list,
                 experiment_description: dict, search_space: Hyperparameter,
                 measured_tasks: int, config_type: Configuration.Type,
                 config_status: Configuration.Status):
    """
    Test function for Repeater module.
    Main steps:
    0. Take default tasks sample.
    1. Create instances of Repeater, Experiment, Default Configuration according to test requirements.
    2. Create instance of current measurement.
    3. Call Repeater function.

    :param configurations_sample: default sample of measured configurations
    :param tasks_sample: default sample of measured tasks
    :param experiment_description: Experiment Description sample in json format
    :param search_space: Search Space sample
    :param measured_tasks: specify number of measured tasks in current measurement.
    :param config_type: specify current measurement configuration type.
    :param config_status: specify current measurement configuration status.

    :return: list of configuration status and number of tasks to measure.
    """
    experiment = Experiment(experiment_description, search_space)
    Configuration.set_task_config(experiment.description["TaskConfiguration"])
    configuration = Configuration(configurations_sample[1]["Params"],
                                  config_type, experiment.unique_id)
    configuration.status = config_status
    for i in range(0, measured_tasks):
        configuration.add_task(tasks_sample[i])
    orchestrator = RepeaterOrchestration(experiment)
    if config_type == Configuration.Type.DEFAULT:
        orchestrator._type = orchestrator.get_repeater(True)
    else:
        orchestrator._type = orchestrator.get_repeater()
        default_configuration = Configuration(
            configurations_sample[0]["Params"], Configuration.Type.DEFAULT,
            experiment.unique_id)
        default_configuration.status = Configuration.Status.MEASURED
        default_configuration._task_number = configurations_sample[0]["Tasks"]
        default_configuration.results = configurations_sample[0]["Results"]
        default_configuration._standard_deviation = configurations_sample[0][
            "STD"]
        experiment.default_configuration = default_configuration
    task = json.dumps({"configuration": configuration.to_json()})

    dummy_channel = None
    dummy_method = None
    dummy_properties = None

    results_measurement = orchestrator.measure_configurations(
        dummy_channel, dummy_method, dummy_properties, task)

    return results_measurement
Example #20
    def predict(self,
                measured_configurations: List[Configuration]) -> Configuration:
        """
        Predict next Configuration using already evaluated configurations.
        Prediction is a construction process and it is done in iterations.
        It stops after constructing the valid Configuration within the Search Space.
        Each iteration uncovers and predicts new Hyperparameters deeper in the Search Space.

        :param measured_configurations: a list of already measured Configurations that will be used to make a
        prediction.
        :return: Configuration that is going to be measured.

        Question: need to transfer data from the previous level? (that was fixed and will not be changed)
          - more no than yes.
              for no:
                - less data to (pre)process - fewer dimensions
                - fewer ad-hoc solutions on "how to differentiate in data???" - simply predict over all dimensions,
                  the others are targets.
              for yes:
                - models will be more accurate (?)
         """
        level = -1
        parameters = OrderedDict()
        # Select the latest Configurations, according to the window size
        if isinstance(self.predictor_config["window size"], int):
            number_of_configs_to_consider = self.predictor_config[
                "window size"]
        else:
            # the window size is a float, i.e. a fraction of all measured Configurations
            number_of_configs_to_consider = \
                int(round(self.predictor_config["window size"] * len(measured_configurations)))
        level_configs = measured_configurations[len(measured_configurations) -
                                                number_of_configs_to_consider:]

        # Check if entire configuration is valid now.
        while not self.search_space.validate(parameters, is_recursive=True):
            level += 1  # it is here because of 'continue'

            # 1. Filter Configurations.
            level_configs = list(
                filter(
                    lambda x: self.search_space.are_siblings(
                        parameters, x.parameters),  # Filter
                    level_configs  # Input data for filter
                ))

            if not level_configs:
                # If there is no data on current level, just use random sampling
                self.search_space.generate(parameters)
                continue

            # 2. Derive which parameters will be predicted on this level:
            # - by expanding the parameters from previous level to this level
            # - by removing information from the previous level(s)
            dummy = copy.deepcopy(parameters)
            self.search_space.generate(dummy)
            description = self.search_space.describe(dummy)
            for hyperparameter in parameters:
                del description[hyperparameter]

            # 4. Select and build model, predict parameters for this level
            # 4.1. Select and create model from ED
            # 4.2. Transform Configurations into Pandas DataFrame keeping only relevant for this level information,
            # split features and labels
            # 4.3. Build model
            # 4.4. Make a prediction as PD DataFrame or None
            # 4.5. Validate a prediction: results could be out of bound or more sophisticated cases (in future)

            # 4.1.
            model_parameters = \
                self.predictor_config["models"][level if len(self.predictor_config["models"]) > level else -1]
            model = get_model(model_parameters)

            # 4.2.
            feature_columns = list(description.keys())
            highest_priority_objective_index = self.task_config["ObjectivesPrioritiesModels"]\
                .index(max(self.task_config["ObjectivesPrioritiesModels"]))

            highest_priority_objective = self.task_config["Objectives"][
                highest_priority_objective_index]

            data = pd.DataFrame([
                cfg.to_series()[feature_columns + [highest_priority_objective]]
                for cfg in level_configs
            ])

            features = pd.DataFrame(data[feature_columns])
            labels = pd.DataFrame(data[highest_priority_objective])

            # 4.3
            is_minimization = self.task_config["ObjectivesMinimization"][
                highest_priority_objective_index]
            model.build_model(features, labels, description, is_minimization)
            # 4.4
            if model.is_built:
                pd_prediction = model.predict()
                prediction = pd_prediction.to_dict(orient="records")
                if len(prediction) > 1:
                    self.logger.warning(
                        f"Model predicted more than 1 parameters set. "
                        f"Only first valid will be used{prediction[0]}.")
                # 4.5
                valid_prediction_found = False
                for predicted_hyperparameters in prediction:
                    valid_prediction_found = True
                    for hyperparameter_name in description.keys():
                        hyperparameter = description[hyperparameter_name][
                            "hyperparameter"]
                        # Validation should be encapsulated if more sophisticated approaches arise.
                        if not hyperparameter.validate(
                                predicted_hyperparameters, is_recursive=False):
                            valid_prediction_found = False
                            break
                    if valid_prediction_found:
                        break
                    else:
                        continue

                if not valid_prediction_found:
                    self.logger.warning(
                        "Model did not predict valid hyperparameter set. Sampling random."
                    )
                    self.search_space.generate(parameters)
                else:
                    if any((h_name in parameters
                            for h_name in predicted_hyperparameters)):
                        raise ValueError(
                            f"Previously selected hyperparameters should not be altered! "
                            f"Previous: {parameters}. This level: {predicted_hyperparameters}"
                        )
                    parameters.update(predicted_hyperparameters)
            else:
                self.logger.debug(
                    f"{model_parameters['Type']} model was not build to predict hyperparameters: {list(description.keys())}. "
                    f"Random values will be sampled.")
                self.search_space.generate(parameters)

        return Configuration(parameters, Configuration.Type.PREDICTED,
                             self.experiment_id)
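
The window selection at the top of predict() accepts either an absolute count (int) or a fraction of all measured Configurations (float). A standalone sketch of that rule; select_window is a hypothetical helper name, not part of the project:

def select_window(measured_configurations, window_size):
    # int -> take that many latest Configurations; float -> take that fraction of them.
    if isinstance(window_size, int):
        n = window_size
    else:
        n = int(round(window_size * len(measured_configurations)))
    return measured_configurations[len(measured_configurations) - n:]

print(select_window(list(range(10)), 3))    # -> [7, 8, 9]
print(select_window(list(range(10)), 0.3))  # -> [7, 8, 9]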
Example #21
    def measure_configurations(self, channel, method, properties, body):
        """
        Callback function for the result of measuring
        :param channel: pika.Channel
        :param method:  pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring a configuration, in bytes format
        """
        if os.environ.get('TEST_MODE') == 'UNIT_TEST':
            result = json.loads(body)
        else:
            result = json.loads(body.decode())
        configuration = Configuration.from_json(result["configuration"])
        if configuration.status != Configuration.Status.NEW and os.environ.get(
                'TEST_MODE') != 'UNIT_TEST':
            tasks_to_send = result["tasks_to_send"]
            tasks_results = result["tasks_results"]
            for index, objective in enumerate(self._objectives):
                tasks_results = error_check(tasks_results, objective,
                                            self._expected_values_range[index],
                                            self._objectives_data_types[index])
            if self.experiment.description["OutliersDetection"]["isEnabled"]:
                tasks_results = self.outlier_detectors.find_outliers_for_taskset(
                    tasks_results, self._objectives, [configuration],
                    tasks_to_send)

            # Sending data to API and adding Tasks to Configuration
            for parameters, task in zip(tasks_to_send, tasks_results):
                if configuration.parameters == parameters:
                    if configuration.is_valid_task(task):
                        configuration.add_task(task)
                        if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                            self.database.write_one_record(
                                "Tasks", configuration.get_task_record(task))
                    else:
                        configuration.increase_failed_tasks_number()

                API().send('new',
                           'task',
                           configurations=[parameters],
                           results=[task])

        # Evaluating configuration
        if configuration.number_of_failed_tasks <= self.repeater_parameters[
                'MaxFailedTasksPerConfiguration']:
            needed_tasks_count = self.evaluation_by_type(configuration)
        else:
            needed_tasks_count = 0
            configuration.status = Configuration.Status.BAD
            if len(configuration.get_tasks()) == 0:
                self.experiment.increment_bad_configuration_number()
                configuration.disable_configuration()
        current_measurement = {
            str(configuration.parameters): {
                'parameters': configuration.parameters,
                'needed_tasks_count': needed_tasks_count,
                'Finished': False
            }
        }

        if needed_tasks_count == 0:
            current_measurement[str(
                configuration.parameters)]['Finished'] = True
            current_measurement[str(
                configuration.parameters)]['Results'] = configuration.results

        tasks_to_send = []
        for point in current_measurement.keys():
            if not current_measurement[point]['Finished']:
                for i in range(
                        current_measurement[point]['needed_tasks_count']):
                    tasks_to_send.append(
                        current_measurement[point]['parameters'])
                    self.performed_measurements += 1
                    if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                        self.database.write_one_record(
                            "Repeater_measurements",
                            self.get_repeater_measurements_record())

        if os.environ.get('TEST_MODE') == 'UNIT_TEST':
            return configuration, needed_tasks_count

        elif configuration.status == Configuration.Status.MEASURED or configuration.status == Configuration.Status.BAD:

            conn_params = pika.ConnectionParameters(host=self.event_host,
                                                    port=int(self.event_port))
            with pika.BlockingConnection(conn_params) as connection:
                with connection.channel() as channel:
                    try:
                        if configuration.type == Configuration.Type.DEFAULT:
                            self._type = self.get_repeater()
                            channel.basic_publish(
                                exchange='',
                                routing_key=
                                'default_configuration_results_queue',
                                body=configuration.to_json())
                        elif configuration.type == Configuration.Type.PREDICTED or \
                                configuration.type == Configuration.Type.FROM_SELECTOR:
                            channel.basic_publish(
                                exchange='',
                                routing_key='configurations_results_queue',
                                body=configuration.to_json())
                    except pika.exceptions.ChannelWrongStateError as err:
                        if not channel.is_open:
                            self.logger.warning(
                                "Attempt to send a message after closing the connection"
                            )
                        else:
                            raise err
        elif configuration.status == Configuration.Status.EVALUATED or \
                configuration.status == Configuration.Status.REPEATED_MEASURING:

            conn_params = pika.ConnectionParameters(host=self.event_host,
                                                    port=int(self.event_port))
            with pika.BlockingConnection(conn_params) as connection:
                with connection.channel() as channel:
                    body = json.dumps({
                        "configuration": configuration.to_json(),
                        "tasks": tasks_to_send
                    })
                    channel.basic_publish(exchange='',
                                          routing_key='process_tasks_queue',
                                          body=body)
Example #22
    def run(self):
        """
        The entry point to the main node functionality - measuring default Configuration.
        When the default Configuration finishes its evaluation, the first set of Configurations will be
        sampled for evaluation (and, accordingly, the queues for Configuration measurement results are initialized).
        """
        self._state = self.State.RUNNING
        self.logger.info("Starting BRISE")
        self.sub.send('log', 'info', message="Starting BRISE")

        if not self.experiment_setup:
            # Check if main.py is running with a specified experiment description file path
            if len(argv) > 1:
                exp_desc_file_path = argv[1]
            else:
                exp_desc_file_path = './Resources/EnergyExperiment/EnergyExperiment.json'
                log_msg = f"The Experiment Setup was not provided and the path to an experiment file was not specified." \
                          f" The default one will be executed: {exp_desc_file_path}"
                self.logger.warning(log_msg)
                self.sub.send('log', 'warning', message=log_msg)
            experiment_description, search_space = load_experiment_setup(
                exp_desc_file_path)
        else:
            experiment_description = self.experiment_setup[
                "experiment_description"]
            search_space = self.experiment_setup["search_space"]

        validate_experiment_description(experiment_description)
        os.makedirs(experiment_description["General"]["results_storage"],
                    exist_ok=True)

        # Initializing instance of Experiment - main data holder.
        self.experiment = Experiment(experiment_description, search_space)
        search_space.experiment_id = self.experiment.unique_id
        Configuration.set_task_config(
            self.experiment.description["TaskConfiguration"])

        # initialize connection to rabbitmq service
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                os.getenv("BRISE_EVENT_SERVICE_HOST"),
                int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT"))))
        self.consume_channel = self.connection.channel()

        # initialize connection to the database
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                int(os.getenv("BRISE_DATABASE_PORT")),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))

        # write initial settings to the database
        self.database.write_one_record(
            "Experiment_description",
            self.experiment.get_experiment_description_record())
        self.database.write_one_record(
            "Search_space",
            get_search_space_record(self.experiment.search_space,
                                    self.experiment.unique_id))
        self.experiment.send_state_to_db()

        self.sub.send(
            'experiment',
            'description',
            global_config=self.experiment.description["General"],
            experiment_description=self.experiment.description,
            searchspace_description=self.experiment.search_space.serialize(
                True))
        self.logger.debug(
            "Experiment description and global configuration sent to the API.")

        # Create and launch Stop Condition services in separate threads.
        launch_stop_condition_threads(self.experiment.unique_id)

        # Instantiate client for Worker Service, establish connection.
        self.wsc_client = WSClient(
            self.experiment.description["TaskConfiguration"],
            os.getenv("BRISE_EVENT_SERVICE_HOST"),
            int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))

        # Initialize Repeater - encapsulate Configuration evaluation process to avoid results fluctuations.
        # (achieved by multiple Configuration evaluations on Workers - Tasks)
        RepeaterOrchestration(self.experiment)

        self.predictor: Predictor = Predictor(self.experiment.unique_id,
                                              self.experiment.description,
                                              self.experiment.search_space)

        self.consume_channel.basic_consume(
            queue='default_configuration_results_queue',
            auto_ack=True,
            on_message_callback=self.get_default_configurations_results)
        self.consume_channel.basic_consume(
            queue='configurations_results_queue',
            auto_ack=True,
            on_message_callback=self.get_configurations_results)
        self.consume_channel.basic_consume(queue='stop_experiment_queue',
                                           auto_ack=True,
                                           on_message_callback=self.stop)
        self.consume_channel.basic_consume(
            queue="get_new_configuration_queue",
            auto_ack=True,
            on_message_callback=self.send_new_configurations_to_measure)

        self.default_config_handler = get_default_config_handler(
            self.experiment)
        temp_msg = "Measuring default Configuration."
        self.logger.info(temp_msg)
        self.sub.send('log', 'info', message=temp_msg)
        default_parameters = self.experiment.search_space.generate_default()
        default_configuration = Configuration(default_parameters,
                                              Configuration.Type.DEFAULT,
                                              self.experiment.unique_id)
        default_configuration.experiment_id = self.experiment.unique_id
        dictionary_dump = {"configuration": default_configuration.to_json()}
        body = json.dumps(dictionary_dump)

        self.consume_channel.basic_publish(
            exchange='',
            routing_key='measure_new_configuration_queue',
            body=body)
        # listen to all queues with responses while the _is_interrupted flag is False
        try:
            while not self._is_interrupted:
                self.consume_channel.connection.process_data_events(
                    time_limit=1)  # 1 second
        finally:
            if self.connection.is_open:
                self.connection.close()
Example #23
    def evaluate(self, current_configuration: Configuration,
                 experiment: Experiment):
        """
        Return the number of measurements needed to finish the Configuration, or 0 if it is finished.
        In the latter case the result is computed as an average over all measurements.
        :param current_configuration: instance of Configuration class
        :param experiment: instance of 'experiment' is required for experiment-awareness.
        :return: int, min_tasks_per_configuration if the Configuration was not measured at all,
                 1 if the Configuration was not measured precisely, or 0 if it is finished
        """
        tasks_data = current_configuration.get_tasks()

        if len(tasks_data) == 0:
            return 1

        c_c_results = current_configuration.results
        c_s_results = experiment.get_current_solution().results
        c_c_results_l = []
        c_s_results_l = []
        for key in experiment.get_objectives():
            c_c_results_l.append(c_c_results[key])
            c_s_results_l.append(c_s_results[key])

        if len(tasks_data) < self.min_tasks_per_configuration:
            if self.is_experiment_aware:
                ratios = [
                    cur_config_dim / cur_solution_dim
                    for cur_config_dim, cur_solution_dim in zip(
                        c_c_results_l, c_s_results_l)
                ]
                if all([
                        ratio >= ratio_max
                        for ratio, ratio_max in zip(ratios, self.ratios_max)
                ]):
                    return 0
            return self.min_tasks_per_configuration - len(tasks_data)

        elif len(tasks_data) >= self.max_tasks_per_configuration:
            return 0
        else:
            # Calculating standard deviation
            all_dim_std = current_configuration.get_standard_deviation()

            # The number of Degrees of Freedom generally equals the number of observations (Tasks) minus
            # the number of estimated parameters.
            degrees_of_freedom = len(tasks_data) - len(c_c_results_l)

            # Calculate the critical t-student value from the t distribution
            student_coefficients = [
                t.ppf(c_l, df=degrees_of_freedom)
                for c_l in self.confidence_levels
            ]

            # Calculating the confidence interval for each dimension, which combines the confidence interval for
            # singular measurements and the confidence interval for multiple measurements.
            # First - singular measurements errors:
            conf_intervals_sm = []
            for c_l, d_s_a, d_a_c, avg in zip(self.confidence_levels,
                                              self.device_scale_accuracies,
                                              self.device_accuracy_classes,
                                              c_c_results_l):
                d = sqrt((c_l * d_s_a / 2)**2 + (d_a_c * avg / 100)**2)
                conf_intervals_sm.append(c_l * d)

            # Calculation of confidence interval for multiple measurements:
            conf_intervals_mm = []
            for student_coefficient, dim_skd in zip(student_coefficients,
                                                    all_dim_std):
                conf_intervals_mm.append(student_coefficient * dim_skd /
                                         sqrt(len(tasks_data)))

            # confidence interval, or in other words absolute error
            absolute_errors = []
            for c_i_ss, c_i_mm in zip(conf_intervals_sm, conf_intervals_mm):
                absolute_errors.append(sqrt(pow(c_i_ss, 2) + pow(c_i_mm, 2)))

            # Calculating relative error for each dimension
            relative_errors = []
            for interval, avg_res in zip(absolute_errors, c_c_results_l):
                if not avg_res:  # it is 0 or 0.0
                    # guard against division by zero; revisit if new use-cases appear with the same behaviour.
                    if interval == 0:
                        avg_res = 1  # Anyway relative error will be 0 and avg will not be changed.
                    else:
                        return 1
                relative_errors.append(interval / avg_res * 100)

            # Thresholds for relative errors that should not be exceeded for accurate measurement.
            thresholds = []
            if self.is_experiment_aware:
                # We adapt thresholds
                objectives_minimization = experiment.get_objectives_minimization()

                for i in range(len(objectives_minimization)):
                    if objectives_minimization[i]:
                        if not c_s_results_l[i]:
                            ratio = 1
                        else:
                            ratio = c_c_results_l[i] / c_s_results_l[i]
                    else:
                        if not c_c_results_l[i]:
                            ratio = 1
                        else:
                            ratio = c_s_results_l[i] / c_c_results_l[i]

                    adopted_threshold = \
                        self.base_acceptable_errors[i] \
                        + (self.max_acceptable_errors[i] - self.base_acceptable_errors[i]) \
                        / (1 + exp(- (10 / self.ratios_max[i]) * (ratio - self.ratios_max[i] / 2)))

                    thresholds.append(adopted_threshold)

            else:
                # Or we don't adapt thresholds
                for acceptable_error in self.base_acceptable_errors:
                    thresholds.append(acceptable_error)

            # Simple implementation of possible multi-dim Repeater decision making:
            # If any of the resulting dimensions is not accurate - just terminate.
            for threshold, error in zip(thresholds, relative_errors):
                if error > threshold:
                    return 1
            return 0
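
The accuracy check above boils down to a Student's t confidence interval of the mean compared against a relative-error threshold. A standalone numeric sketch for a single objective, ignoring the device-accuracy term and the experiment-aware threshold adaptation; all values are made up for illustration:

from math import sqrt
from scipy.stats import t

tasks = [10.2, 9.8, 10.1, 10.4, 9.9]         # measured Task results for one objective
avg = sum(tasks) / len(tasks)
std = sqrt(sum((x - avg) ** 2 for x in tasks) / (len(tasks) - 1))
confidence_level = 0.95
degrees_of_freedom = len(tasks) - 1          # observations minus one estimated parameter
student_coefficient = t.ppf(confidence_level, df=degrees_of_freedom)
absolute_error = student_coefficient * std / sqrt(len(tasks))
relative_error = absolute_error / avg * 100  # percent
# Keep measuring (return 1) while relative_error exceeds the acceptable threshold.
print(round(relative_error, 2))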
Example #24
    def __predict_next_configuration(self):
        """
        Predicts the solution candidate of the model. Returns an existing Configuration instance if the configuration
        already exists in the all_configurations list, otherwise creates a new Configuration instance.
        :return Configuration instance
        """
        predicted_configuration = None
        info_dict = {}
        if self.isMinimizationExperiment:
            predicted_result = np.inf
        else:
            predicted_result = -np.inf
        predicted_result_vector = None

        if predicted_configuration is None:
            try:

                l = self.model['good'].pdf
                g = self.model['bad'].pdf

                minimize_me = lambda x: max(1e-32, g(x)) / max(l(x), 1e-32)

                kde_good = self.model['good']
                kde_bad = self.model['bad']

                for i in range(self.num_samples):
                    idx = np.random.randint(0, len(kde_good.data))
                    datum = kde_good.data[idx]
                    vector = []

                    for m, bw, t in zip(datum, kde_good.bw, self.vartypes):

                        bw = max(bw, self.min_bandwidth)
                        if t == 0:
                            bw = self.bw_factor * bw
                            try:
                                vector.append(
                                    sps.truncnorm.rvs(-m / bw, (1 - m) / bw,
                                                      loc=m,
                                                      scale=bw))
                            except:
                                self.logger.warning(
                                    "Truncated Normal failed for:\ndatum=%s\nbandwidth=%s\nfor entry with value %s"
                                    % (datum, kde_good.bw, m))
                                self.logger.warning("data in the KDE:\n%s" %
                                                    kde_good.data)
                        else:

                            if np.random.rand() < (1 - bw):
                                vector.append(int(m))
                            else:
                                vector.append(np.random.randint(t))
                    val = minimize_me(vector)

                    if not np.isfinite(val):
                        # right now, this happens because a KDE does not contain all values for a categorical parameter.
                        # This cannot be fixed with the statsmodels KDE, so for now we are just going to evaluate this one
                        # if the good_kde has a finite value, i.e. there is no config with that value in the bad kde, so it shouldn't be terrible.
                        if np.isfinite(l(vector)):
                            predicted_result_vector = vector
                            break

                    if (val < predicted_result
                            and self.isMinimizationExperiment) or (
                                val > predicted_result
                                and not self.isMinimizationExperiment):
                        predicted_result = val
                        predicted_result_vector = vector

                if predicted_result_vector is None:
                    self.logger.info(
                        "Sampling based optimization with %i samples failed -> using random configuration"
                        % self.num_samples)
                    info_dict['model_based_pick'] = False
                else:
                    predicted_configuration = []
                    for index, dimension in enumerate(
                            self.experiment.description["DomainDescription"]
                        ["AllConfigurations"]):
                        predicted_configuration.append(
                            dimension[predicted_result_vector[index]])

            except:
                self.logger.warning(
                    "Sampling based optimization with %i samples failed\n %s\n"
                    "Using random configuration" %
                    (self.num_samples, traceback.format_exc()))
                info_dict['model_based_pick'] = False

        # self.logger.debug('done sampling a new configuration.')
        for configuration in self.all_configurations:
            if configuration.get_parameters() == predicted_configuration:
                configuration.add_predicted_result(
                    parameters=predicted_configuration,
                    predicted_result=[predicted_result])
                return configuration
        predicted_configuration_class = Configuration(predicted_configuration)
        predicted_configuration_class.add_predicted_result(
            parameters=predicted_configuration,
            predicted_result=[predicted_result])
        return predicted_configuration_class
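
The candidate scoring in Example #24 follows the TPE idea of minimizing the ratio of the "bad" density to the "good" density over sampled vectors. A tiny standalone sketch of just that scoring rule with made-up densities; pick_candidate is a hypothetical helper, not part of the project:

def pick_candidate(candidates, good_pdf, bad_pdf):
    # Prefer the candidate where the "good" density dominates the "bad" one.
    minimize_me = lambda x: max(1e-32, bad_pdf(x)) / max(good_pdf(x), 1e-32)
    return min(candidates, key=minimize_me)

good = lambda x: 1.0 if 0.4 <= x[0] <= 0.6 else 0.1  # toy densities for illustration
bad = lambda x: 0.1 if 0.4 <= x[0] <= 0.6 else 1.0
print(pick_candidate([[0.1], [0.5], [0.9]], good, bad))  # -> [0.5]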