Example #1
    def put_default_configuration(self, default_configuration: Configuration) -> None:
        """
        Register the default Configuration, report it to the API (front-end)
        and add it to the list of all known Configurations.
        :param default_configuration: Configuration object.
        :return: None
        """
        if self._is_valid_configuration_instance(default_configuration):
            if not self.default_configuration:
                self.default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.get_parameters()],
                              results=[default_configuration.get_average_result()])
                if default_configuration not in self.all_configurations:
                    self.all_configurations.append(default_configuration)
                    self._calculate_current_best_configurations()
            else:
                raise ValueError("The default Configuration has already been registered.")
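
For context, the stand-in below (not the real BRISE Experiment class; names and parameter values are illustrative) mirrors the guard above to show the intended contract: the default Configuration can be registered exactly once, and a second registration raises ValueError.

class _ExperimentStub:
    def __init__(self):
        self.default_configuration = None

    def put_default_configuration(self, configuration) -> None:
        if self.default_configuration:
            raise ValueError("The default Configuration has already been registered.")
        self.default_configuration = configuration

stub = _ExperimentStub()
stub.put_default_configuration({"threads": 4})   # first registration succeeds
try:
    stub.put_default_configuration({"threads": 8})
except ValueError as error:
    print(error)                                 # second registration is rejected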
Example #2
    def _add_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save a Configuration after it has passed all checks.
        This method also sends an update to the API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.all_configurations.append(configuration)
        self.api.send("new", "configuration",
                      configurations=[configuration.get_parameters()],
                      results=[configuration.get_average_result()])
        # Lazy %-style arguments let the logger skip string interpolation
        # when the INFO level is disabled.
        self.logger.info("Adding to Experiment: %s", configuration)
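
Both snippets above report Configurations through api.send(action, entity, **payload). A throwaway stand-in such as the one below (purely illustrative, not the BRISE front-end API object) makes the payload shape easy to inspect.

class PrintingAPI:
    def send(self, action: str, entity: str, **payload) -> None:
        # Print what would be pushed to the front-end.
        print(f"{action}/{entity}: {payload}")

api = PrintingAPI()
api.send("new", "configuration",
         configurations=[[2900.0, 32]],          # illustrative parameter values
         results=[[96.78]])                      # illustrative result values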
Example #3
    @staticmethod
    def compute_avg_results_over_configurations(
            configurations: List[Configuration]) -> List[float]:
        """
        Compute the average result over several Configurations that describe
        the same point in the search space, by merging their Tasks into one
        temporary Configuration.
        """
        # Make sure that all Configurations describe the same point in the search space:
        assert all(
            config.parameters == configurations[0].parameters
            for config in configurations
        )
        tasks = BRISEBenchmarkAnalyser.collect_tasks_from_configurations(
            configurations)

        # Temporarily switch the class-level task configuration to the one used by
        # these Configurations and restore the previous one afterwards.
        previous_task_configuration = Configuration.TaskConfiguration
        Configuration.set_task_config(configurations[0].TaskConfiguration)

        tmp_configuration = Configuration(configurations[0].parameters,
                                          Configuration.Type.TEST)
        tmp_configuration.add_tasks(task=tasks)

        result = tmp_configuration.get_average_result()
        Configuration.set_task_config(previous_task_configuration)

        return result
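
The heavy lifting above is delegated to the Configuration class. As a rough, dependency-free illustration of the same idea (the names below are invented for this sketch), averaging repeated Tasks for a single search-space point reduces to a per-dimension mean:

from typing import Dict, List

def average_over_tasks(tasks: List[Dict[str, float]], result_keys: List[str]) -> List[float]:
    # Per-dimension mean over repeated measurements of the same parameter point.
    return [sum(task[key] for task in tasks) / len(tasks) for key in result_keys]

tasks = [{"energy": 10.0, "time": 2.0},
         {"energy": 12.0, "time": 3.0}]
print(average_over_tasks(tasks, ["energy", "time"]))  # [11.0, 2.5]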
Example #4
    def evaluate(self, current_configuration: Configuration,
                 experiment: Experiment) -> int:
        """
        Return the number of additional measurements needed to finish the Configuration,
        or 0 if it is already measured precisely enough; the Configuration result itself
        is computed as the average over all its measurements (Tasks).
        :param current_configuration: instance of the Configuration class.
        :param experiment: instance of the Experiment class, required for model-awareness.
        :return: int - min_tasks_per_configuration if the Configuration was not measured at all,
                 1 if it is not yet measured precisely enough, or 0 if measurement is finished.
        """
        tasks_data = current_configuration.get_tasks()

        if len(tasks_data) < self.min_tasks_per_configuration:
            return self.min_tasks_per_configuration - len(tasks_data)

        elif len(tasks_data) >= self.max_tasks_per_configuration:
            return 0
        else:
            average_result = current_configuration.get_average_result()
            # Calculating standard deviation
            all_dim_std = current_configuration.get_standard_deviation()

            # The number of Degrees of Freedom generally equals the number of observations (Tasks) minus
            # the number of estimated parameters.
            degrees_of_freedom = len(tasks_data) - len(average_result)

            # Calculate the critical t-student value from the t distribution
            student_coefficients = [
                t.ppf(c_l, df=degrees_of_freedom)
                for c_l in self.confidence_levels
            ]

            # Calculating the confidence interval for each dimension; it combines the
            # confidence interval of singular measurements (device accuracy) with the
            # confidence interval of multiple measurements (statistical spread).
            # First - singular measurement errors:
            conf_intervals_sm = []
            for c_l, d_s_a, d_a_c, avg in zip(self.confidence_levels,
                                              self.device_scale_accuracies,
                                              self.device_accuracy_classes,
                                              average_result):
                d = sqrt((c_l * d_s_a / 2)**2 + (d_a_c * avg / 100)**2)
                conf_intervals_sm.append(c_l * d)

            # Calculation of confidence interval for multiple measurements:
            conf_intervals_mm = []
            for student_coefficient, dim_std in zip(student_coefficients,
                                                    all_dim_std):
                conf_intervals_mm.append(student_coefficient * dim_std /
                                         sqrt(len(tasks_data)))

            # The combined confidence interval is, in other words, the absolute error:
            absolute_errors = []
            for c_i_ss, c_i_mm in zip(conf_intervals_sm, conf_intervals_mm):
                absolute_errors.append(sqrt(pow(c_i_ss, 2) + pow(c_i_mm, 2)))

            # Calculating relative error for each dimension
            relative_errors = []
            for interval, avg_res in zip(absolute_errors, average_result):
                if not avg_res:  # the average result is 0 or 0.0
                    if interval == 0:
                        # Both the error and the average are zero: the relative error is 0,
                        # so use 1 as a dummy denominator (the average result itself is unchanged).
                        avg_res = 1
                    else:
                        # A non-zero error around a zero average: the relative error is
                        # undefined, so request one more measurement.
                        return 1
                relative_errors.append(interval / avg_res * 100)

            # Thresholds for relative errors that should not be exceeded for accurate measurement.
            thresholds = []
            if self.is_model_aware:
                # Adapt the thresholds depending on how close this Configuration is
                # to the current best solution.
                current_solution = \
                    experiment.get_current_solution().get_average_result()
                minimization_experiment = experiment.is_minimization()

                for b_t, max_t, r_max, avg_res, cur_solution_avg in zip(
                        self.base_acceptable_errors, self.max_acceptable_errors,
                        self.ratios_max, average_result, current_solution):
                    if minimization_experiment:
                        if not cur_solution_avg:
                            ratio = 1
                        else:
                            ratio = avg_res / cur_solution_avg
                    else:
                        if not avg_res:
                            ratio = 1
                        else:
                            ratio = cur_solution_avg / avg_res

                    adapted_threshold = b_t + (max_t - b_t) / (
                        1 + exp(-1 * (ratio - r_max / 2)))
                    thresholds.append(adapted_threshold)

            else:
                # Without model-awareness, use the base acceptable errors as thresholds.
                for acceptable_error in self.base_acceptable_errors:
                    thresholds.append(acceptable_error)

            # Simple implementation of possible multi-dimensional Repeater decision making:
            # if any of the result dimensions is not yet accurate enough, request one more measurement.
            for threshold, error in zip(thresholds, relative_errors):
                if error > threshold:
                    return 1
            return 0
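
To see the statistical core of evaluate() in isolation, the sketch below computes a t-based confidence interval and relative error for a single result dimension. The measurement values and the 0.95 confidence level are illustrative, and the device-accuracy terms of the original are omitted.

from math import sqrt
from statistics import mean, stdev
from scipy.stats import t

measurements = [10.2, 9.8, 10.5, 10.1, 9.9]   # repeated Tasks, one result dimension
confidence_level = 0.95                        # illustrative value

avg = mean(measurements)
std = stdev(measurements)                      # sample standard deviation
dof = len(measurements) - 1                    # observations minus estimated parameters
student_coefficient = t.ppf(confidence_level, df=dof)

# Confidence interval for multiple measurements, as in the snippet above:
absolute_error = student_coefficient * std / sqrt(len(measurements))
relative_error = absolute_error / avg * 100    # percent

print(f"avg={avg:.2f}, absolute error={absolute_error:.3f}, relative error={relative_error:.2f}%")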