def load_json_file(path_to_file):
    """
    Reads a .json file and returns its content as a Python object.
    :param path_to_file: string path to the file.
    :return: object that represents the .json file content.
    """
    logger = logging.getLogger(__name__)
    front_api = API()
    try:
        with open(path_to_file, 'r') as json_file:
            json_content = json.loads(json_file.read())
            return json_content
    except IOError as error:
        msg = "Unable to read a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
    except json.JSONDecodeError as error:
        msg = "Unable to decode a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
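
# A minimal usage sketch for load_json_file (the path below is hypothetical and only for illustration;
# it assumes logging has already been configured and the API event service is reachable):
#
#   experiment_description = load_json_file('./Resources/EnergyExperiment.json')
#   task_name = experiment_description["TaskConfiguration"]["TaskName"]
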
class RegressionSweetSpot(Model):

    def __init__(self, log_file_name, experiment):
        """
        Initialization of the regression model.
        :param log_file_name: string, location of the file that will store results of the model creation.
        :param experiment: instance of the Experiment class.
        """
        self.logger = logging.getLogger(__name__)
        # Send updates to subscribers.
        self.sub = API()

        # Model configuration - related fields.
        self.minimal_test_size = experiment.description["ModelConfiguration"]["minimalTestingSize"]
        self.maximal_test_size = experiment.description["ModelConfiguration"]["maximalTestingSize"]
        self.log_file_name = log_file_name

        # Built model - related fields.
        self.model = None
        self.minimum_model_accuracy = experiment.description["ModelConfiguration"]["MinimumAccuracy"]
        self.built_model_accuracy = 0
        self.built_model_test_size = 0.0

        # Data holding fields.
        self.experiment = experiment
        self.all_configurations = []

    def build_model(self, degree=6, tries=20):
        """
        Tries to build a new regression model.
        :param degree: Int. Degree of the polynomial features, see
            scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
        :param tries: Int. Number of tries to build the model at each step of decreasing the test size.
        :return: Boolean. True if the model was successfully built, otherwise False.
        """
        # Building the model.
        cur_accuracy = 0.99
        best_got = -10e10
        best_model = None
        while cur_accuracy > self.minimum_model_accuracy:
            current_test_size = self.maximal_test_size
            while current_test_size > self.minimal_test_size:
                for x in range(tries):
                    feature_train, feature_test, target_train, target_test = self.resplit_data(current_test_size)
                    model = Pipeline([
                        ('poly', PolynomialFeatures(degree=degree, interaction_only=False)),
                        ('reg', Ridge())
                    ])
                    model.fit(feature_train, target_train)
                    score_measured = model.score(feature_test, target_test)
                    if score_measured > best_got:
                        best_got = score_measured
                        best_model = model
                        self.logger.info('GOT NEW ACCURACY: %s with %s test size and %s accuracy threshold' %
                                         (round(score_measured, 3), round(current_test_size, 2),
                                          round(cur_accuracy, 2)))
                if best_got > cur_accuracy:
                    self.model = best_model
                    self.built_model_accuracy = best_got
                    self.built_model_test_size = current_test_size
                    self.logger.info("Regression model built with %s test size and %s accuracy." %
                                     (current_test_size, best_got))
                    return True
                else:
                    current_test_size -= 0.01
            cur_accuracy -= 0.01
        self.logger.info("Unable to build model, current best accuracy: %s need more data.." % best_got)
        return False

    def validate_model(self, degree=6):
        """
        Returns True if the built model is valid, False if the model could not be built
        or its predictions are implausible.
        :param degree: Int. Degree of the polynomial features used when the model was built.
        :return: True or False
        """
        # Check if the model was built.
        if not self.model:
            return False
        # Check if the model is adequate - write it.
        predicted_configuration = self.predict_next_configurations(1)
        predicted_labels = predicted_configuration[0].predicted_result
        if predicted_labels[0] >= 0:
            f = open(self.log_file_name, "a")
            f.write("Search space::\n")
            f.write(str(self.experiment.search_space) + "\n")
            f.write("Testing size = " + str(self.built_model_test_size) + "\n")
            for i in range(degree + 1):
                if i == 0:
                    f.write("(TR ^ 0) * (FR ^ 0) = " + str(self.model.named_steps['reg'].coef_[i]) + "\n")
                else:
                    for j in range(i + 1):
                        f.write("(TR ^ " + str(i - j) + ") * (FR ^ " + str(j) + ") = " +
                                str(self.model.named_steps['reg'].coef_[0][self.sum_fact(i) + j]) + "\n")
            f.write("R^2 = " + str(self.built_model_accuracy) + "\n")
            f.write("Intercept = " + str(self.model.named_steps['reg'].intercept_) + "\n")
            f.close()
            self.logger.info("Built model is valid.")
            self.sub.send('log', 'info', message="Built model is valid")
            return True
        else:
            self.logger.info("Predicted energy lower than 0: %s. Need more data.." % predicted_labels[0])
            self.sub.send('log', 'info',
                          message="Predicted energy lower than 0: %s. Need more data.." % predicted_labels[0])
            return False

    def predict_next_configurations(self, amount):
        """
        Takes features and, using the previously created model, runs the regression to find labels,
        returning the Configurations with the lowest predicted values.
        :param amount: int, number of Configurations to return.
        :return: list of Configurations that need to be measured.
        """
        # 1. Get the model's predictions.
        predicted_results = []
        for index, predicted_result in sorted(enumerate(self.model.predict(self.experiment.search_space)),
                                              key=lambda c: c[1]):
            conf = self.experiment.search_space[index]
            predicted_results.append((predicted_result, conf))
        # Only for DEMO
        # self.sub.send('predictions', 'configurations',
        #               configurations=[self.experiment.search_space[index] for (predicted_result, index) in predicted_results],
        #               results=[[round(predicted_result[0], 2)] for (predicted_result, index) in predicted_results])

        # 2. Update predicted results for already evaluated Configurations.
        for config in self.all_configurations:
            for pred_tuple in predicted_results:
                if pred_tuple[1] == config.get_parameters():
                    config.add_predicted_result(pred_tuple[1], pred_tuple[0])

        # 3. Pick up the required amount of configurations.
        all_config = [conf.get_parameters() for conf in self.all_configurations]
        result = []
        for best in predicted_results[:amount]:
            if best[1] in all_config:
                select = [conf for conf in self.all_configurations if conf.get_parameters() == best[1]]
                result.append(select[0])
            else:
                new_conf = Configuration(best[1])
                new_conf.add_predicted_result(best[1], best[0])
                result.append(new_conf)
        # 4. Return the configurations.
        return result

    def resplit_data(self, test_size):
        """
        Recreates subsets of features and labels for training and testing from the existing features and labels.
        :param test_size: Float. Fraction of the data that will be used to test the model.
        :return: tuple of (feature_train, feature_test, target_train, target_test).
        """
        all_features = []
        all_labels = []
        for configuration in self.all_configurations:
            all_features.append(configuration.get_parameters())
            all_labels.append(configuration.get_average_result())
        feature_train, feature_test, target_train, target_test = \
            model_selection.train_test_split(all_features, all_labels, test_size=test_size)
        return feature_train, feature_test, target_train, target_test

    @staticmethod
    def sum_fact(num):
        """
        Returns the sum of all numbers from 1 to 'num' (inclusive).
        :param num: int
        :return: int
        """
        return reduce(lambda x, y: x + y, list(range(1, num + 1)))

    def update_data(self, configurations):
        """
        Adds configurations to the whole set of configurations.
        :param configurations: List of Configuration instances.
        :return: self
        """
        self.all_configurations = configurations
        return self
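
# A minimal usage sketch for RegressionSweetSpot (hypothetical setup: 'experiment' is an Experiment
# instance whose description contains the "ModelConfiguration" section, and 'measured_configurations'
# is a list of already measured Configuration instances produced elsewhere in the pipeline):
#
#   model = RegressionSweetSpot(log_file_name="Results/model.txt", experiment=experiment)
#   model.update_data(measured_configurations)
#   if model.build_model(degree=6, tries=20) and model.validate_model(degree=6):
#       next_to_measure = model.predict_next_configurations(amount=1)
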
class Experiment:

    def __init__(self, description: dict, search_space: Hyperparameter):
        """
        Initialization of the Experiment class.
        The following fields are declared:
        self.measured_configurations - list of configuration instances,
            shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, taken from the .json file,
            shape - dict with sub-dicts.
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()
        self.evaluated_configurations: List[Configuration] = []  # repeater already evaluated these configurations
        self.measured_configurations: List[Configuration] = []   # the results for these configurations were already received
        self._default_configuration: Configuration = None
        self._description: Mapping = description
        self.search_space: Hyperparameter = search_space
        self.end_time = self.start_time = datetime.datetime.now()
        # An ID that is used to differentiate Experiments by their descriptions.
        self.ed_id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        # A unique ID, different for every experiment (even with the same description).
        self.unique_id = str(uuid.uuid4())
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.ed_id}"
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()
        # Initialize connection to the database.
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                os.getenv("BRISE_DATABASE_PORT"),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error("Unable to update Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error("Unable to delete Experiment Description: Read-only property.")
            raise AttributeError("Unable to delete Experiment Description: Read-only property.")

    description = property(_get_description, _set_description, _del_description)

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        del space['measured_conf_lock']
        del space['evaluated_conf_lock']
        del space['database']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()
        # For thread-safe adding of values to the relevant lists; protection against duplicate configurations.
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    @property
    def default_configuration(self) -> Configuration:
        return self._default_configuration

    @default_configuration.setter
    def default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self._default_configuration:
                self._default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.parameters],
                              results=[default_configuration.results])
                self.measured_configurations.append(default_configuration)
                if not self.current_best_configurations:
                    self.current_best_configurations = [default_configuration]
                self.database.write_one_record("Measured_configurations",
                                               default_configuration.get_configuration_record())
                self.database.write_one_record(
                    collection_name="warm_startup_info",
                    record={"Exp_unique_ID": self.unique_id,
                            "wsi": default_configuration.warm_startup_info})
            else:
                raise ValueError("The default Configuration was registered already.")

    def try_add_configuration(self, configuration: Configuration):
        """
        Adds a Configuration object to the Experiment, if the Configuration was not added previously.
        :param configuration: Configuration instance.
        :return: bool flag, True if the Configuration was added to the list of either measured
            or evaluated configurations, False otherwise.
        """
        result = False
        if configuration.is_enabled:
            if self._try_put(configuration):
                # The Configuration will not be added to the Experiment if it is already there.
                result = True
        return result

    def _try_put(self, configuration_instance: Configuration):
        """
        Takes an instance of the Configuration class and appends it to the list with all configuration instances.
        :param configuration_instance: Configuration class instance.
        :return: bool flag, whether the Configuration was added to any of the lists or not.
        """
        if self._is_valid_configuration_instance(configuration_instance):
            if configuration_instance.status == Configuration.Status.MEASURED:
                with self.measured_conf_lock:
                    if configuration_instance not in self.measured_configurations:
                        self._add_measured_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            elif configuration_instance.status == Configuration.Status.EVALUATED:
                with self.evaluated_conf_lock:
                    if configuration_instance not in self.evaluated_configurations:
                        self._add_evaluated_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            else:
                raise ValueError(
                    f"Can not add Configuration with status {configuration_instance.status.name} to Experiment.")

    def get_any_configuration_by_parameters(self, parameters: tuple) -> Union[None, Configuration]:
        """
        Finds and retrieves an instance of Configuration that was previously added to the Experiment
        by its Parameters.
        :param parameters: tuple. Parameters of the desired Configuration.
        :return: instance of the Configuration class or `None` if the Configuration instance was not found.
        """
        for configuration_instance in self.measured_configurations:
            if configuration_instance.parameters == parameters:
                return configuration_instance
        for configuration_instance in self.evaluated_configurations:
            if configuration_instance.parameters == parameters:
                return configuration_instance
        return None

    def get_current_status(self, serializable: bool):
        """
        Returns the current state of the Experiment, including the already elapsed time, the currently found
        solution Configuration, the default Configuration, the Experiment description and all already
        evaluated Configurations.
        :param serializable: Boolean. Defines if the returned structure should be serializable or not.
            If True - all Configuration objects will be transformed to their string representation.
        :return: Dict with the following keys: ["Running time", "Best found Configuration",
            "Default configuration", "Experiment description", "Evaluated Configurations"].
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration":
                self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Experiment description": self.description,
            "Evaluated Configurations":
                [conf.__getstate__() if serializable else conf for conf in self.measured_configurations]
        }
        return current_status

    def summarize_results_to_file(self, report_format: str, folder_path: str):
        """
        Called before the proper termination of BRISE. Aggregates the current state of the Experiment
        and writes it as a json or yaml file.
        :param report_format: String. Format of the output file, either 'yaml' or 'json'.
        :param folder_path: String. Folder where results should be stored.
        :return: self
        """
        os.makedirs(folder_path, exist_ok=True)
        output_file_name = folder_path + self.name
        data = ""
        if report_format.lower() == "yaml":
            from yaml import safe_dump
            output_file_name += '.yaml'
            data = safe_dump(self.get_current_status(serializable=True), width=120, indent=4)
        elif report_format.lower() == "json":
            from json import dumps
            output_file_name += '.json'
            data = dumps(self.get_current_status(serializable=True), indent=4)
        else:
            self.logger.error("Wrong serialization format provided. Supported 'yaml' and 'json'.")
        with open(output_file_name, 'w') as output_file:
            output_file.write(data)
        self.logger.info("Results of the Experiment have been written to file: %s" % output_file_name)
        return self

    def is_configuration_evaluated(self, configuration):
        """
        Checks whether the Configuration is in the evaluated_configurations list or not.
        Could be used to filter out outdated (not added to the current Experiment) Configurations.
        :param configuration: Configuration instance.
        :return: True if the Configuration instance was previously added to the Experiment, False otherwise.
        """
        return configuration in self.evaluated_configurations

    def get_final_report_and_result(self):
        self.end_time = datetime.datetime.now()
        if self.measured_configurations:
            performed_measurements = \
                self.database.get_last_record_by_experiment_id("Repeater_measurements",
                                                               self.unique_id)["Performed_measurements"]
            self.logger.info("\n\nFinal report:")
            self.logger.info("ALL MEASURED CONFIGURATIONS:\n")
            for configuration in self.measured_configurations:
                self.logger.info(configuration)
            self.logger.info("Number of measured Configurations: %s" % len(self.measured_configurations))
            self.logger.info("Number of Tasks: %s" % performed_measurements)
            self.logger.info("Best found Configuration: %s" % self.get_current_solution())
            self.logger.info("BRISE running time: %s" % str(self.get_running_time()))

            all_features = []
            for configuration in self.measured_configurations:
                all_features.append(configuration.parameters)
            results_folder = self.description["General"]["results_storage"]
            self.dump(folder_path=results_folder)       # Store the instance of the Experiment.
            self.write_csv(folder_path=results_folder)  # Store the Experiment metrics.
            self.summarize_results_to_file(report_format="yaml", folder_path=results_folder)
            self.api.send('final', 'configuration',
                          configurations=[self.get_current_solution().parameters],
                          results=[self.get_current_solution().results],
                          measured_points=[all_features],
                          performed_measurements=[performed_measurements])
            return self.current_best_configurations
        else:
            self.logger.error('No configuration was measured. Please, check your Experiment Description.')

    def get_current_solution(self) -> Union[Configuration, None]:
        if self.current_best_configurations:
            return self.current_best_configurations[0]
        else:
            return None

    def _is_valid_configuration_instance(self, configuration_instance: Configuration) -> bool:
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error('Current object is not a Configuration instance, but %s' % type(configuration_instance))
            return False

    def _add_measured_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Saves the configuration after it passed all checks.
        This method also sends an update to the API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if configuration.is_better(self.get_objectives_minimization(),
                                   self.get_objectives_priorities(),
                                   self.current_best_configurations[0]):
            # We do not need warm_startup_info anymore, since a better configuration was found.
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
            self.database.update_record(
                collection_name="warm_startup_info",
                query={"Exp_unique_ID": self.unique_id},
                new_val={"wsi": configuration.warm_startup_info})
        else:
            # This configuration did not improve the previous solution, no need to keep track of its solutions.
            configuration.warm_startup_info = {}
        self.database.write_one_record("Measured_configurations", configuration.get_configuration_record())
        self.send_state_to_db()
        self.api.send("new", "configuration",
                      configurations=[configuration.parameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def _add_evaluated_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Saves the configuration after it passed all checks.
        :param configuration: Configuration object.
        :return: None
        """
        self.evaluated_configurations.append(configuration)

    def get_objectives(self) -> List[str]:
        return self.description["TaskConfiguration"]["Objectives"]

    def get_objectives_minimization(self) -> List[bool]:
        return self.description["TaskConfiguration"]["ObjectivesMinimization"]

    def get_objectives_priorities(self) -> List[int]:
        return self.description["TaskConfiguration"]["ObjectivesPriorities"]

    def get_models_objectives_priorities(self) -> List[int]:
        return self.description["TaskConfiguration"]["ObjectivesPrioritiesModels"]

    def dump(self, folder_path: str) -> None:
        """
        Saves a dump of the Experiment object. Later it could be uploaded through the web API.
        :param folder_path: str. Path to the folder where to store the dump file. The user running main.py
            should be authorized to write into the specified folder.
        :return: None
        """
        if folder_path[-1] != "/" and folder_path[-1] != "\\":
            folder_path = folder_path + "/"
        os.makedirs(folder_path, exist_ok=True)
        dump_path = folder_path + self.name + ".pkl"
        with open(dump_path, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
        self.logger.info(f"Saved experiment instance. Path: {dump_path}")
        os.environ["EXP_DUMP_NAME"] = folder_path + self.name
        self.database.update_record(
            "Experiment_description",
            {"Exp_unique_ID": self.unique_id},
            {"ExperimentObject": pickle.dumps(self, pickle.HIGHEST_PROTOCOL)})

    def write_csv(self, folder_path: str) -> None:
        """
        Saves a .csv file with the main metrics of the experiment.
        Args:
            folder_path (str): Path to the folder where to store the csv report.
""" if self.search_space.get_size() == np.inf: search_space_coverage = "unknown (infinite search space)" else: search_space_coverage = str( round((len(self.measured_configurations) / self.search_space.get_size()) * 100)) + '%' data = dict({ 'model': "_".join([ model["Type"] for model in self.description["Predictor"]["models"] ]), 'default configuration': [' '.join(str(v) for v in self.default_configuration.parameters)], 'solution configuration': [' '.join(str(v) for v in self.get_current_solution().parameters)], 'default result': self.default_configuration.results, 'solution result': self.get_current_solution().results, 'number of measured configurations': len(self.measured_configurations), 'search space coverage': search_space_coverage, 'number of repetitions': len(self.get_all_repetition_tasks()), 'execution time': (self.get_running_time()).seconds, 'repeater': self.description['Repeater']['Type'] }) file_path = '{0}{1}.csv'.format(folder_path, self.name) keys = list(data.keys()) values = list(data.values()) with open(file_path, 'w') as csvFile: writer = csv.writer(csvFile) writer.writerow(keys) writer.writerow(values) self.logger.info("Saved csv file. Path: %s" % file_path) def get_name(self): return self.name def get_running_time(self): if self.end_time is self.start_time: return datetime.datetime.now() - self.start_time else: return self.end_time - self.start_time def get_all_repetition_tasks(self): """ List of results for all tasks that were received on workers Returns: [List] -- List with results for all atom-tasks """ all_tasks = [] result_key = self.description['TaskConfiguration']['Objectives'][0] for configuration in self.measured_configurations: for task in configuration.get_tasks().values(): if 'result' in task: all_tasks.append(task['result'][result_key]) return all_tasks def get_number_of_measured_configurations(self): return len(self.measured_configurations) def get_stop_condition_parameters(self): return self.description["StopCondition"] def get_selection_algorithm_parameters(self): return self.description["SelectionAlgorithm"] def get_outlier_detectors_parameters(self): return self.description["OutliersDetection"] def get_repeater_parameters(self): return self.description["Repeater"] def increment_bad_configuration_number(self): self.bad_configurations_number = self.bad_configurations_number + 1 return self def get_bad_configuration_number(self): return self.bad_configurations_number def update_model_state(self, model_state: bool): self.model_is_valid = model_state def get_model_state(self) -> bool: return self.model_is_valid def send_state_to_db(self) -> None: """ Send current experiment state information, or create one if not exist. :return: None """ if self.database.get_last_record_by_experiment_id( "Experiment_state", self.unique_id) is None: self.database.write_one_record("Experiment_state", self.get_experiment_state_record()) else: self.database.update_record( "Experiment_state", {"Exp_unique_ID": self.unique_id}, { "Number_of_measured_configs": self.get_number_of_measured_configurations(), "Number_of_bad_configs": self.get_bad_configuration_number(), "Current_solution": self.get_current_solution().get_configuration_record(), "is_model_valid": self.get_model_state() }) def get_experiment_description_record(self) -> Mapping: ''' The helper method that formats an experiment description to be stored as a record in a Database :return: Mapping. 
Field names of the database collection with respective information ''' record = {} # add this specific experiment information record["Exp_unique_ID"] = self.unique_id record["Exp_ID"] = self.ed_id record["DateStarted"] = str(datetime.datetime.now()) # store experiment description fields record.update(self.description) # experiment description record will be updated at the end of the experiment record["ExperimentObject"] = None return record def get_experiment_state_record(self) -> Mapping: ''' The helper method that formats current experiment state to be stored as a record in a Database :return: Mapping. Field names of the database collection with respective information ''' record = {} record["Exp_unique_ID"] = self.unique_id record[ "Number_of_measured_configs"] = self.get_number_of_measured_configurations( ) record["Number_of_bad_configs"] = self.get_bad_configuration_number() current_solution = self.get_current_solution() if current_solution is not None: current_solution = current_solution.get_configuration_record() record["Current_solution"] = current_solution record["is_model_valid"] = self.get_model_state() return record
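
# A minimal usage sketch for this database-backed Experiment variant (hypothetical setup:
# 'description' is the parsed experiment description dict, 'search_space' is a Hyperparameter
# instance, and the BRISE_DATABASE_* environment variables point to a reachable MongoDB instance):
#
#   experiment = Experiment(description, search_space)
#   experiment.default_configuration = default_config      # a Configuration with status MEASURED
#   experiment.try_add_configuration(next_measured_config)
#   best = experiment.get_final_report_and_result()
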
class Experiment:

    def __init__(self, description: dict):
        """
        Initialization of the Experiment class.
        The following fields are declared:
        self.all_configurations - list of configuration instances,
            shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, taken from the .json file,
            shape - dict with sub-dicts.
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()
        self.default_configuration = []
        self.all_configurations = []
        self._description = description
        self.search_space = []
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by their descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name = "exp_{task_name}_{experiment_hash}".format(
            task_name=self.description["TaskConfiguration"]["TaskName"],
            experiment_hash=self.id)
        self.current_best_configurations = []
        self.__generate_search_space()

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error("Unable to update Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error("Unable to delete Experiment Description: Read-only property.")
            raise AttributeError("Unable to delete Experiment Description: Read-only property.")

    description = property(_get_description, _set_description, _del_description)

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()

    def put_default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self.default_configuration:
                self.default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.get_parameters()],
                              results=[default_configuration.get_average_result()])
                if default_configuration not in self.all_configurations:
                    self.all_configurations.append(default_configuration)
                    self._calculate_current_best_configurations()
            else:
                raise ValueError("The default Configuration was registered already.")

    def add_configurations(self, configurations: List[Configuration]):
        """
        Takes a List of Configuration objects and adds them to the Experiment state.
        :param configurations: List of Configuration instances.
        """
        for configuration in configurations:
            self._put(configuration)

    def _put(self, configuration_instance: Configuration):
        """
        Takes an instance of the Configuration class and appends it to the list with all configuration instances.
        :param configuration_instance: Configuration class instance.
""" if self._is_valid_configuration_instance(configuration_instance): if self.all_configurations is []: self._add_configuration_to_experiment(configuration_instance) else: is_exists = False for value in self.all_configurations: if value.get_parameters() == configuration_instance.get_parameters(): is_exists = True if not is_exists: self._add_configuration_to_experiment(configuration_instance) else: self.logger.warning("Attempt of adding Configuration that is already in Experiment: %s" % configuration_instance) def get_configuration_by_parameters(self, parameters): """ Returns the instance of Configuration class, which contains the concrete configuration, if configuration the exists :param parameters: list. Concrete experiment configuration shape - list, e.g. [2900.0, 32] :return: instance of Configuration class """ for configuration_instance in self.all_configurations: if configuration_instance.get_parameters() == parameters: return configuration_instance return None def get_final_report_and_result(self, repeater): # In case, if the model predicted the final point, that has less value, than the default, but there is # a point, that has less value, than the predicted point - report this point instead of predicted point. self.end_time = datetime.datetime.now() self.logger.info("\n\nFinal report:") self.logger.info("ALL MEASURED CONFIGURATIONS:\n") for configuration in self.all_configurations: self.logger.info(configuration) self.logger.info("Number of measured Configurations: %s" % len(self.all_configurations)) self.logger.info("Number of Tasks: %s" % repeater.performed_measurements) self.logger.info("Best found Configuration: %s" % self.get_current_solution()) self.logger.info("BRISE running time: %s" % str(self.get_running_time())) all_features = [] for configuration in self.all_configurations: all_features.append(configuration.get_parameters()) self.api.send('final', 'configuration', configurations=[self.get_current_solution().get_parameters()], results=[[round(self.get_current_solution().get_average_result()[0], 2)]], measured_points=[all_features], performed_measurements=[repeater.performed_measurements]) self.dump() # Store instance of Experiment self.summarize_results_to_file() self.write_csv() return self.current_best_configurations def get_current_status(self, serializable: bool = False): """ Returns current state of Experiment, including already elapsed time, currently found solution Configuration, default Configuration, Experiment description and all already evaluated Configurations. :param serializable: Boolean. Defines if returned structure should be serializable or not. If True - all Configuration objects will be transformed to their string representation. 
        :return: Dict with the following keys: ["Running time", "Best found Configuration",
            "Default configuration", "Experiment description", "Evaluated Configurations"].
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration":
                self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Default configuration":
                self.default_configuration.__getstate__() if serializable else self.default_configuration,
            "Experiment description": self.description,
            "Evaluated Configurations":
                [conf.__getstate__() if serializable else conf for conf in self.all_configurations]
        }
        return current_status

    def get_current_solution(self):
        self._calculate_current_best_configurations()
        return self.current_best_configurations[0]

    def get_current_best_configurations(self):
        self._calculate_current_best_configurations()
        return self.current_best_configurations

    def _is_valid_configuration_instance(self, configuration_instance):
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error('Current object is not a Configuration instance, but %s' % type(configuration_instance))
            return False

    def _calculate_current_best_configurations(self):
        best_configuration = [self.all_configurations[0]]
        for configuration in self.all_configurations:
            if configuration.is_better_configuration(self.is_minimization(), best_configuration[0]):
                best_configuration = [configuration]
        self.current_best_configurations = best_configuration

    def _add_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.all_configurations.append(configuration)
        self.api.send("new", "configuration",
                      configurations=[configuration.get_parameters()],
                      results=[configuration.get_average_result()])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def is_minimization(self):
        return self.description["General"]["isMinimizationExperiment"]

    def get_number_of_configurations_per_iteration(self):
        if "ConfigurationsPerIteration" in self.description["General"]:
            return self.description["General"]["ConfigurationsPerIteration"]
        else:
            return 0

    def __generate_search_space(self):
        self.search_space = [list(configuration) for configuration in
                             itertools.product(*self.description["DomainDescription"]["AllConfigurations"])]

    def dump(self, folder_path: str = 'Results/serialized/'):
        """
        Save the instance of the Experiment class.
        """
        # Used to upload the Experiment dump through the web API.
        os.environ["EXP_DUMP_NAME"] = self.name
        create_folder_if_not_exists(folder_path)
        file_name = '{}.pkl'.format(self.name)
        # Write the pickle file.
        with open(folder_path + file_name, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
        self.logger.info("Saved experiment instance. Path: %s" % (folder_path + file_name))

    def summarize_results_to_file(self, report_format: str = 'yaml', path: str = 'Results/'):
        """
        Called before the proper termination of BRISE. Aggregates the current state of the Experiment
        and writes it as a json or yaml file.
        :param report_format: String. Format of the output file, either 'yaml' or 'json'.
        :param path: String. Folder where results should be stored.
        :return: self
        """
        if report_format.lower() == "yaml":
            from yaml import safe_dump
            output_file_name = path + self.name + '.yaml'
            data = safe_dump(self.get_current_status(serializable=True), width=120, indent=4)
        elif report_format.lower() == "json":
            from json import dumps
            output_file_name = path + self.name + '.json'
            data = dumps(self.get_current_status(serializable=True), indent=4)
        else:
            raise TypeError("Wrong serialization format provided. Supported 'yaml' and 'json'.")
        create_folder_if_not_exists(path)
        with open(output_file_name, 'w') as output_file:
            output_file.write(data)
        self.logger.info("Results of the Experiment have been written to file: %s" % output_file_name)
        return self

    def write_csv(self, path='Results/serialized/'):
        """
        Saves a .csv file with the main metrics of the experiment.
        Args:
            path (str, optional): Path to the folder where to store the csv report.
        """
        search_space = 1
        for dim in self.description['DomainDescription']['AllConfigurations']:
            search_space *= len(dim)
        data = dict({
            'model': self.description['ModelConfiguration']['ModelType'],
            'default configuration': [' '.join(str(v) for v in self.default_configuration.get_parameters())],
            'solution configuration': [' '.join(str(v) for v in self.get_current_solution().get_parameters())],
            'default result': self.default_configuration.get_average_result()[0],
            'solution result': self.get_current_solution().get_average_result()[0],
            'number of measured configurations': len(self.all_configurations),
            'search space coverage': str(round((len(self.all_configurations) / search_space) * 100)) + '%',
            'number of repetitions': len(self.get_all_repetition_tasks()),
            'execution time': (self.get_running_time()).seconds,
            'repeater': self.description['Repeater']['Type']
        })
        file_path = '{0}{1}.csv'.format(path, self.name)
        keys = list(data.keys())
        values = list(data.values())
        with open(file_path, 'a') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(keys)
            writer.writerow(values)
        self.logger.info("Saved csv file. Path: %s" % file_path)

    def get_name(self):
        return self.name

    def get_running_time(self):
        if self.end_time is self.start_time:
            return datetime.datetime.now() - self.start_time
        else:
            return self.end_time - self.start_time

    def get_all_repetition_tasks(self):
        """
        List of results for all tasks that were received on the workers.
        Returns:
            [List] -- List with results for all atom-tasks.
        """
        all_tasks = []
        result_key = self.description['TaskConfiguration']['ResultStructure'][0]
        for configuration in self.all_configurations:
            for task in configuration.get_tasks().values():
                if 'result' in task:
                    all_tasks.append(task['result'][result_key])
        return all_tasks

    def get_number_of_measured_configurations(self):
        return len(self.all_configurations)

    def get_search_space_size(self):
        return len(self.search_space)

    def get_stop_condition_parameters(self):
        return self.description["StopCondition"]

    def get_selection_algorithm_parameters(self):
        return self.description["SelectionAlgorithm"]
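
# A minimal usage sketch for this older, list-based Experiment variant (hypothetical setup:
# 'description' is the parsed experiment description dict, and 'default_config' / 'new_configs'
# are Configuration instances produced elsewhere in the pipeline):
#
#   experiment = Experiment(description)
#   experiment.put_default_configuration(default_config)
#   experiment.add_configurations(new_configs)
#   best = experiment.get_current_solution()
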
class Experiment:

    def __init__(self, description: dict):
        """
        Initialization of the Experiment class.
        The following fields are declared:
        self.measured_configurations - list of configuration instances,
            shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()
        # TODO: merge lists into a single one (https://github.com/dpukhkaiev/BRISEv2/pull/112#discussion_r371761149)
        self.evaluated_configurations: List[Configuration] = []  # repeater already evaluated these configurations
        self.measured_configurations: List[Configuration] = []   # the results for these configurations were already received
        self._default_configuration: Configuration = None
        self._description: dict = description
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by their descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.id}"
        # TODO MultiOpt: Currently we store only one solution configuration here,
        # but it was made as a possible hook for multidimensional optimization.
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error("Unable to update Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error("Unable to delete Experiment Description: Read-only property.")
            raise AttributeError("Unable to delete Experiment Description: Read-only property.")

    description = property(_get_description, _set_description, _del_description)

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        del space['measured_conf_lock']
        del space['evaluated_conf_lock']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()
        # For thread-safe adding of values to the relevant lists; protection against duplicate configurations.
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    @property
    def default_configuration(self) -> Configuration:
        return self._default_configuration

    @default_configuration.setter
    def default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self._default_configuration:
                self._default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.hyperparameters],
                              results=[default_configuration.results])
                self.measured_configurations.append(default_configuration)
                if not self.current_best_configurations:
                    self.current_best_configurations = [default_configuration]
                temp_msg = f"Added Default Configuration: {default_configuration}"
                self.logger.info(temp_msg)
                self.api.send('log', 'info', message=temp_msg)
            else:
                raise ValueError("The default Configuration was registered already.")

    def try_add_configuration(self, configuration: Configuration):
        """
        Adds a Configuration object to the Experiment, if the Configuration was not added previously.
        :param configuration: Configuration instance.
        :return: bool flag, True if the Configuration was added to the list of either measured
            or evaluated configurations, False otherwise.
        """
        result = False
        if configuration.is_enabled:
            if self._try_put(configuration):
                # The Configuration will not be added to the Experiment if it is already there.
                result = True
        return result

    def _try_put(self, configuration_instance: Configuration):
        """
        Takes an instance of the Configuration class and appends it to the list with all configuration instances.
        :param configuration_instance: Configuration class instance.
        :return: bool flag, whether the Configuration was added to any of the lists or not.
        """
        if self._is_valid_configuration_instance(configuration_instance):
            if configuration_instance.status == Configuration.Status.MEASURED:
                with self.measured_conf_lock:
                    if configuration_instance not in self.measured_configurations:
                        self._add_measured_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            elif configuration_instance.status == Configuration.Status.EVALUATED:
                with self.evaluated_conf_lock:
                    if configuration_instance not in self.evaluated_configurations:
                        self._add_evaluated_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            else:
                raise ValueError(
                    f"Can not add Configuration with status {configuration_instance.status} to Experiment.")

    def get_any_configuration_by_parameters(self, hyperparameters: tuple) -> Union[None, Configuration]:
        """
        Finds and retrieves an instance of Configuration that was previously added to the Experiment
        by its hyperparameters.
        :param hyperparameters: tuple. Hyperparameters of the desired Configuration.
        :return: instance of the Configuration class or `None` if the Configuration instance was not found.
        """
        for configuration_instance in self.measured_configurations:
            if configuration_instance.hyperparameters == hyperparameters:
                return configuration_instance
        for configuration_instance in self.evaluated_configurations:
            if configuration_instance.hyperparameters == hyperparameters:
                return configuration_instance
        return None

    def is_configuration_in_experiment(self, configuration: Configuration) -> bool:
        """
        Checks if the provided Configuration is already in the Experiment.
        :param configuration: BRISE Configuration instance with the required 'hyperparameters' property.
        :type configuration: Configuration
        :return: boolean, True if the Configuration was previously added to the Experiment, False otherwise.
        :rtype: bool
        """
        found_config = self.get_any_configuration_by_parameters(configuration.hyperparameters)
        return True if found_config else False

    def is_configuration_evaluated(self, configuration):
        """
        Checks whether the Configuration is in the evaluated_configurations list or not.
        Could be used to filter out outdated (not added to the current Experiment) Configurations.
        :param configuration: Configuration instance.
        :return: True if the Configuration instance was previously added to the Experiment, False otherwise.
        """
        return configuration in self.evaluated_configurations

    def get_final_report_and_result(self, repeater):
        self.end_time = datetime.datetime.now()
        if self.measured_configurations:
            self.logger.info("\n\nFinal report:")
            self.logger.info("ALL MEASURED CONFIGURATIONS:\n")
            for configuration in self.measured_configurations:
                self.logger.info(configuration)
            self.logger.info("Number of measured Configurations: %s" % len(self.measured_configurations))
            self.logger.info("Number of Tasks: %s" % repeater.performed_measurements)
            self.logger.info("Best found Configuration: %s" % self.get_current_solution())
            self.logger.info("BRISE running time: %s" % str(self.get_running_time()))

            all_features = []
            for configuration in self.measured_configurations:
                all_features.append(configuration.hyperparameters)
            self.dump()  # Store the instance of the Experiment.
            self.api.send('final', 'configuration',
                          configurations=[self.get_current_solution().hyperparameters],
                          results=[self.get_current_solution().results],
                          measured_points=[all_features],
                          performed_measurements=[repeater.performed_measurements])
            return self.current_best_configurations
        else:
            self.logger.error('No configuration was measured. Please, check your Experiment Description.')

    def get_current_status(self, serializable: bool = False):
        """
        Returns the current state of the Experiment, including the already elapsed time, the currently found
        solution Configuration, the default Configuration, the Experiment description and all already
        evaluated Configurations.
        :param serializable: Boolean. Defines if the returned structure should be serializable or not.
            If True - all Configuration objects will be transformed to their string representation.
        :return: Dict with the following keys: ["Running time", "Best found Configuration",
            "Default configuration", "Experiment description", "Evaluated Configurations"].
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration":
                self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Default configuration":
                self.default_configuration.__getstate__() if serializable else self.default_configuration,
            "Experiment description": self.description,
            "Evaluated Configurations":
                [conf.__getstate__() if serializable else conf for conf in self.measured_configurations]
        }
        return current_status

    def get_current_solution(self) -> Configuration:
        return self.current_best_configurations[0]

    def _is_valid_configuration_instance(self, configuration_instance: Configuration) -> bool:
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error('Current object is not a Configuration instance, but %s' % type(configuration_instance))
            return False

    def _add_measured_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Saves the configuration after it passed all checks.
        This method also sends an update to the API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if not self.current_best_configurations:
            # First solution found.
            self.current_best_configurations = [configuration]
        elif configuration.is_better_configuration(self.is_minimization(), self.current_best_configurations[0]):
            # New solution found.
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
        else:
            # This configuration did not improve the previous solution.
            configuration.warm_startup_info = {}
        self.api.send("new", "configuration",
                      configurations=[configuration.hyperparameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def _add_evaluated_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Saves the configuration after it passed all checks.
        :param configuration: Configuration object.
        :return: None
        """
        self.evaluated_configurations.append(configuration)

    def is_minimization(self):
        return self.description["General"]["isMinimizationExperiment"]

    def dump(self, folder_path: str = 'Results/serialized/'):
        """
        Saves the instance of the Experiment class.
        """
        # Used to upload the Experiment dump through the web API.
        os.environ["EXP_DUMP_NAME"] = self.name
        create_folder_if_not_exists(folder_path)
        file_name = '{}.pkl'.format(self.name)
        # Write the pickle file.
        with open(folder_path + file_name, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
        self.logger.info("Saved experiment instance. Path: %s" % (folder_path + file_name))

    def get_name(self):
        return self.name

    def get_running_time(self):
        if self.end_time is self.start_time:
            return datetime.datetime.now() - self.start_time
        else:
            return self.end_time - self.start_time

    def get_all_repetition_tasks(self):
        """
        List of results for all tasks that were received on the workers.
        Returns:
            [List] -- List with results for all atom-tasks.
        """
        all_tasks = []
        result_key = self.description['TaskConfiguration']['ResultStructure'][0]
        for configuration in self.measured_configurations:
            for task in configuration.get_tasks().values():
                if 'result' in task:
                    all_tasks.append(task['result'][result_key])
        return all_tasks

    def get_number_of_measured_configurations(self):
        return len(self.measured_configurations)

    def get_stop_condition_parameters(self):
        return self.description["StopCondition"]

    def get_selection_algorithm_parameters(self):
        return self.description["SelectionAlgorithm"]

    def get_outlier_detectors_parameters(self):
        return self.description["OutliersDetection"]

    def increment_bad_configuration_number(self):
        self.bad_configurations_number = self.bad_configurations_number + 1
        return self

    def get_bad_configuration_number(self):
        return self.bad_configurations_number

    def update_model_state(self, model_state: bool):
        self.model_is_valid = model_state

    def get_model_state(self) -> bool:
        return self.model_is_valid
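
# A minimal usage sketch for this in-memory Experiment variant (hypothetical setup: 'description'
# is the parsed experiment description dict; Configurations carry a status of MEASURED or EVALUATED):
#
#   experiment = Experiment(description)
#   experiment.default_configuration = default_config       # registers the default Configuration
#   if not experiment.is_configuration_in_experiment(candidate):
#       experiment.try_add_configuration(candidate)
#   report = experiment.get_current_status(serializable=True)
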
class MainThread(threading.Thread):
    """
    This class runs the Main functionality in a separate thread,
    connected to the `default_configuration_results_queue` and `configurations_results_queue` as a consumer.
    """

    class State(int, Enum):
        RUNNING = 0
        SHUTTING_DOWN = 1
        IDLE = 2

    def __init__(self, experiment_setup: [Experiment, Hyperparameter] = None):
        """
        Initializes the main thread.
        :param experiment_setup: fully initialized experiment, e.g. from a POST request.
        """
        super(MainThread, self).__init__()
        self._is_interrupted = False
        self.conf_lock = threading.Lock()
        self._state = self.State.IDLE
        self.experiment_setup = experiment_setup
        self.sub = API()  # front-end subscribers
        if __name__ == "__main__":
            self.logger = BRISELogConfigurator().get_logger(__name__)
        else:
            self.logger = logging.getLogger(__name__)
        self.experiment: Experiment = None
        self.connection: pika.BlockingConnection = None
        self.consume_channel = None
        self.predictor: Predictor = None
        self.wsc_client: WSClient = None
        self.repeater: RepeaterOrchestration = None
        self.database: MongoDB = None

    def run(self):
        """
        The entry point to the main node functionality - measuring the default Configuration.
        When the default Configuration finishes its evaluation, the first set of Configurations will be
        sampled for evaluation (respectively, the queues for Configuration measurement results are initialized).
        """
        self._state = self.State.RUNNING
        self.logger.info("Starting BRISE")
        self.sub.send('log', 'info', message="Starting BRISE")
        if not self.experiment_setup:
            # Check if main.py is running with a specified experiment description file path.
            if len(argv) > 1:
                exp_desc_file_path = argv[1]
            else:
                exp_desc_file_path = './Resources/EnergyExperiment/EnergyExperiment.json'
                log_msg = f"The Experiment Setup was not provided and the path to an experiment file was not specified." \
                          f" The default one will be executed: {exp_desc_file_path}"
                self.logger.warning(log_msg)
                self.sub.send('log', 'warning', message=log_msg)
            experiment_description, search_space = load_experiment_setup(exp_desc_file_path)
        else:
            experiment_description = self.experiment_setup["experiment_description"]
            search_space = self.experiment_setup["search_space"]

        validate_experiment_description(experiment_description)
        os.makedirs(experiment_description["General"]["results_storage"], exist_ok=True)

        # Initializing the instance of Experiment - the main data holder.
        self.experiment = Experiment(experiment_description, search_space)
        search_space.experiment_id = self.experiment.unique_id
        Configuration.set_task_config(self.experiment.description["TaskConfiguration"])

        # Initialize connection to the RabbitMQ service.
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(os.getenv("BRISE_EVENT_SERVICE_HOST"),
                                      int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT"))))
        self.consume_channel = self.connection.channel()

        # Initialize connection to the database.
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                int(os.getenv("BRISE_DATABASE_PORT")),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))

        # Write the initial settings to the database.
        self.database.write_one_record("Experiment_description",
                                       self.experiment.get_experiment_description_record())
        self.database.write_one_record(
            "Search_space",
            get_search_space_record(self.experiment.search_space, self.experiment.unique_id))
        self.experiment.send_state_to_db()

        self.sub.send('experiment', 'description',
                      global_config=self.experiment.description["General"],
                      experiment_description=self.experiment.description,
                      searchspace_description=self.experiment.search_space.serialize(True))
        self.logger.debug("Experiment description and global configuration sent to the API.")

        # Create and launch Stop Condition services in separate threads.
        launch_stop_condition_threads(self.experiment.unique_id)

        # Instantiate a client for the Worker Service, establish a connection.
        self.wsc_client = WSClient(self.experiment.description["TaskConfiguration"],
                                   os.getenv("BRISE_EVENT_SERVICE_HOST"),
                                   int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))

        # Initialize the Repeater - it encapsulates the Configuration evaluation process to avoid
        # fluctuations of results (achieved by multiple Configuration evaluations on Workers - Tasks).
        RepeaterOrchestration(self.experiment)

        self.predictor: Predictor = Predictor(self.experiment.unique_id,
                                              self.experiment.description,
                                              self.experiment.search_space)

        self.consume_channel.basic_consume(queue='default_configuration_results_queue', auto_ack=True,
                                           on_message_callback=self.get_default_configurations_results)
        self.consume_channel.basic_consume(queue='configurations_results_queue', auto_ack=True,
                                           on_message_callback=self.get_configurations_results)
        self.consume_channel.basic_consume(queue='stop_experiment_queue', auto_ack=True,
                                           on_message_callback=self.stop)
        self.consume_channel.basic_consume(queue="get_new_configuration_queue", auto_ack=True,
                                           on_message_callback=self.send_new_configurations_to_measure)

        self.default_config_handler = get_default_config_handler(self.experiment)
        temp_msg = "Measuring default Configuration."
        self.logger.info(temp_msg)
        self.sub.send('log', 'info', message=temp_msg)
        default_parameters = self.experiment.search_space.generate_default()
        default_configuration = Configuration(default_parameters, Configuration.Type.DEFAULT,
                                              self.experiment.unique_id)
        default_configuration.experiment_id = self.experiment.unique_id
        dictionary_dump = {"configuration": default_configuration.to_json()}
        body = json.dumps(dictionary_dump)
        self.consume_channel.basic_publish(exchange='',
                                           routing_key='measure_new_configuration_queue',
                                           body=body)
        # Listen to all queues with responses until the _is_interrupted flag is set.
        try:
            while not self._is_interrupted:
                self.consume_channel.connection.process_data_events(time_limit=1)  # 1 second
        finally:
            if self.connection.is_open:
                self.connection.close()

    def get_default_configurations_results(self, ch, method, properties, body):
        """
        Callback function for the result of the default configuration.
        :param ch: pika.Channel
        :param method: pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring the default configuration, in bytes format.
        """
        default_configuration = Configuration.from_json(body.decode())
        if default_configuration.status == Configuration.Status.BAD:
            new_default_values = self.default_config_handler.get_new_default_config()
            if new_default_values:
                config = Configuration(new_default_values, Configuration.Type.FROM_SELECTOR,
                                       self.experiment.unique_id)
                temp_msg = "New default configuration sampled."
                self.logger.info(temp_msg)
                self.sub.send('log', 'info', message=temp_msg)
                self.consume_channel.basic_publish(exchange='',
                                                   routing_key='measure_new_configuration_queue',
                                                   body=json.dumps({"configuration": config.to_json()}))
            else:
                self.logger.error("The specified default configuration is broken.")
                self.stop()
                self.sub.send('log', 'info', message="The specified default configuration is broken.")
                return
        if self.experiment.is_configuration_evaluated(default_configuration):
            self.experiment.default_configuration = default_configuration
            self.database.update_record(
                "Search_space",
                {"Exp_unique_ID": self.experiment.unique_id},
                {"Default_configuration": default_configuration.get_configuration_record()})
            self.database.update_record(
                "Search_space",
                {"Exp_unique_ID": self.experiment.unique_id},
                {"SearchspaceObject": pickle.dumps(self.experiment.search_space)})
            temp_msg = f"Evaluated Default Configuration: {default_configuration}"
            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)
            # Starting the main work: building the model and choosing configurations for measuring.
            self.consume_channel.basic_publish(exchange='',
                                               routing_key='get_worker_capacity_queue',
                                               body='')

    def get_configurations_results(self, ch, method, properties, body):
        """
        Callback function for the results of all Configurations except the default one.
        :param ch: pika.Channel
        :param method: pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring any configuration except the default one, in bytes format.
        """
        with self.conf_lock:  # To be sure that no Configuration will be added after satisfying all Stop Conditions.
            configuration = Configuration.from_json(body.decode())
            if not self._is_interrupted and self.experiment.is_configuration_evaluated(configuration):
                self.experiment.try_add_configuration(configuration)
                temp_msg = "-- New Configuration was evaluated. Building Target System model."
                self.logger.info(temp_msg)
                self.sub.send('log', 'info', message=temp_msg)
                self.consume_channel.basic_publish(exchange='',
                                                   routing_key='get_worker_capacity_queue',
                                                   body='')

    def send_new_configurations_to_measure(self, ch, method, properties, body):
        """
        This callback function is triggered on the arrival of ONE measured Configuration.
        When there is a new measured Configuration, the following steps are done:
        - update and validate the models;
        - pick new Configuration(s) for evaluation, either by the model or by the selection algorithm;
        - send the new Configuration(s) to the Repeater for evaluation.
        Note: The amount of new Configurations is:
        - 0 if the number of available Worker nodes decreased;
        - 1 if the number of available Workers did not change;
        - N + 1 if the number of available Workers increased by N.
        """
        needed_configs = json.loads(body.decode()).get("worker_capacity", 1)
        for _ in range(needed_configs):
            config = self.predictor.predict(self.experiment.measured_configurations)
            if config not in self.experiment.evaluated_configurations:
                temp_msg = f"Model predicted {config}."
            else:
                while config in self.experiment.evaluated_configurations and not self._is_interrupted:
                    if len(self.experiment.evaluated_configurations) >= self.experiment.search_space.get_size():
                        msg = "Entire Search Space was evaluated. Shutting down."
                        self.logger.warning(msg)
                        self.consume_channel.basic_publish(exchange='',
                                                           routing_key='stop_experiment_queue',
                                                           body=msg)
                        break
                    new_parameter_values = OrderedDict()
                    while not self.experiment.search_space.validate(new_parameter_values, is_recursive=True):
                        self.experiment.search_space.generate(new_parameter_values)
                    config = Configuration(new_parameter_values, Configuration.Type.FROM_SELECTOR,
                                           self.experiment.unique_id)
                temp_msg = f"Fully randomly sampled {config}."

            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)
            self.consume_channel.basic_publish(exchange='',
                                               routing_key='measure_new_configuration_queue',
                                               body=json.dumps({"configuration": config.to_json()}))

    def stop(self, ch=None, method=None, properties=None, body=None):
        """
        The function to stop the main thread externally (e.g. from the front-end).
        """
        with self.conf_lock:
            self.consume_channel.basic_publish(exchange='brise_termination_sender', routing_key='', body='')
            self.sub.send('log', 'info', message=f"Terminating experiment. Reason: {body}")
            self._state = self.State.SHUTTING_DOWN
            self._is_interrupted = True
            optimal_configuration = self.experiment.get_final_report_and_result()
            self._state = self.State.IDLE
            return optimal_configuration

    def get_state(self):
        return self._state
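
# A minimal usage sketch for MainThread (hypothetical setup: assumes the RabbitMQ queues, the MongoDB
# instance and the BRISE_* environment variables referenced above are available, and that
# 'experiment_description' and 'search_space' were produced by load_experiment_setup()):
#
#   main_thread = MainThread(experiment_setup={"experiment_description": experiment_description,
#                                              "search_space": search_space})
#   main_thread.start()   # runs MainThread.run() in a separate thread
#   ...
#   main_thread.stop()    # can also be triggered externally, e.g. from the front-end
#   main_thread.join()
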