def measure_task(configurations_sample: list,
                 tasks_sample: list,
                 experiment_description: dict,
                 search_space: Hyperparameter,
                 measured_tasks: int,
                 config_type: Configuration.Type,
                 config_status: Configuration.Status):
    """
    Test function for Repeater module.
    Main steps:
    0. Take default tasks sample.
    1. Create instances of Repeater, Experiment, Default Configuration according to test requirements.
    2. Create instance of current measurement.
    3. Call Repeater function.

    :param configurations_sample: default sample of measured configurations
    :param tasks_sample: default sample of measured tasks
    :param experiment_description: Experiment Description sample in json format
    :param search_space: Search Space sample
    :param measured_tasks: specify number of measured tasks in current measurement.
    :param config_type: specify current measurement configuration type.
    :param config_status: specify current measurement configuration status.
    :return: list of configuration status and number of tasks to measure.
    """
    experiment = Experiment(experiment_description, search_space)
    Configuration.set_task_config(experiment.description["TaskConfiguration"])

    configuration = Configuration(configurations_sample[1]["Params"],
                                  config_type, experiment.unique_id)
    configuration.status = config_status
    for i in range(measured_tasks):
        configuration.add_task(tasks_sample[i])

    orchestrator = RepeaterOrchestration(experiment)
    if config_type == Configuration.Type.DEFAULT:
        orchestrator._type = orchestrator.get_repeater(True)
    else:
        orchestrator._type = orchestrator.get_repeater()

    default_configuration = Configuration(configurations_sample[0]["Params"],
                                          Configuration.Type.DEFAULT,
                                          experiment.unique_id)
    default_configuration.status = Configuration.Status.MEASURED
    default_configuration._task_number = configurations_sample[0]["Tasks"]
    default_configuration.results = configurations_sample[0]["Results"]
    default_configuration._standard_deviation = configurations_sample[0]["STD"]
    experiment.default_configuration = default_configuration

    task = json.dumps({"configuration": configuration.to_json()})
    dummy_channel = None
    dummy_method = None
    dummy_properties = None
    results_measurement = orchestrator.measure_configurations(
        dummy_channel, dummy_method, dummy_properties, task)
    return results_measurement
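# For context, a minimal sketch of how this helper could be driven from a test.
# The sample arguments are assumed to be loaded elsewhere (e.g. from JSON test
# resources), and the unpacking below relies on the documented return shape
# (configuration status, number of tasks to measure) - an assumption of this
# sketch, not a confirmed contract.
def example_repeater_check(configurations_sample, tasks_sample,
                           experiment_description, search_space):
    # Measure a DEFAULT Configuration with two Tasks already attached and let
    # the Repeater decide whether more Tasks are required.
    status, tasks_to_measure = measure_task(
        configurations_sample, tasks_sample, experiment_description,
        search_space, measured_tasks=2,
        config_type=Configuration.Type.DEFAULT,
        config_status=Configuration.Status.MEASURED)
    return status, tasks_to_measure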
def compute_avg_results_over_configurations(
        configurations: List[Configuration]) -> List[float]:
    # Make sure that all Configurations are the same point in the search space:
    assert all(config.parameters == configurations[0].parameters
               for config in configurations)
    tasks = BRISEBenchmarkAnalyser.collect_tasks_from_configurations(configurations)

    # Temporarily switch the global task configuration to the one these
    # Configurations were measured with, then restore it to avoid side effects.
    previous_task_configuration = Configuration.TaskConfiguration
    Configuration.set_task_config(configurations[0].TaskConfiguration)
    tmp_configuration = Configuration(configurations[0].parameters,
                                      Configuration.Type.TEST)
    tmp_configuration.add_tasks(task=tasks)
    result = tmp_configuration.get_average_result()
    Configuration.set_task_config(previous_task_configuration)
    return result
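# A usage sketch: group an arbitrary list of Configurations by their parameters
# and average each group with the function above. `average_per_point` is a
# hypothetical helper written for illustration; converting parameters to a
# tuple so they can serve as a dict key is an assumption of this sketch.
from collections import defaultdict

def average_per_point(all_configurations: List[Configuration]) -> dict:
    groups = defaultdict(list)
    for config in all_configurations:
        # Group repeated runs of the same search-space point together.
        groups[tuple(config.parameters)].append(config)
    # Each group satisfies the assertion in compute_avg_results_over_configurations,
    # since all of its members share identical parameters.
    return {point: compute_avg_results_over_configurations(configs)
            for point, configs in groups.items()}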
def run(self):
    """
    The entry point to the main node functionality - measuring the default Configuration.
    When the default Configuration finishes its evaluation, the first set of Configurations
    is sampled for evaluation (and the queues for Configuration measurement results
    are initialized).
    """
    self._state = self.State.RUNNING
    self.logger.info("Starting BRISE")
    self.sub.send('log', 'info', message="Starting BRISE")

    if not self.experiment_setup:
        # Check if main.py is running with a specified experiment description file path.
        if len(argv) > 1:
            exp_desc_file_path = argv[1]
        else:
            exp_desc_file_path = './Resources/EnergyExperiment/EnergyExperiment.json'
            log_msg = "The Experiment Setup was not provided and the path to an experiment file was not specified." \
                      f" The default one will be executed: {exp_desc_file_path}"
            self.logger.warning(log_msg)
            self.sub.send('log', 'warning', message=log_msg)
        experiment_description, search_space = load_experiment_setup(exp_desc_file_path)
    else:
        experiment_description = self.experiment_setup["experiment_description"]
        search_space = self.experiment_setup["search_space"]

    validate_experiment_description(experiment_description)
    os.makedirs(experiment_description["General"]["results_storage"], exist_ok=True)

    # Initialize an instance of Experiment - the main data holder.
    self.experiment = Experiment(experiment_description, search_space)
    search_space.experiment_id = self.experiment.unique_id
    Configuration.set_task_config(self.experiment.description["TaskConfiguration"])

    # Initialize connection to the RabbitMQ service.
    self.connection = pika.BlockingConnection(
        pika.ConnectionParameters(
            os.getenv("BRISE_EVENT_SERVICE_HOST"),
            int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT"))))
    self.consume_channel = self.connection.channel()

    # Initialize connection to the database.
    self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                            int(os.getenv("BRISE_DATABASE_PORT")),
                            os.getenv("BRISE_DATABASE_NAME"),
                            os.getenv("BRISE_DATABASE_USER"),
                            os.getenv("BRISE_DATABASE_PASS"))

    # Write initial settings to the database.
    self.database.write_one_record(
        "Experiment_description",
        self.experiment.get_experiment_description_record())
    self.database.write_one_record(
        "Search_space",
        get_search_space_record(self.experiment.search_space,
                                self.experiment.unique_id))
    self.experiment.send_state_to_db()

    self.sub.send('experiment', 'description',
                  global_config=self.experiment.description["General"],
                  experiment_description=self.experiment.description,
                  searchspace_description=self.experiment.search_space.serialize(True))
    self.logger.debug("Experiment description and global configuration sent to the API.")

    # Create and launch Stop Condition services in separate threads.
    launch_stop_condition_threads(self.experiment.unique_id)

    # Instantiate client for Worker Service, establish connection.
    self.wsc_client = WSClient(self.experiment.description["TaskConfiguration"],
                               os.getenv("BRISE_EVENT_SERVICE_HOST"),
                               int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))

    # Initialize Repeater - encapsulates the Configuration evaluation process to avoid result fluctuations
    # (achieved by multiple Configuration evaluations on Workers - Tasks).
    RepeaterOrchestration(self.experiment)

    self.predictor: Predictor = Predictor(self.experiment.unique_id,
                                          self.experiment.description,
                                          self.experiment.search_space)

    # Subscribe the callbacks to the queues with measurement results and control messages.
    self.consume_channel.basic_consume(
        queue='default_configuration_results_queue', auto_ack=True,
        on_message_callback=self.get_default_configurations_results)
    self.consume_channel.basic_consume(
        queue='configurations_results_queue', auto_ack=True,
        on_message_callback=self.get_configurations_results)
    self.consume_channel.basic_consume(
        queue='stop_experiment_queue', auto_ack=True,
        on_message_callback=self.stop)
    self.consume_channel.basic_consume(
        queue="get_new_configuration_queue", auto_ack=True,
        on_message_callback=self.send_new_configurations_to_measure)

    self.default_config_handler = get_default_config_handler(self.experiment)

    temp_msg = "Measuring default Configuration."
    self.logger.info(temp_msg)
    self.sub.send('log', 'info', message=temp_msg)

    default_parameters = self.experiment.search_space.generate_default()
    default_configuration = Configuration(default_parameters,
                                          Configuration.Type.DEFAULT,
                                          self.experiment.unique_id)
    default_configuration.experiment_id = self.experiment.unique_id
    dictionary_dump = {"configuration": default_configuration.to_json()}
    body = json.dumps(dictionary_dump)
    self.consume_channel.basic_publish(exchange='',
                                       routing_key='measure_new_configuration_queue',
                                       body=body)

    # Listen on all queues with responses until the _is_interrupted flag is set.
    try:
        while not self._is_interrupted:
            self.consume_channel.connection.process_data_events(time_limit=1)  # 1 second
    finally:
        if self.connection.is_open:
            self.connection.close()
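# To illustrate the messaging contract that run() establishes, here is a minimal
# sketch of an external client asking a running main node to stop. It assumes
# the same RabbitMQ instance referenced by the BRISE_EVENT_SERVICE_* environment
# variables above, and that the stop handler tolerates an empty JSON body
# (an assumption of this sketch, not a documented contract).
import json
import os

import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        os.getenv("BRISE_EVENT_SERVICE_HOST"),
        int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT"))))
channel = connection.channel()
# run() consumes from 'stop_experiment_queue' with self.stop as the callback.
channel.basic_publish(exchange='',
                      routing_key='stop_experiment_queue',
                      body=json.dumps({}))
connection.close()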