def download_dump_request_queue(self, channel, method, properties, body):
    """
    RPC function that returns a base64 encoded dump of the latest experiment.
    body['format']: specifies the file extension of the dump
    result["status"]: contains the status of the response, "ok" or an error description
    result["body"]: contains a base64 encoded dump
    result["file_name"]: contains a file name
    """
    body = json.loads(body)
    result = {"status": None, "body": None, "file_name": None}
    dump_name = os.environ.get('EXP_DUMP_NAME')
    try:
        if dump_name == 'undefined':
            result["status"] = "missing experiment file"
            API().send("log", "error", message=result["status"])
        else:
            filename = f"{dump_name}.{body['format']}"
            with open(filename, "rb") as file:
                result["status"] = "ok"
                result["body"] = str(base64.b64encode(file.read()), "utf-8")
                result["file_name"] = filename
    except Exception as error:
        result["status"] = 'Download dump file of the experiment: %s' % error
        API().send("log", "error", message=result["status"])
    self.channel.basic_publish(
        exchange='',
        routing_key=properties.reply_to,
        properties=pika.BasicProperties(correlation_id=properties.correlation_id),
        body=json.dumps(result))
    self.channel.basic_ack(delivery_tag=method.delivery_tag)
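# A minimal client-side sketch of this RPC pattern, assuming the server consumes
# from 'main_download_dump_queue' (one of the queues listed in ConsumerThread's
# docstring below). Host, port, and the 'pkl' default format are placeholders.
import base64
import json
import uuid

import pika


def request_dump(host: str, port: int, dump_format: str = "pkl") -> dict:
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    # An exclusive, auto-named queue receives the reply.
    reply_queue = channel.queue_declare(queue='', exclusive=True).method.queue
    correlation_id = str(uuid.uuid4())
    response = {}

    def on_reply(ch, method, properties, body):
        # Only accept the reply that matches our request.
        if properties.correlation_id == correlation_id:
            response.update(json.loads(body))
            ch.stop_consuming()

    channel.basic_consume(queue=reply_queue, on_message_callback=on_reply, auto_ack=True)
    channel.basic_publish(
        exchange='',
        routing_key='main_download_dump_queue',
        properties=pika.BasicProperties(reply_to=reply_queue, correlation_id=correlation_id),
        body=json.dumps({"format": dump_format}))
    channel.start_consuming()  # blocks until on_reply calls stop_consuming()
    connection.close()
    if response.get("status") == "ok":
        # Decode the base64 payload back into the dump file.
        with open(response["file_name"], "wb") as file:
            file.write(base64.b64decode(response["body"]))
    return response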
def test_14_singleton_front_api(self):
    # Test #14. Try to create 2 instances of the API class
    # Expected result: only a single instance exists (due to the singleton)
    API._instance = None
    api1 = API()
    api2 = API()
    assert api1 is api2
def test_15_singleton_front_api_different_objects(self):
    # Test #15. Try to create 2 instances of the API class, initialized with different api objects
    # Expected result: only a single instance exists (due to the singleton)
    API._instance = None
    api1 = API()
    api2 = API(api_object=RabbitApi("event-service", 49153))
    assert api1 is api2
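# The tests above reset API._instance to bypass the singleton; a minimal sketch
# of such a singleton (not necessarily the actual API implementation):
class SingletonSketch:
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Reuse the existing instance; arguments of later calls are ignored,
        # which is why api1 is api2 even with different api objects.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance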
def validate_experiment_description(experiment_description: dict,
                                    schema_file_path: str = './Resources/schema/experiment.schema.json'):
    """
    Performs validation and raises an error if the provided Experiment Description
    does not pass the validation according to the schema.
    :param experiment_description: Dict. Experiment Description.
    :param schema_file_path: String. Path to the JSON schema file.
    :return:
    """
    logger = logging.getLogger(__name__)
    validity_check = is_json_file_valid(validated_data=experiment_description, schema_path=schema_file_path)
    uniqueness_check = get_duplicated_sc_names(experiment_description)
    presence_check = get_missing_sc_entities(experiment_description)
    if not validity_check:
        msg = f"Provided Experiment Description has not passed the validation using schema in file {schema_file_path}."
        logger.error(msg)
        API().send('log', 'error', message=msg)
    if uniqueness_check:
        msg = f"Some Stop Condition instances are duplicated: {uniqueness_check}."
        logger.error(msg)
        API().send('log', 'error', message=msg)
    if presence_check:
        msg = f"Some Stop Conditions defined in the Stop Condition Trigger Logic expression are missing: {presence_check}."
        logger.error(msg)
        API().send('log', 'error', message=msg)
    if validity_check and not uniqueness_check and not presence_check:
        logger.info("Provided Experiment Description is valid.")
    else:
        msg = "Some errors occurred during validation. Please check the Experiment Description."
        raise ValueError(msg)
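# A hypothetical usage of the validator: load a description from disk (the path
# is the one used in the tests below, load_json_file is defined further down)
# and fail fast before building an Experiment.
description = load_json_file("./Resources/EnergyExperiment/EnergyExperiment.json")
try:
    validate_experiment_description(description)
except ValueError as error:
    logging.getLogger(__name__).error("Experiment aborted: %s", error)
    raise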
def __init__(self, description: dict):
    """
    Initialization of Experiment class.
    Following fields are declared:
    self.measured_configurations - list of configuration instances
        shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
    """
    self.logger = logging.getLogger(__name__)
    self.api = API()
    # TODO: merge lists into a single one (https://github.com/dpukhkaiev/BRISEv2/pull/112#discussion_r371761149)
    self.evaluated_configurations: List[Configuration] = []  # the repeater has already evaluated these configurations
    self.measured_configurations: List[Configuration] = []  # the results for these configurations have already been received
    self._default_configuration: Configuration = None
    self._description: dict = description
    self.end_time = self.start_time = datetime.datetime.now()
    # A unique ID that is used to differentiate Experiments by their descriptions.
    self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
    self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.id}"
    # TODO MultiOpt: Currently we store only one solution configuration here,
    # but it was made as a possible hook for multidimensional optimization.
    self.current_best_configurations: List[Configuration] = []
    self.bad_configurations_number = 0
    self.model_is_valid = False
    self.measured_conf_lock = Lock()
    self.evaluated_conf_lock = Lock()
def __init__(self, description: dict):
    """
    Initialization of Experiment class.
    Following fields are declared:
    self.all_configurations - list of configuration instances
        shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
    self.description - description of the current experiment, taken from a .json file
        shape - dict with sub-dicts
    """
    self.logger = logging.getLogger(__name__)
    self.api = API()
    self.default_configuration = []
    self.all_configurations = []
    self._description = description
    self.search_space = []
    self.end_time = self.start_time = datetime.datetime.now()
    # A unique ID that is used to differentiate Experiments by their descriptions.
    self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
    self.name = "exp_{task_name}_{experiment_hash}".format(
        task_name=self.description["TaskConfiguration"]["TaskName"],
        experiment_hash=self.id)
    self.current_best_configurations = []
    self.__generate_search_space()
def __init__(self, log_file_name, experiment):
    """
    Initialization of the regression model.
    :param log_file_name: string, location of the file that will store the results of model creation
    :param experiment: instance of Experiment class
    """
    self.logger = logging.getLogger(__name__)
    # Send updates to subscribers
    self.sub = API()
    # Model configuration - related fields.
    self.minimal_test_size = experiment.description["ModelConfiguration"]["minimalTestingSize"]
    self.maximal_test_size = experiment.description["ModelConfiguration"]["maximalTestingSize"]
    self.log_file_name = log_file_name
    # Built model - related fields.
    self.model = None
    self.minimum_model_accuracy = experiment.description["ModelConfiguration"]["MinimumAccuracy"]
    self.built_model_accuracy = 0
    self.built_model_test_size = 0.0
    # Data holding fields.
    self.experiment = experiment
    self.all_configurations = []
def __setstate__(self, space):
    self.__dict__ = space
    self.logger = logging.getLogger(__name__)
    self.api = API()
    # Locks for thread-safe appends to the relevant lists; protection against duplicate configurations
    self.measured_conf_lock = Lock()
    self.evaluated_conf_lock = Lock()
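# Locks and the API handle are not picklable, which is why __setstate__ above
# re-creates them. A complementary __getstate__ (a sketch, assuming these
# attribute names) would drop them before pickling:
def __getstate__(self):
    state = self.__dict__.copy()
    # Remove unpicklable runtime objects; __setstate__ restores them on load.
    for unpicklable in ("logger", "api", "measured_conf_lock", "evaluated_conf_lock"):
        state.pop(unpicklable, None)
    return state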
def __init__(self, experiment, min_points_in_model=None, top_n_percent=30,
             num_samples=96, random_fraction=1 / 3, bandwidth_factor=3,
             min_bandwidth=1e-3, **kwargs):
    self.model = None
    self.top_n_percent = top_n_percent
    self.experiment = experiment
    self.isMinimizationExperiment = experiment.is_minimization()
    self.bw_factor = bandwidth_factor
    self.min_bandwidth = min_bandwidth
    if "logger" not in dir(self):
        self.logger = logging.getLogger(__name__)
    self.sub = API()
    hps = self.experiment.description["DomainDescription"]["AllConfigurations"]
    if min_points_in_model is None:
        self.min_points_in_model = len(hps) + 1
    elif min_points_in_model < len(hps) + 1:
        self.logger.warning('Invalid min_points_in_model value. Setting it to %i' % (len(hps) + 1))
        self.min_points_in_model = len(hps) + 1
    else:
        # keep a valid, user-provided value
        self.min_points_in_model = min_points_in_model
    self.num_samples = num_samples
    self.random_fraction = random_fraction
    self.kde_vartypes = ""
    self.vartypes = []
    for h in hps:
        self.kde_vartypes += 'u'  # 'u' marks an unordered (categorical) variable for the KDE
        self.vartypes += [len(h)]
    self.vartypes = np.array(self.vartypes, dtype=int)
    # store precomputed probs for the categorical parameters
    self.cat_probs = []
    # Data holding fields.
    self.all_configurations = []
    self.good_config_rankings = dict()
def disable_configuration(self):
    """
    Disable the configuration. This configuration won't be used in the experiment.
    """
    if self.is_enabled:
        self.is_enabled = False
        temp_msg = f"Configuration {self} was disabled. It will not be added to the Experiment."
        self.logger.warning(temp_msg)
        API().send('log', 'warning', message=temp_msg)
def test_20_correct_api_object(self):
    # Test #20. Try to create an instance of the API class, with expected api object
    # Expected result: an instance is created
    class GoodAPI:
        def emit(self, message_type: str, message_subtype: str, message: str):
            pass
    API._instance = None
    test_api = API(GoodAPI())
    assert isinstance(test_api, API)
def test_16_api_without_emit(self):
    # Test #16. Try to create an instance of the API class, with an api object without an emit() method
    # Expected result: AttributeError is thrown, informing about the requirements for the api object
    class BadAPI:
        def no_emit(self):
            pass
    expected_result = "Provided API object doesn't contain 'emit()' method"
    # cleanup
    API._instance = None
    with pytest.raises(AttributeError) as excinfo:
        API(BadAPI())
    assert expected_result in str(excinfo.value)
def test_19_api_with_missing_emit_parameters(self):
    # Test #19. Try to create an instance of the API class, with missing api object emit() parameters
    # Expected result: AttributeError is thrown, informing about the requirements for the api object
    class BadAPI:
        def emit(self, message_type: str, message_subtype: str):
            pass
    expected_result = (
        "Provided API object has unsupported 'emit()' method."
        "Its parameters do not correspond to the required!"
        "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
    )
    # cleanup
    API._instance = None
    with pytest.raises(AttributeError) as excinfo:
        API(BadAPI())
    assert expected_result in str(excinfo.value)
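# A sketch of how such an emit() signature check could be implemented with the
# standard inspect module; the real API class may do this differently.
import inspect


def check_emit_signature(api_object) -> None:
    emit = getattr(api_object, "emit", None)
    if emit is None:
        raise AttributeError("Provided API object doesn't contain 'emit()' method")
    parameters = list(inspect.signature(emit).parameters.values())
    # Bound methods already exclude 'self'; expect exactly three str-annotated parameters.
    if len(parameters) != 3 or any(p.annotation is not str for p in parameters):
        raise AttributeError(
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'")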
def validate_experiment_data(experiment_data: dict,
                             schema_file_path: str = './Resources/schema/experiment_data.schema.json'):
    """
    Performs validation and raises an error if the provided Experiment Data
    does not pass the validation according to the schema.
    :param experiment_data: Dict. Experiment Data.
    :param schema_file_path: String. Path to the JSON schema file.
    :return:
    """
    logger = logging.getLogger(__name__)
    if is_json_file_valid(validated_data=experiment_data, schema_path=schema_file_path):
        logger.info("Provided Experiment Data is valid.")
    else:
        msg = "Provided Experiment Data has not passed the validation using schema in file %s. " \
              "Experiment data: \n%s" % (schema_file_path, experiment_data)
        logger.error(msg)
        API().send('log', 'error', message=msg)
        raise ValueError(msg)
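# is_json_file_valid is referenced above but not shown; a minimal sketch using
# the jsonschema package, assuming the schema is stored as a plain JSON file:
import json

import jsonschema


def is_json_file_valid(validated_data: dict, schema_path: str) -> bool:
    with open(schema_path) as schema_file:
        schema = json.load(schema_file)
    try:
        jsonschema.validate(instance=validated_data, schema=schema)
        return True
    except jsonschema.ValidationError:
        return False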
def create_folder_if_not_exists(folderPath):
    """
    Creates the folder if it does not exist.
    :param folderPath: string path to the folder; may include a file name.
    :return: True if the folder was created or already exists
    """
    logger = logging.getLogger(__name__)
    try:
        dir_path = path.dirname(folderPath)
        if dir_path and not path.exists(dir_path):
            makedirs(dir_path)
        return True
    except IOError as error:
        msg = "Unable to create folder %s. Error information: %s" % (folderPath, error)
        logger.error(msg, exc_info=True)
        API().send("log", "error", message=msg)
        raise error
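# On Python 3.2+ the same guarantee is available without the explicit existence
# check, since makedirs accepts exist_ok (an alternative sketch):
from os import makedirs, path


def ensure_parent_dir(file_path: str) -> None:
    dir_path = path.dirname(file_path)
    if dir_path:
        makedirs(dir_path, exist_ok=True)  # no error if the folder already exists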
def test_18_api_with_unexpected_emit_parameter_names(self, caplog):
    # Test #18. Try to create an instance of the API class, with wrong api object emit() parameters' names
    # Expected result: object is created, but user is warned about the advisable parameters' names
    import logging
    caplog.set_level(logging.WARNING)

    class BadAPI:
        def emit(self, dummy: str, message_subtype: str, message: str):
            pass
    expected_result = (
        "Parameter names of the emit() method are untypical for your API object."
        "It is advisable to check emit() parameters."
        "Expected parameters are: 'message_type', 'message_subtype', 'message'"
    )
    # cleanup
    API._instance = None
    API(BadAPI())
    for record in caplog.records:
        assert record.levelname == "WARNING"
        assert expected_result in str(record)
def __init__(self, description: dict, search_space: Hyperparameter):
    """
    Initialization of Experiment class.
    Following fields are declared:
    self.measured_configurations - list of configuration instances
        shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
    self.description - description of the current experiment, taken from a .json file
        shape - dict with sub-dicts
    """
    self.logger = logging.getLogger(__name__)
    self.api = API()
    self.evaluated_configurations: List[Configuration] = []  # the repeater has already evaluated these configurations
    self.measured_configurations: List[Configuration] = []  # the results for these configurations have already been received
    self._default_configuration: Configuration = None
    self._description: Mapping = description
    self.search_space: Hyperparameter = search_space
    self.end_time = self.start_time = datetime.datetime.now()
    # An ID that is used to differentiate Experiments by their descriptions.
    self.ed_id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
    # A unique ID, different for every experiment (even with the same description)
    self.unique_id = str(uuid.uuid4())
    self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.ed_id}"
    self.current_best_configurations: List[Configuration] = []
    self.bad_configurations_number = 0
    self.model_is_valid = False
    self.measured_conf_lock = Lock()
    self.evaluated_conf_lock = Lock()
    # initialize the connection to the database
    self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                            os.getenv("BRISE_DATABASE_PORT"),
                            os.getenv("BRISE_DATABASE_NAME"),
                            os.getenv("BRISE_DATABASE_USER"),
                            os.getenv("BRISE_DATABASE_PASS"))
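# The database credentials above are read from the environment; a hypothetical
# local setup could export them before starting the node (all values below are
# placeholders, not the project's actual configuration):
import os

os.environ.setdefault("BRISE_DATABASE_HOST", "localhost")
os.environ.setdefault("BRISE_DATABASE_PORT", "27017")  # default MongoDB port
os.environ.setdefault("BRISE_DATABASE_NAME", "BRISE_db")
os.environ.setdefault("BRISE_DATABASE_USER", "user")
os.environ.setdefault("BRISE_DATABASE_PASS", "pass")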
def load_json_file(path_to_file):
    """
    Reads a .json file.
    :param path_to_file: string path to the file.
    :return: object that represents the .json file
    """
    logger = logging.getLogger(__name__)
    front_api = API()
    try:
        with open(path_to_file, 'r') as File:
            jsonFile = json.loads(File.read())
            return jsonFile
    except IOError as error:
        msg = "Unable to read a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
    except json.JSONDecodeError as error:
        msg = "Unable to decode a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
def __init__(self, experiment_setup: [Experiment, Hyperparameter] = None):
    """
    The function for initializing the main thread.
    :param experiment_setup: fully initialized experiment, e.g. from a POST request
    """
    super(MainThread, self).__init__()
    self._is_interrupted = False
    self.conf_lock = threading.Lock()
    self._state = self.State.IDLE
    self.experiment_setup = experiment_setup
    self.sub = API()  # front-end subscribers
    if __name__ == "__main__":
        self.logger = BRISELogConfigurator().get_logger(__name__)
    else:
        self.logger = logging.getLogger(__name__)
    self.experiment: Experiment = None
    self.connection: pika.BlockingConnection = None
    self.consume_channel = None
    self.predictor: Predictor = None
    self.wsc_client: WSClient = None
    self.repeater: RepeaterOrchestration = None
    self.database: MongoDB = None
def measure_configurations(self, configurations: list, experiment: Experiment):
    """
    Evaluates the Target System for each of the given Configurations until their
    evaluation results are considered precise enough.
    :param configurations: list of Configurations that need to be measured.
    :param experiment: instance of 'experiment' is required for model-awareness.
    :return: list of Configurations that were evaluated
    """
    # Removing previous measurements
    current_measurement = {}
    # Creating holders for current measurements
    for configuration in configurations:
        # Evaluating each Configuration in the configurations list
        needed_tasks_count = self.evaluation_by_type(configuration, experiment)
        current_measurement[str(configuration.get_parameters())] = {
            'parameters': configuration.get_parameters(),
            'needed_tasks_count': needed_tasks_count,
            'Finished': False
        }
        if needed_tasks_count == 0:
            current_measurement[str(configuration.get_parameters())]['Finished'] = True
            current_measurement[str(configuration.get_parameters())]['Results'] = configuration.get_average_result()
    # Continue to feed the Workers with new Tasks until all Configurations pass the evaluation.
    while True:
        # Selecting only those configurations that are not finished yet.
        tasks_to_send = []
        for point in current_measurement.keys():
            if not current_measurement[point]['Finished']:
                for i in range(current_measurement[point]['needed_tasks_count']):
                    tasks_to_send.append(current_measurement[point]['parameters'])
                    self.performed_measurements += 1
        if not tasks_to_send:
            return configurations
        # Send these configurations to the Worker service
        results = self.worker_service_client.work(tasks_to_send)
        # Sending data to the API and adding Tasks to the Configuration
        for parameters, result in zip(tasks_to_send, results):
            for config in configurations:
                if config.get_parameters() == parameters:
                    config.add_tasks(parameters, result)
            API().send('new', 'task', configurations=[parameters], results=[result])
        # Evaluating each Configuration in the configurations list
        for configuration in configurations:
            needed_tasks_count = self.evaluation_by_type(configuration, experiment)
            current_measurement[str(configuration.get_parameters())]['needed_tasks_count'] = needed_tasks_count
            if needed_tasks_count == 0:
                current_measurement[str(configuration.get_parameters())]['Finished'] = True
                current_measurement[str(configuration.get_parameters())]['Results'] = configuration.get_average_result()
socketio_logger.setLevel(logging.WARNING)
engineio_logger.setLevel(logging.WARNING)
socketIO = socketio.Server(ping_timeout=300,
                           logger=socketio_logger,
                           engineio_logger=engineio_logger)

# instance of Flask app
app = Flask(__name__, static_url_path='', template_folder="static")
CORS(app)
app.wsgi_app = socketio.WSGIApp(socketIO, app.wsgi_app)  # WebSocket
app.config['SECRET_KEY'] = 'galamaga'

# Initialize the API singleton
API(api_object=socketIO)

MAIN_PROCESS = None
data_header = {'version': '1.0'}
# add clients in room
front_clients = []


# --- START
@app.route('/main_start', methods=['GET', 'POST'])
def main_process_start():
    """
    Verifies that the Main node is not running. If free - creates a new process
    with the socketio instance and starts it.
    It ensures that for each run of main.py you will use only one socketio.
    """
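# A sketch of a minimal Python client for this server, assuming the standard
# python-socketio client package; the URL is a placeholder and the 'log' event
# name mirrors the message type used by API().send('log', ...).
import socketio


client = socketio.Client()


@client.on("log")
def on_log(payload):
    print("log event from main node:", payload)


client.connect("http://localhost:80")  # placeholder address of the Flask/WSGI app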
def __setstate__(self, space):
    self.__dict__ = space
    self.logger = logging.getLogger(__name__)
    self.api = API()
def measure_configurations(self, channel, method, properties, body):
    """
    Callback function for the result of measuring.
    :param channel: pika.Channel
    :param method: pika.spec.Basic.GetOk
    :param properties: pika.spec.BasicProperties
    :param body: result of measuring a configuration, in bytes format
    """
    if os.environ.get('TEST_MODE') == 'UNIT_TEST':
        result = json.loads(body)
    else:
        result = json.loads(body.decode())
    configuration = Configuration.from_json(result["configuration"])
    if configuration.status != Configuration.Status.NEW and os.environ.get('TEST_MODE') != 'UNIT_TEST':
        tasks_to_send = result["tasks_to_send"]
        tasks_results = result["tasks_results"]
        for index, objective in enumerate(self._objectives):
            tasks_results = error_check(tasks_results, objective,
                                        self._expected_values_range[index],
                                        self._objectives_data_types[index])
        if self.experiment.description["OutliersDetection"]["isEnabled"]:
            tasks_results = self.outlier_detectors.find_outliers_for_taskset(tasks_results,
                                                                             self._objectives,
                                                                             [configuration],
                                                                             tasks_to_send)
        # Sending data to the API and adding Tasks to the Configuration
        for parameters, task in zip(tasks_to_send, tasks_results):
            if configuration.parameters == parameters:
                if configuration.is_valid_task(task):
                    configuration.add_task(task)
                    if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                        self.database.write_one_record("Tasks", configuration.get_task_record(task))
                else:
                    configuration.increase_failed_tasks_number()
            API().send('new', 'task', configurations=[parameters], results=[task])
    # Evaluating the configuration
    if configuration.number_of_failed_tasks <= self.repeater_parameters['MaxFailedTasksPerConfiguration']:
        needed_tasks_count = self.evaluation_by_type(configuration)
    else:
        needed_tasks_count = 0
        configuration.status = Configuration.Status.BAD
        if len(configuration.get_tasks()) == 0:
            self.experiment.increment_bad_configuration_number()
            configuration.disable_configuration()
    current_measurement = {
        str(configuration.parameters): {
            'parameters': configuration.parameters,
            'needed_tasks_count': needed_tasks_count,
            'Finished': False
        }
    }
    if needed_tasks_count == 0:
        current_measurement[str(configuration.parameters)]['Finished'] = True
        current_measurement[str(configuration.parameters)]['Results'] = configuration.results
    tasks_to_send = []
    for point in current_measurement.keys():
        if not current_measurement[point]['Finished']:
            for i in range(current_measurement[point]['needed_tasks_count']):
                tasks_to_send.append(current_measurement[point]['parameters'])
                self.performed_measurements += 1
                if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                    self.database.write_one_record("Repeater_measurements",
                                                   self.get_repeater_measurements_record())
    if os.environ.get('TEST_MODE') == 'UNIT_TEST':
        return configuration, needed_tasks_count
    elif configuration.status == Configuration.Status.MEASURED or configuration.status == Configuration.Status.BAD:
        conn_params = pika.ConnectionParameters(host=self.event_host, port=int(self.event_port))
        with pika.BlockingConnection(conn_params) as connection:
            with connection.channel() as channel:
                try:
                    if configuration.type == Configuration.Type.DEFAULT:
                        self._type = self.get_repeater()
                        channel.basic_publish(exchange='',
                                              routing_key='default_configuration_results_queue',
                                              body=configuration.to_json())
                    elif configuration.type == Configuration.Type.PREDICTED or \
                            configuration.type == Configuration.Type.FROM_SELECTOR:
                        channel.basic_publish(exchange='',
                                              routing_key='configurations_results_queue',
                                              body=configuration.to_json())
                except pika.exceptions.ChannelWrongStateError as err:
                    if not channel.is_open:
                        self.logger.warning("Attempt to send a message after closing the connection")
                    else:
                        raise err
    elif configuration.status == Configuration.Status.EVALUATED or \
            configuration.status == Configuration.Status.REPEATED_MEASURING:
        conn_params = pika.ConnectionParameters(host=self.event_host, port=int(self.event_port))
        with pika.BlockingConnection(conn_params) as connection:
            with connection.channel() as channel:
                body = json.dumps({"configuration": configuration.to_json(), "tasks": tasks_to_send})
                channel.basic_publish(exchange='',
                                      routing_key='process_tasks_queue',
                                      body=body)
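# A sketch of the consuming side of the queues published to above; the queue
# name is taken from the basic_publish calls, the callback is a placeholder.
import pika


def listen_for_configuration_results(host: str, port: int) -> None:
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    channel.queue_declare(queue='configurations_results_queue')
    channel.basic_consume(queue='configurations_results_queue',
                          on_message_callback=lambda ch, method, properties, body: print(body),
                          auto_ack=True)
    channel.start_consuming()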
class TestFrontApi:
    # this test set is aimed to cover the functionality of the 'front_API' tools and the 'singleton'
    api = API()

    def test_0_build_log_message(self):
        # Test #0. Build log message according to the API message format
        # Expected result: the message of type 'string' is built independently of its content
        expected_result = "This is log"
        msg = "This is log"
        actual_result = APIMessageBuilder.build('LOG', message=msg)
        assert actual_result == expected_result

    def test_1_build_log_message_of_multiple_parts(self):
        # Test #1. Build log message according to the API message format. Message consists of several parts
        # Expected result: the single message of type 'string' is built by concatenation
        expected_result = "This is log"
        msg1 = "This is "
        msg2 = "log"
        actual_result = APIMessageBuilder.build('LOG', message1=msg1, message2=msg2)
        assert actual_result == expected_result

    def test_2_build_empty_log_message(self):
        # Test #2. Build log message according to the API message format. Message is empty
        # Expected result: the single empty message of type 'string' is built
        expected_result = ""
        actual_result = APIMessageBuilder.build('LOG')
        assert actual_result == expected_result

    def test_3_build_new_task_message(self):
        # Test #3. Build task message according to the API message format. The 'result' is an empty list
        # Expected result: the task message of type 'list' is built. It contains the "configuration-result" pair as a dictionary.
        expected_result = [{'configurations': [2200.0, 8], 'results': None}]
        configurations = [[2200.0, 8]]
        actual_result = APIMessageBuilder.build('NEW', configurations=configurations, results=[None])
        assert actual_result == expected_result

    def test_4_build_task_message_with_result(self):
        # Test #4. Build task message according to the API message format. Use different supported message types
        # Expected result: the task message of type 'list' is built. It contains the "configuration-result" pair as a dictionary.
        expected_result = [{'configurations': [2200.0, 8], 'results': 123}]
        configurations = [[2200.0, 8]]
        results = [123]
        message_types = ['DEFAULT', 'PREDICTIONS', 'FINAL']
        for message_type in message_types:
            actual_result = APIMessageBuilder.build(message_type, configurations=configurations, results=results)
            assert actual_result == expected_result

    def test_5_build_invalid_task_message(self):
        # Test #5. Build task message with invalid values according to the API message format
        # Expected result: error is raised, that invalid parameters are passed
        expected_result = (
            "Invalid parameters passed to send message via API: "
            "The configurations(key parameter) are not provided or have invalid format!"
        )
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW', configurations="Invalid string", results="Invalid string")
        assert expected_result in str(excinfo.value)

    def test_6_build_incomplete_task_message(self):
        # Test #6. Build task message with incomplete parameters (without 'results')
        # Expected result: error is raised
        configurations = [[2200.0, 8]]
        expected_result = (
            "Invalid parameters passed to send message via API: "
            "The results(key parameter) are not provided or have invalid format!"
        )
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW', configurations=configurations)
        assert expected_result in str(excinfo.value)

    def test_7_build_inconsistent_task_message(self):
        # Test #7. Build task message with inconsistent dimensionality of 'configurations' and 'results'
        # Expected result: error is raised
        configurations = [[2200.0, 8]]
        results = [123, 234]
        expected_result = "Different sizes of provided parameters!"
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW', configurations=configurations, results=results)
        assert expected_result in str(excinfo.value)

    def test_8_build_experiment_message(self):
        # Test #8. Build experiment message according to the API message format
        # Expected result: the message of type 'dictionary' is built and contains all needed fields
        from tools.initial_config import load_experiment_setup
        experiment_description, searchspace_description = \
            load_experiment_setup("./Resources/EnergyExperiment/EnergyExperiment.json")
        global_config = experiment_description["General"]
        actual_result = APIMessageBuilder.build('EXPERIMENT',
                                                global_config=global_config,
                                                experiment_description=experiment_description,
                                                searchspace_description=searchspace_description)
        assert actual_result["global_configuration"]
        assert actual_result["experiment_description"]
        assert actual_result["searchspace_description"]

    def test_9_build_incomplete_experiment_message(self):
        # Test #9. Build experiment message from incomplete parameters
        # Expected result: error is raised
        from tools.initial_config import load_experiment_setup
        expected_result = "Invalid parameters passed to send message via API: The search space description is not provided!"
        experiment_description, _ = load_experiment_setup("./Resources/EnergyExperiment/EnergyExperiment.json")
        global_config = experiment_description["General"]
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('EXPERIMENT',
                                    global_config=global_config,
                                    experiment_description=experiment_description)
        assert expected_result in str(excinfo.value)

    def test_10_send_valid_message(self):
        # Test #10. Send messages of all supported types and subtypes via the dummy API
        # Expected result: no errors arise, sending is simulated
        expected_type = "log"
        expected_payload = "This is msg"
        msg = "This is msg"
        type_subtype = {}
        type_subtype["LOG"] = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        for msg_type in type_subtype:
            for msg_subtype in type_subtype[msg_type]:
                expected_subtype = msg_subtype.lower()
                actual_result = TestFrontApi.api.send(msg_type, msg_subtype, message=msg)
                assert actual_result[0][0] == expected_type
                assert actual_result[0][1] == expected_subtype
                assert actual_result[0][2] == expected_payload

    def test_11_send_unknown_type_of_msg(self, caplog):
        # Test #11. Try to send an unknown (unsupported) type of message
        # Expected result: error is raised, that message type is not supported
        expected_result = "Message type is not supported!"
        msg = "This is msg"
        TestFrontApi.api.send('DUMMY', 'INFO', message=msg)
        for record in caplog.records:
            assert record.levelname == "ERROR"
            assert expected_result in str(record)

    def test_12_send_unknown_subtype_of_msg(self, caplog):
        # Test #12. Try to send an unknown (unsupported) subtype of message
        # Expected result: error is raised, that message subtype is not supported
        expected_result = "Message subtype is not supported!"
        msg = "This is msg"
        msg_types = ["LOG", "NEW", "DEFAULT", "PREDICTIONS", "FINAL", "EXPERIMENT"]
        for msg_type in msg_types:
            TestFrontApi.api.send(msg_type, 'dummy', message=msg)
        for record in caplog.records:
            assert record.levelname == "ERROR"
            assert expected_result in str(record)

    def test_13_send_invalid_format_of_message(self, caplog):
        # Test #13. Try to send a supported type of message, but specified in an unexpected format
        # Expected result: error is raised
        expected_result = "Wrong API message type object!"
        msg = "This is msg"
        TestFrontApi.api.send(['LOG'], 'dummy', message=msg)
        for record in caplog.records:
            assert record.levelname == "ERROR"
            assert expected_result in str(record)

    def test_14_singleton_front_api(self):
        # Test #14. Try to create 2 instances of the API class
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API()
        assert api1 is api2

    def test_15_singleton_front_api_different_objects(self):
        # Test #15. Try to create 2 instances of the API class, initialized with different api objects
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API(api_object=RabbitApi("event-service", 49153))
        assert api1 is api2

    def test_16_api_without_emit(self):
        # Test #16. Try to create an instance of the API class, with an api object without an emit() method
        # Expected result: AttributeError is thrown, informing about the requirements for the api object
        class BadAPI:
            def no_emit(self):
                pass
        expected_result = "Provided API object doesn't contain 'emit()' method"
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)

    def test_17_api_with_wrong_emit_parameter_types(self):
        # Test #17. Try to create an instance of the API class, with wrong api object emit() parameters' types
        # Expected result: AttributeError is thrown, informing about the requirements for the api object
        class BadAPI:
            def emit(self, message_type: dict, message_subtype: dict, message: dict):
                pass
        expected_result = (
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
        )
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)

    def test_18_api_with_unexpected_emit_parameter_names(self, caplog):
        # Test #18. Try to create an instance of the API class, with wrong api object emit() parameters' names
        # Expected result: object is created, but user is warned about the advisable parameters' names
        import logging
        caplog.set_level(logging.WARNING)

        class BadAPI:
            def emit(self, dummy: str, message_subtype: str, message: str):
                pass
        expected_result = (
            "Parameter names of the emit() method are untypical for your API object."
            "It is advisable to check emit() parameters."
            "Expected parameters are: 'message_type', 'message_subtype', 'message'"
        )
        # cleanup
        API._instance = None
        API(BadAPI())
        for record in caplog.records:
            assert record.levelname == "WARNING"
            assert expected_result in str(record)

    def test_19_api_with_missing_emit_parameters(self):
        # Test #19. Try to create an instance of the API class, with missing api object emit() parameters
        # Expected result: AttributeError is thrown, informing about the requirements for the api object
        class BadAPI:
            def emit(self, message_type: str, message_subtype: str):
                pass
        expected_result = (
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
        )
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)

    def test_20_correct_api_object(self):
        # Test #20. Try to create an instance of the API class, with expected api object
        # Expected result: an instance is created
        class GoodAPI:
            def emit(self, message_type: str, message_subtype: str, message: str):
                pass
        API._instance = None
        test_api = API(GoodAPI())
        assert isinstance(test_api, API)
import os
import pickle
import time
from threading import Thread

import pika

# USER
from logger.default_logger import BRISELogConfigurator
from main import MainThread
from tools.front_API import API
from tools.rabbit_API_class import RabbitApi

logger = BRISELogConfigurator().get_logger(__name__)
# from tools.main_mock import run as main_run

# Initialize the API singleton
API(api_object=RabbitApi(os.getenv("BRISE_EVENT_SERVICE_HOST"),
                         os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))


class ConsumerThread(Thread):
    """
    This class runs in a separate thread and handles requests from client nodes
    (e.g. benchmark, front-end) connected to the `main_start_queue`, `main_status_queue`,
    `main_stop_queue`, and `main_download_dump_queue`; it produces results in the
    specified queue with the specified tag and works as the server part of RPC.
    """

    def __init__(self, host, port, *args, **kwargs):
        super(ConsumerThread, self).__init__(*args, **kwargs)
        self._host = host
        self._port = port
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._host, port=self._port))
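    # A sketch (not the actual implementation) of how such a consumer thread's
    # run() could be completed with pika; the queue names come from the class
    # docstring above, the handler is a placeholder.
    def run(self):
        channel = self.connection.channel()
        for queue in ("main_start_queue", "main_status_queue",
                      "main_stop_queue", "main_download_dump_queue"):
            channel.queue_declare(queue=queue)
        # In the real class, each queue would get its own RPC callback here.
        channel.basic_consume(queue="main_status_queue",
                              on_message_callback=self.main_status,  # hypothetical handler
                              auto_ack=True)
        channel.start_consuming()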