Example #1
    def download_dump_request_queue(self, channel, method, properties, body):
        """
        RPC function that returns a base64-encoded dump of the latest experiment.
        body['format']: specifies the file extension of the dump

        result["status"]: status of the response, "ok" or an error description
        result["body"]: base64-encoded dump
        result["file_name"]: name of the dump file
       """
        body = json.loads(body)
        result = {"status": None, "body": None, "file_name": None}
        dump_name = os.environ.get('EXP_DUMP_NAME')
        try:
            if dump_name == 'undefined':
                result["status"] = "missing experiment file"
                API().send("log", "error", message=result["status"])
            else:
                filename = f"{dump_name}.{body['format']}"
                with open(filename, "rb") as file:
                    result["status"] = "ok"
                    result["body"] = str(base64.b64encode(file.read()),
                                         "utf-8")
                    result["file_name"] = f"{dump_name}.{body['format']}"
        except Exception as error:
            result[
                "status"] = 'Download dump file of the experiment: %s' % error
            API().send("log", "error", message=result["status"])
        self.channel.basic_publish(
            exchange='',
            routing_key=properties.reply_to,
            properties=pika.BasicProperties(
                correlation_id=properties.correlation_id),
            body=json.dumps(result))
        self.channel.basic_ack(delivery_tag=method.delivery_tag)
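
A client-side sketch of calling this RPC endpoint with pika (illustration only): the
request queue name 'main_download_dump_queue' is taken from the ConsumerThread docstring
in Example #24, and the host, port, and dump format values are assumptions.

import base64
import json
import uuid

import pika


def request_dump(host="localhost", port=5672, dump_format="pkl"):
    """Send an RPC request for the experiment dump and save the decoded file."""
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    # Exclusive, auto-named reply queue for the RPC response.
    reply_queue = channel.queue_declare(queue='', exclusive=True).method.queue
    correlation_id = str(uuid.uuid4())
    response = {}

    def on_reply(ch, method, properties, body):
        if properties.correlation_id == correlation_id:
            response.update(json.loads(body))

    channel.basic_consume(queue=reply_queue, on_message_callback=on_reply, auto_ack=True)
    channel.basic_publish(
        exchange='',
        routing_key='main_download_dump_queue',
        properties=pika.BasicProperties(reply_to=reply_queue, correlation_id=correlation_id),
        body=json.dumps({"format": dump_format}))
    while not response:
        connection.process_data_events(time_limit=1)
    connection.close()

    if response.get("status") == "ok":
        with open(response["file_name"], "wb") as dump_file:
            dump_file.write(base64.b64decode(response["body"]))
    return response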
Example #2
    def __init__(self, log_file_name, experiment):
        """
        Initialization of regression model
        :param log_file_name: - string, location of file, which will store results of model creation
        :param experiment: instance of Experiment class
        """
        self.logger = logging.getLogger(__name__)
        # Send updates to subscribers
        self.sub = API()

        # Model configuration - related fields.
        self.minimal_test_size = experiment.description["ModelConfiguration"][
            "minimalTestingSize"]
        self.maximal_test_size = experiment.description["ModelConfiguration"][
            "maximalTestingSize"]
        self.log_file_name = log_file_name

        # Built model - related fields.
        self.model = None
        self.minimum_model_accuracy = experiment.description[
            "ModelConfiguration"]["MinimumAccuracy"]
        self.built_model_accuracy = 0
        self.built_model_test_size = 0.0

        # Data holding fields.
        self.experiment = experiment
        self.all_configurations = []
Example #3
def validate_experiment_description(experiment_description: dict,
                                    schema_file_path: str = './Resources/schema/experiment.schema.json'):
    """
    Performs validation and raises error if provided Experiment Description does not pass the validation
        according to the schema
    :param experiment_description: Dict. Experiment Description.
    :param schema_file_path:
    :return:
    """
    logger = logging.getLogger(__name__)
    validity_check = is_json_file_valid(validated_data=experiment_description, schema_path=schema_file_path)
    uniqueness_check = get_duplicated_sc_names(experiment_description)
    presence_check = get_missing_sc_entities(experiment_description)
    if not validity_check:
        msg = f"Provided Experiment Description has not passed the validation using schema in file {schema_file_path}."
        logger.error(msg)
        API().send('log', 'error', message=msg)
    if uniqueness_check:
        msg = f"Some Stop Condition instances are duplicated: {uniqueness_check}."
        logger.error(msg)
        API().send('log', 'error', message=msg)
    if presence_check:
        msg = f"Some Stop Conditions defined in Stop Condition Trigger Logic expression are missing: {presence_check}."
        logger.error(msg)
        API().send('log', 'error', message=msg)

    if validity_check and not uniqueness_check and not presence_check:
        logger.info("Provided Experiment Description is valid.")
    else:
        msg = "Some errors caused during validation. Please, check the Experiment Description."
        raise ValueError(msg)
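
A hypothetical usage sketch: load an Experiment Description (load_json_file is shown in
Example #19; the file path below appears as the default in Example #23) and validate it
before starting an experiment.

description = load_json_file('./Resources/EnergyExperiment/EnergyExperiment.json')
try:
    validate_experiment_description(description)
except ValueError as error:
    print(f"Experiment Description rejected: {error}")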
Example #4
    def test_15_singleton_front_api_different_objects(self):
        # Test #15. Try to create 2 instances of the API class, initialized with different api objects
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API(api_object=RabbitApi("event-service", 49153))
        assert api1 is api2
Example #5
    def __init__(self, description: dict):
        """
        Initialization of Experiment class
        Following fields are declared:

        self.all_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, it is taken from .json file
                           shape - dict with subdicts
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        self.default_configuration = []
        self.all_configurations = []
        self._description = description
        self.search_space = []
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by their descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name = "exp_{task_name}_{experiment_hash}".format(
            task_name=self.description["TaskConfiguration"]["TaskName"],
            experiment_hash=self.id)
        self.current_best_configurations = []

        self.__generate_search_space()
Example #6
    def test_14_singleton_front_api(self):
        # Test #14. Try to create 2 instances of the API class
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API()
        assert api1 is api2
Example #7
    def __init__(self, description: dict):
        """
        Initialization of Experiment class
        Following fields are declared:
        self.measured_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        # TODO: merge lists into a single one (https://github.com/dpukhkaiev/BRISEv2/pull/112#discussion_r371761149)
        self.evaluated_configurations: List[Configuration] = []  # repeater already evaluates these configurations
        self.measured_configurations: List[Configuration] = []  # the results for these configurations are already received
        self._default_configuration: Configuration = None
        self._description: dict = description
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.id}"
        # TODO MultiOpt: Currently we store only one solution configuration here,
        #  but it was made as a possible Hook for multidimensional optimization.
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False

        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()
Example #8
    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()

        # for thread-safe addition of values to the relevant list; protection against duplicate configurations
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()
Example #9
    def __init__(self,
                 experiment,
                 min_points_in_model=None,
                 top_n_percent=30,
                 num_samples=96,
                 random_fraction=1 / 3,
                 bandwidth_factor=3,
                 min_bandwidth=1e-3,
                 **kwargs):

        self.model = None
        self.top_n_percent = top_n_percent

        self.experiment = experiment
        self.isMinimizationExperiment = experiment.is_minimization()

        self.bw_factor = bandwidth_factor
        self.min_bandwidth = min_bandwidth

        if "logger" not in dir(self):
            self.logger = logging.getLogger(__name__)
        self.sub = API()

        if min_points_in_model is None:
            self.min_points_in_model = len(
                self.experiment.description["DomainDescription"]
                ["AllConfigurations"]) + 1
        elif min_points_in_model < len(self.experiment.description[
                "DomainDescription"]["AllConfigurations"]) + 1:
            self.logger.warning(
                'Invalid min_points_in_model value. Setting it to %i' %
                (len(self.experiment.description["DomainDescription"]
                     ["AllConfigurations"]) + 1))
            self.min_points_in_model = len(
                self.experiment.description["DomainDescription"]
                ["AllConfigurations"]) + 1
        else:
            self.min_points_in_model = min_points_in_model

        self.num_samples = num_samples
        self.random_fraction = random_fraction

        hps = self.experiment.description["DomainDescription"][
            "AllConfigurations"]

        self.kde_vartypes = ""
        self.vartypes = []

        for h in hps:
            self.kde_vartypes += 'u'
            self.vartypes += [len(h)]

        self.vartypes = np.array(self.vartypes, dtype=int)

        # store precomputed probs for the categorical parameters
        self.cat_probs = []

        # Data holding fields.
        self.all_configurations = []
        self.good_config_rankings = dict()
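
The variable-type string built above marks every hyperparameter as an unordered
categorical ('u'), which is the convention used by statsmodels' multivariate KDE;
a minimal sketch of how such a string is typically consumed (an assumption about the
surrounding BOHB-style code, not taken from this project):

import statsmodels.api as sm

# Two encoded categorical hyperparameters, three observed configurations.
train_data = [[0, 1], [1, 2], [2, 0]]
kde = sm.nonparametric.KDEMultivariate(data=train_data, var_type='uu', bw='normal_reference')
print(kde.pdf([1, 1]))  # density estimate for one encoded configuration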
Example #10
    def disable_configuration(self):
        """
        Disable configuration. This configuration won't be used in the experiment.
        """
        if self.is_enabled:
            self.is_enabled = False
            temp_msg = f"Configuration {self} was disabled. It will not be added to the Experiment."
            self.logger.warning(temp_msg)
            API().send('log', 'warning', message=temp_msg)
Example #11
    def test_20_correct_api_object(self):
        # Test #20. Try to create an instance of the API class, with expected api object
        # Expected result: an instance is created
        class GoodAPI:
            def emit(self, message_type: str, message_subtype: str,
                     message: str):
                pass

        API._instance = None
        test_api = API(GoodAPI())
        assert isinstance(test_api, API)
Example #12
    def __init__(self, description: dict, search_space: Hyperparameter):
        """
        Initialization of Experiment class
        Following fields are declared:
        self.measured_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, it is taken from .json file
                           shape - dict with subdicts
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        self.evaluated_configurations: List[Configuration] = []  # repeater already evaluates these configurations
        self.measured_configurations: List[Configuration] = []  # the results for these configurations are already received
        self._default_configuration: Configuration = None
        self._description: Mapping = description
        self.search_space: Hyperparameter = search_space
        self.end_time = self.start_time = datetime.datetime.now()
        # An ID that is used to differentiate Experiments by descriptions.
        self.ed_id = hashlib.sha1(
            json.dumps(self.description,
                       sort_keys=True).encode("utf-8")).hexdigest()
        # A unique ID, different for every experiment (even with the same description)
        self.unique_id = str(uuid.uuid4())
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.ed_id}"
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False

        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

        # initialize connection to the database
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                os.getenv("BRISE_DATABASE_PORT"),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))
Example #13
    def test_16_api_without_emit(self):
        # Test #16. Try to create an instance of the API class, with api object without emit() method
        # Expected result: AttributeError is thrown, informing about the requirements to the api object
        class BadAPI:
            def no_emit(self):
                pass

        expected_result = "Provided API object doesn't contain 'emit()' method"
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)
Example #14
    def __init__(self, experiment_setup: [Experiment, Hyperparameter] = None):
        """
        The function for initializing main thread
        :param experiment_setup: fully initialized experiment, r.g from a POST request
        """
        super(MainThread, self).__init__()
        self._is_interrupted = False
        self.conf_lock = threading.Lock()
        self._state = self.State.IDLE
        self.experiment_setup = experiment_setup

        self.sub = API()  # front-end subscribers
        if __name__ == "__main__":
            self.logger = BRISELogConfigurator().get_logger(__name__)
        else:
            self.logger = logging.getLogger(__name__)

        self.experiment: Experiment = None
        self.connection: pika.BlockingConnection = None
        self.consume_channel = None
        self.predictor: Predictor = None
        self.wsc_client: WSClient = None
        self.repeater: RepeaterOrchestration = None
        self.database: MongoDB = None
Example #15
    def test_19_api_with_missing_emit_parameters(self):
        # Test #19. Try to create an instance of the API class, with missing api object emit() parameters
        # Expected result: AttributeError is thrown, informing about the requirements to the api object
        class BadAPI:
            def emit(self, message_type: str, message_subtype: str):
                pass

        expected_result = (
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
        )
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)
Example #16
def create_folder_if_not_exists(folderPath):
    """
    Method create folder if it don't exist.
    :param folderPath: sting path to folder, could include filename.
    :return: true if create folder or it exist
    """
    logger = logging.getLogger(__name__)
    try:
        dir_path = path.dirname(folderPath)
        if dir_path and not path.exists(dir_path):
            makedirs(dir_path)
        return True
    except IOError as error:
        msg = "Unable to create folder %s. Error information: %s" % (
            folderPath, error)
        logger.error(msg, exc_info=True)
        API().send("log", "error", message=msg)
        raise error
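
Hypothetical usage (the path is illustrative): make sure the directory of a results file
exists before opening the file for writing.

results_path = "./Results/regression_model.log"
if create_folder_if_not_exists(results_path):
    with open(results_path, "a") as results_file:
        results_file.write("model building started\n")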
Example #17
def validate_experiment_data(experiment_data: dict,
                             schema_file_path: str = './Resources/schema/experiment_data.schema.json'):
    """
    Performs validation and raises error if provided Experiment Data does not pass the validation
        according to the schema
    :param experiment_data: Dict. Experiment Data.
    :param schema_file_path:
    :return:
    """
    logger = logging.getLogger(__name__)

    if is_json_file_valid(validated_data=experiment_data, schema_path=schema_file_path):
        logger.info("Provided Experiment Data is valid.")
    else:
        msg = "Provided Experiment Data has not passed the validation using schema in file %s. " \
              "Experiment data: \n%s" % (schema_file_path, experiment_data)
        logger.error(msg)
        API().send('log', 'error', message=msg)
        raise ValueError(msg)
Example #18
    def test_18_api_with_unexpected_emit_parameter_names(self, caplog):
        # Test #18. Try to create an instance of the API class, with wrong api object emit() parameters' names
        # Expected result: object is created, but user is warned about the advisable parameters' names
        import logging
        caplog.set_level(logging.WARNING)

        class BadAPI:
            def emit(self, dummy: str, message_subtype: str, message: str):
                pass

        expected_result = (
            "Parameter names of the emit() method are untypical for your API object."
            "It is advisable to check emit() parameters."
            "Expected parameters are: 'message_type', 'message_subtype', 'message'"
        )
        # cleanup
        API._instance = None
        API(BadAPI())
        for record in caplog.records:
            assert record.levelname == "WARNING"
            assert expected_result in str(record)
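
A sketch of the singleton behaviour exercised by tests #14-#20 (an illustration under
assumptions, not the project's actual tools.front_API.API implementation; in particular,
the send() forwarding is simplified here):

import inspect
import logging


class API:
    _instance = None

    def __new__(cls, api_object=None):
        if cls._instance is None:
            if api_object is not None:
                # The api object must expose emit(message_type, message_subtype, message).
                if not hasattr(api_object, "emit"):
                    raise AttributeError("Provided API object doesn't contain 'emit()' method")
                params = list(inspect.signature(api_object.emit).parameters)
                if len(params) < 3:
                    raise AttributeError(
                        "Provided API object has unsupported 'emit()' method."
                        "Its parameters do not correspond to the required!"
                        "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'")
                if params[:3] != ["message_type", "message_subtype", "message"]:
                    logging.getLogger(__name__).warning(
                        "Parameter names of the emit() method are untypical for your API object."
                        "It is advisable to check emit() parameters."
                        "Expected parameters are: 'message_type', 'message_subtype', 'message'")
            cls._instance = super().__new__(cls)
            cls._instance._api_object = api_object
        return cls._instance

    def send(self, message_type: str, message_subtype: str, **kwargs):
        # Forward the payload to the wrapped api object, if one was provided.
        if self._api_object is not None:
            self._api_object.emit(message_type, message_subtype, str(kwargs))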
Example #19
def load_json_file(path_to_file):
    """
    Method reads .json file
    :param path_to_file: sting path to file.
    :return: object that represent .json file
    """
    logger = logging.getLogger(__name__)
    front_api = API()
    try:
        with open(path_to_file, 'r') as File:
            jsonFile = json.loads(File.read())
            return jsonFile
    except IOError as error:
        msg = "Unable to read a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
    except json.JSONDecodeError as error:
        msg = "Unable to decode a json file '%s'. Error information: %s" % (path_to_file, error)
        logger.error(msg, exc_info=True)
        front_api.send('log', 'error', message=msg)
        raise error
Example #20
File: repeater.py  Project: ITS-Zah/BRISE2
    def measure_configurations(self, configurations: list,
                               experiment: Experiment):
        """
        Evaluates the Target System using specific Configuration while results of Evaluation will not be precise.
        :param configurations: list of Configurations that are needed to be measured.
        :param experiment: instance of 'experiment' is required for model-awareness.
        :return: list of Configurations that were evaluated
        """
        # Removing previous measurements
        current_measurement = {}
        # Creating holders for current measurements
        for configuration in configurations:
            # Evaluating each Configuration in configurations list
            needed_tasks_count = self.evaluation_by_type(
                configuration, experiment)
            current_measurement[str(configuration.get_parameters())] = {
                'parameters': configuration.get_parameters(),
                'needed_tasks_count': needed_tasks_count,
                'Finished': False
            }

            if needed_tasks_count == 0:
                current_measurement[str(
                    configuration.get_parameters())]['Finished'] = True
                current_measurement[str(configuration.get_parameters(
                ))]['Results'] = configuration.get_average_result()

        # Continue to feed with a new Tasks while not passing the evaluation.
        while True:

            # Selecting only those configurations that are not finished yet.
            tasks_to_send = []
            for point in current_measurement.keys():
                if not current_measurement[point]['Finished']:
                    for i in range(
                            current_measurement[point]['needed_tasks_count']):
                        tasks_to_send.append(
                            current_measurement[point]['parameters'])
                        self.performed_measurements += 1

            if not tasks_to_send:
                return configurations

            # Send these configurations to the Worker service
            results = self.worker_service_client.work(tasks_to_send)

            # Sending data to API and adding Tasks to Configuration
            for parameters, result in zip(tasks_to_send, results):
                for config in configurations:
                    if config.get_parameters() == parameters:
                        config.add_tasks(parameters, result)

                API().send('new',
                           'task',
                           configurations=[parameters],
                           results=[result])

            # Evaluating each Configuration in configurations list
            for configuration in configurations:
                needed_tasks_count = self.evaluation_by_type(
                    configuration, experiment)
                current_measurement[str(configuration.get_parameters(
                ))]['needed_tasks_count'] = needed_tasks_count
                if needed_tasks_count == 0:
                    current_measurement[str(
                        configuration.get_parameters())]['Finished'] = True
                    current_measurement[str(configuration.get_parameters(
                    ))]['Results'] = configuration.get_average_result()
Example #21
class RegressionSweetSpot(Model):
    def __init__(self, log_file_name, experiment):
        """
        Initialization of regression model
        :param log_file_name: - string, location of file, which will store results of model creation
        :param experiment: instance of Experiment class
        """
        self.logger = logging.getLogger(__name__)
        # Send updates to subscribers
        self.sub = API()

        # Model configuration - related fields.
        self.minimal_test_size = experiment.description["ModelConfiguration"][
            "minimalTestingSize"]
        self.maximal_test_size = experiment.description["ModelConfiguration"][
            "maximalTestingSize"]
        self.log_file_name = log_file_name

        # Built model - related fields.
        self.model = None
        self.minimum_model_accuracy = experiment.description[
            "ModelConfiguration"]["MinimumAccuracy"]
        self.built_model_accuracy = 0
        self.built_model_test_size = 0.0

        # Data holding fields.
        self.experiment = experiment
        self.all_configurations = []

    def build_model(self, degree=6, tries=20):
        """
        Tries to build the new regression model.

        :param degree: Int. scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html
        :param tries: Int. Number of tries to build the model in each step of decreasing test size.
        :return: Boolean. True if the model was successfully built, otherwise - False.
        """

        # Building model
        cur_accuracy = 0.99
        best_got = -10e10
        best_model = None
        while cur_accuracy > self.minimum_model_accuracy:
            current_test_size = self.maximal_test_size
            while current_test_size > self.minimal_test_size:
                for x in range(tries):
                    feature_train, feature_test, target_train, target_test = self.resplit_data(
                        current_test_size)
                    model = Pipeline([
                        ('poly',
                         PolynomialFeatures(degree=degree,
                                            interaction_only=False)),
                        ('reg', Ridge())
                    ])
                    model.fit(feature_train, target_train)
                    score_measured = model.score(feature_test, target_test)

                    if score_measured > best_got:
                        best_got = score_measured
                        best_model = model
                        self.logger.info(
                            'GOT NEW ACCURACY: %s with %s test size and %s accuracy threshold '
                            % (round(score_measured,
                                     3), round(current_test_size,
                                               2), round(cur_accuracy, 2)))

                if best_got > cur_accuracy:
                    self.model = best_model
                    self.built_model_accuracy = best_got
                    self.built_model_test_size = current_test_size
                    self.logger.info(
                        "Regression model built with %s test size and %s accuracy."
                        % (current_test_size, best_got))
                    return True
                else:
                    current_test_size -= 0.01
            cur_accuracy -= 0.01
        self.logger.info(
            "Unable to build model, current best accuracy: %s need more data.."
            % best_got)
        return False

    def validate_model(self, degree=6):
        """
        Return True, if the model have built, and False, if the model can not build or the model already exists
        :param degree:
        :return: True or False
        """
        # Check if model was built.
        if not self.model:
            return False

        # Check if the model is adequate; if so, write its coefficients to the log file.
        predicted_configuration = self.predict_next_configurations(1)
        predicted_labels = predicted_configuration[0].predicted_result
        if predicted_labels[0] >= 0:
            f = open(self.log_file_name, "a")
            f.write("Search space::\n")
            f.write(str(self.experiment.search_space) + "\n")
            f.write("Testing size = " + str(self.built_model_test_size) + "\n")
            for i in range(degree + 1):
                if i == 0:
                    f.write("(TR ^ 0) * (FR ^ 0) = " +
                            str(self.model.named_steps['reg'].coef_[i]) + "\n")
                else:
                    for j in range(i + 1):
                        f.write("(TR ^ " + str(i - j) + ") * (FR ^ " + str(j) +
                                ") = " +
                                str(self.model.named_steps['reg'].coef_[0][
                                    self.sum_fact(i) + j]) + "\n")
            f.write("R^2 = " + str(self.built_model_accuracy) + "\n")
            f.write("Intercept = " +
                    str(self.model.named_steps['reg'].intercept_) + "\n")
            f.close()
            self.logger.info("Built model is valid.")
            self.sub.send('log', 'info', message="Built model is valid")
            return True
        else:
            self.logger.info(
                "Predicted energy lower than 0: %s. Need more data.." %
                predicted_labels[0])
            self.sub.send(
                'log',
                'info',
                message="Predicted energy lower than 0: %s. Need more data.." %
                predicted_labels[0])
            return False

    def predict_next_configurations(self, amount):
        """
        Takes features, using previously created model makes regression to find labels and return label with the lowest value.
        :param amount: int number of Configurations which will be returned
        :return: list of Configurations that are needed to be measured.
        """
        # 1. get model's predictions
        predicted_results = []
        for index, predicted_result in sorted(enumerate(
                self.model.predict(self.experiment.search_space)),
                                              key=lambda c: c[1]):
            conf = self.experiment.search_space[index]
            predicted_results.append((predicted_result, conf))

        # Only for DEMO
        # self.sub.send('predictions', 'configurations',
        #               configurations=[self.experiment.search_space[index] for (predicted_result, index) in predicted_results],
        #               results=[[round(predicted_result[0], 2)] for (predicted_result, index) in predicted_results])

        # 2. Update predicted results for already evaluated Configurations.
        for config in self.all_configurations:
            for pred_tuple in predicted_results:
                if (pred_tuple[1] == config.get_parameters()):
                    config.add_predicted_result(pred_tuple[1], pred_tuple[0])

        # 3. Pick the required amount of configs
        all_config = [
            conf.get_parameters() for conf in self.all_configurations
        ]
        result = []
        for best in predicted_results[:amount]:
            if best[1] in all_config:
                select = [
                    conf for conf in self.all_configurations
                    if conf.get_parameters() == best[1]
                ]
                result.append(select[0])
            else:
                new_conf = Configuration(best[1])
                new_conf.add_predicted_result(best[1], best[0])
                result.append(new_conf)

        # 4. return configs
        return result

    def resplit_data(self, test_size):
        """
        Just recreates subsets of features and labels for training and testing from existing features and labels.
        :param test_size: Float. Indicates the amount of data that will be used to test the model.
        :return: None
        """
        all_features = []
        all_labels = []
        for configuration in self.all_configurations:
            all_features.append(configuration.get_parameters())
            all_labels.append(configuration.get_average_result())

        feature_train, feature_test, target_train, target_test = \
            model_selection.train_test_split(all_features, all_labels, test_size=test_size)

        return feature_train, feature_test, target_train, target_test

    @staticmethod
    def sum_fact(num):
        """
        Return the sum of all numbers from 1 till 'num'
        :param num: int
        :return:
        """
        return reduce(lambda x, y: x + y, list(range(1, num + 1)))
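
    # Note (added for illustration, not from the original source): sum_fact(n) is the
    # triangular number n * (n + 1) // 2, e.g. sum_fact(6) == 21; validate_model() uses
    # it above to locate the Ridge coefficient of the (TR ^ (i - j)) * (FR ^ j) term.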

    def update_data(self, configurations):
        """
        Method adds configurations to whole set of configurations.

        :param configurations: List of Configuration's instances
        :return: self
        """
        self.all_configurations = configurations
        return self
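
A hypothetical end-to-end usage of RegressionSweetSpot (the `experiment` instance, the list
`configurations` of measured Configuration objects, and the log file path are assumptions):

model = RegressionSweetSpot(log_file_name="./Results/sweet_spot.log", experiment=experiment)
model.update_data(configurations)
if model.build_model(degree=6, tries=20) and model.validate_model(degree=6):
    next_configuration = model.predict_next_configurations(amount=1)[0]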
Example #22
    def measure_configurations(self, channel, method, properties, body):
        """
        Callback function for the result of measuring
        :param ch: pika.Channel
        :param method:  pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of a configurations in bytes format
        """
        if os.environ.get('TEST_MODE') == 'UNIT_TEST':
            result = json.loads(body)
        else:
            result = json.loads(body.decode())
        configuration = Configuration.from_json(result["configuration"])
        if configuration.status != Configuration.Status.NEW and os.environ.get(
                'TEST_MODE') != 'UNIT_TEST':
            tasks_to_send = result["tasks_to_send"]
            tasks_results = result["tasks_results"]
            for index, objective in enumerate(self._objectives):
                tasks_results = error_check(tasks_results, objective,
                                            self._expected_values_range[index],
                                            self._objectives_data_types[index])
            if self.experiment.description["OutliersDetection"]["isEnabled"]:
                tasks_results = self.outlier_detectors.find_outliers_for_taskset(
                    tasks_results, self._objectives, [configuration],
                    tasks_to_send)

            # Sending data to API and adding Tasks to Configuration
            for parameters, task in zip(tasks_to_send, tasks_results):
                if configuration.parameters == parameters:
                    if configuration.is_valid_task(task):
                        configuration.add_task(task)
                        if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                            self.database.write_one_record(
                                "Tasks", configuration.get_task_record(task))
                    else:
                        configuration.increase_failed_tasks_number()

                API().send('new',
                           'task',
                           configurations=[parameters],
                           results=[task])

        # Evaluating configuration
        if configuration.number_of_failed_tasks <= self.repeater_parameters[
                'MaxFailedTasksPerConfiguration']:
            needed_tasks_count = self.evaluation_by_type(configuration)
        else:
            needed_tasks_count = 0
            configuration.status = Configuration.Status.BAD
            if len(configuration.get_tasks()) == 0:
                self.experiment.increment_bad_configuration_number()
                configuration.disable_configuration()
        current_measurement = {
            str(configuration.parameters): {
                'parameters': configuration.parameters,
                'needed_tasks_count': needed_tasks_count,
                'Finished': False
            }
        }

        if needed_tasks_count == 0:
            current_measurement[str(
                configuration.parameters)]['Finished'] = True
            current_measurement[str(
                configuration.parameters)]['Results'] = configuration.results

        tasks_to_send = []
        for point in current_measurement.keys():
            if not current_measurement[point]['Finished']:
                for i in range(
                        current_measurement[point]['needed_tasks_count']):
                    tasks_to_send.append(
                        current_measurement[point]['parameters'])
                    self.performed_measurements += 1
                    if os.environ.get('TEST_MODE') != 'UNIT_TEST':
                        self.database.write_one_record(
                            "Repeater_measurements",
                            self.get_repeater_measurements_record())

        if os.environ.get('TEST_MODE') == 'UNIT_TEST':
            return configuration, needed_tasks_count

        elif configuration.status == Configuration.Status.MEASURED or configuration.status == Configuration.Status.BAD:

            conn_params = pika.ConnectionParameters(host=self.event_host,
                                                    port=int(self.event_port))
            with pika.BlockingConnection(conn_params) as connection:
                with connection.channel() as channel:
                    try:
                        if configuration.type == Configuration.Type.DEFAULT:
                            self._type = self.get_repeater()
                            channel.basic_publish(
                                exchange='',
                                routing_key=
                                'default_configuration_results_queue',
                                body=configuration.to_json())
                        elif configuration.type == Configuration.Type.PREDICTED or \
                                configuration.type == Configuration.Type.FROM_SELECTOR:
                            channel.basic_publish(
                                exchange='',
                                routing_key='configurations_results_queue',
                                body=configuration.to_json())
                    except pika.exceptions.ChannelWrongStateError as err:
                        if not channel.is_open:
                            self.logger.warning(
                                "Attempt to send a message after closing the connection"
                            )
                        else:
                            raise err
        elif configuration.status == Configuration.Status.EVALUATED or \
                configuration.status == Configuration.Status.REPEATED_MEASURING:

            conn_params = pika.ConnectionParameters(host=self.event_host,
                                                    port=int(self.event_port))
            with pika.BlockingConnection(conn_params) as connection:
                with connection.channel() as channel:
                    body = json.dumps({
                        "configuration": configuration.to_json(),
                        "tasks": tasks_to_send
                    })
                    channel.basic_publish(exchange='',
                                          routing_key='process_tasks_queue',
                                          body=body)
Example #23
class MainThread(threading.Thread):
    """
    This class runs Main functionality in a separate thread,
    connected to the `default_configuration_results_queue` and `configurations_results_queue` as a consumer.
    """
    class State(int, Enum):
        RUNNING = 0
        SHUTTING_DOWN = 1
        IDLE = 2

    def __init__(self, experiment_setup: [Experiment, Hyperparameter] = None):
        """
        The function for initializing main thread
        :param experiment_setup: fully initialized experiment, r.g from a POST request
        """
        super(MainThread, self).__init__()
        self._is_interrupted = False
        self.conf_lock = threading.Lock()
        self._state = self.State.IDLE
        self.experiment_setup = experiment_setup

        self.sub = API()  # front-end subscribers
        if __name__ == "__main__":
            self.logger = BRISELogConfigurator().get_logger(__name__)
        else:
            self.logger = logging.getLogger(__name__)

        self.experiment: Experiment = None
        self.connection: pika.BlockingConnection = None
        self.consume_channel = None
        self.predictor: Predictor = None
        self.wsc_client: WSClient = None
        self.repeater: RepeaterOrchestration = None
        self.database: MongoDB = None

    def run(self):
        """
        The entry point to the main node functionality - measuring default Configuration.
        When the default Configuration finishes its evaluation, the first set of Configurations will be
        sampled for evaluation (respectively, the queues for Configuration measurement results initialize).
        """
        self._state = self.State.RUNNING
        self.logger.info("Starting BRISE")
        self.sub.send('log', 'info', message="Starting BRISE")

        if not self.experiment_setup:
            # Check if main.py running with a specified experiment description file path
            if len(argv) > 1:
                exp_desc_file_path = argv[1]
            else:
                exp_desc_file_path = './Resources/EnergyExperiment/EnergyExperiment.json'
                log_msg = f"The Experiment Setup was not provided and the path to an experiment file was not specified." \
                          f" The default one will be executed: {exp_desc_file_path}"
                self.logger.warning(log_msg)
                self.sub.send('log', 'warning', message=log_msg)
            experiment_description, search_space = load_experiment_setup(
                exp_desc_file_path)
        else:
            experiment_description = self.experiment_setup[
                "experiment_description"]
            search_space = self.experiment_setup["search_space"]

        validate_experiment_description(experiment_description)
        os.makedirs(experiment_description["General"]["results_storage"],
                    exist_ok=True)

        # Initializing instance of Experiment - main data holder.
        self.experiment = Experiment(experiment_description, search_space)
        search_space.experiment_id = self.experiment.unique_id
        Configuration.set_task_config(
            self.experiment.description["TaskConfiguration"])

        # initialize connection to rabbitmq service
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                os.getenv("BRISE_EVENT_SERVICE_HOST"),
                int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT"))))
        self.consume_channel = self.connection.channel()

        # initialize connection to the database
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                int(os.getenv("BRISE_DATABASE_PORT")),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))

        # write initial settings to the database
        self.database.write_one_record(
            "Experiment_description",
            self.experiment.get_experiment_description_record())
        self.database.write_one_record(
            "Search_space",
            get_search_space_record(self.experiment.search_space,
                                    self.experiment.unique_id))
        self.experiment.send_state_to_db()

        self.sub.send(
            'experiment',
            'description',
            global_config=self.experiment.description["General"],
            experiment_description=self.experiment.description,
            searchspace_description=self.experiment.search_space.serialize(
                True))
        self.logger.debug(
            "Experiment description and global configuration sent to the API.")

        # Create and launch Stop Condition services in separate threads.
        launch_stop_condition_threads(self.experiment.unique_id)

        # Instantiate client for Worker Service, establish connection.
        self.wsc_client = WSClient(
            self.experiment.description["TaskConfiguration"],
            os.getenv("BRISE_EVENT_SERVICE_HOST"),
            int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))

        # Initialize Repeater - encapsulate Configuration evaluation process to avoid results fluctuations.
        # (achieved by multiple Configuration evaluations on Workers - Tasks)
        RepeaterOrchestration(self.experiment)

        self.predictor: Predictor = Predictor(self.experiment.unique_id,
                                              self.experiment.description,
                                              self.experiment.search_space)

        self.consume_channel.basic_consume(
            queue='default_configuration_results_queue',
            auto_ack=True,
            on_message_callback=self.get_default_configurations_results)
        self.consume_channel.basic_consume(
            queue='configurations_results_queue',
            auto_ack=True,
            on_message_callback=self.get_configurations_results)
        self.consume_channel.basic_consume(queue='stop_experiment_queue',
                                           auto_ack=True,
                                           on_message_callback=self.stop)
        self.consume_channel.basic_consume(
            queue="get_new_configuration_queue",
            auto_ack=True,
            on_message_callback=self.send_new_configurations_to_measure)

        self.default_config_handler = get_default_config_handler(
            self.experiment)
        temp_msg = "Measuring default Configuration."
        self.logger.info(temp_msg)
        self.sub.send('log', 'info', message=temp_msg)
        default_parameters = self.experiment.search_space.generate_default()
        default_configuration = Configuration(default_parameters,
                                              Configuration.Type.DEFAULT,
                                              self.experiment.unique_id)
        default_configuration.experiment_id = self.experiment.unique_id
        dictionary_dump = {"configuration": default_configuration.to_json()}
        body = json.dumps(dictionary_dump)

        self.consume_channel.basic_publish(
            exchange='',
            routing_key='measure_new_configuration_queue',
            body=body)
        # listen all queues with responses until the _is_interrupted flag is False
        try:
            while not self._is_interrupted:
                self.consume_channel.connection.process_data_events(
                    time_limit=1)  # 1 second
        finally:
            if self.connection.is_open:
                self.connection.close()

    def get_default_configurations_results(self, ch, method, properties, body):
        """
        Callback function for the result of default configuration
        :param ch: pika.Channel
        :param method:  pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring default configuration in bytes format
        """
        default_configuration = Configuration.from_json(body.decode())
        if default_configuration.status == Configuration.Status.BAD:
            new_default_values = self.default_config_handler.get_new_default_config(
            )
            if new_default_values:
                config = Configuration(new_default_values,
                                       Configuration.Type.FROM_SELECTOR,
                                       self.experiment.unique_id)
                temp_msg = "New default configuration sampled."
                self.logger.info(temp_msg)
                self.sub.send('log', 'info', message=temp_msg)
                self.consume_channel.basic_publish(
                    exchange='',
                    routing_key='measure_new_configuration_queue',
                    body=json.dumps({"configuration": config.to_json()}))
            else:
                self.logger.error(
                    "The specified default configuration is broken.")
                self.stop()
                self.sub.send(
                    'log',
                    'info',
                    message="The specified default configuration is broken.")
                return
        if self.experiment.is_configuration_evaluated(default_configuration):
            self.experiment.default_configuration = default_configuration
            self.database.update_record(
                "Search_space", {"Exp_unique_ID": self.experiment.unique_id}, {
                    "Default_configuration":
                    default_configuration.get_configuration_record()
                })
            self.database.update_record(
                "Search_space", {"Exp_unique_ID": self.experiment.unique_id}, {
                    "SearchspaceObject": pickle.dumps(
                        self.experiment.search_space)
                })

            temp_msg = f"Evaluated Default Configuration: {default_configuration}"
            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)

            # starting main work: building model and choosing configuration for measuring
            self.consume_channel.basic_publish(
                exchange='', routing_key='get_worker_capacity_queue', body='')

    def get_configurations_results(self, ch, method, properties, body):
        """
        Callback function for the result of all Configurations except Default
        :param ch: pika.Channel
        :param method:  pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: result of measuring any configuration except default in bytes format
        """
        with self.conf_lock:  # To be sure, that no Configuration will be added after satisfying all Stop Conditions.
            configuration = Configuration.from_json(body.decode())
            if not self._is_interrupted and self.experiment.is_configuration_evaluated(
                    configuration):
                self.experiment.try_add_configuration(configuration)
                temp_msg = "-- New Configuration was evaluated. Building Target System model."
                self.logger.info(temp_msg)
                self.sub.send('log', 'info', message=temp_msg)
                self.consume_channel.basic_publish(
                    exchange='',
                    routing_key='get_worker_capacity_queue',
                    body='')

    def send_new_configurations_to_measure(self, ch, method, properties, body):
        """
        This callback function will be triggered on arrival of ONE measured Configuration.
        When there is new measured Configuration, following steps should be done:

            -   update and validate models;

            -   pick either by model, or by selection algorithm new Configuration(s) for evaluation;
                Note: The amount of new Configurations are:
                - 0 if number of available Worker nodes decreased;
                - 1 if number of available Workers did not change;
                - N + 1 if number of available Worker increased by N;

            -   send new Configuration to Repeater for evaluation.
        """

        needed_configs = json.loads(body.decode()).get("worker_capacity", 1)
        for _ in range(needed_configs):
            config = self.predictor.predict(
                self.experiment.measured_configurations)
            if config not in self.experiment.evaluated_configurations:
                temp_msg = f"Model predicted {config}."
            else:
                while config in self.experiment.evaluated_configurations and not self._is_interrupted:
                    if len(self.experiment.evaluated_configurations
                           ) >= self.experiment.search_space.get_size():
                        msg = "Entire Search Space was evaluated. Shutting down."
                        self.logger.warning(msg)
                        self.consume_channel.basic_publish(
                            exchange='',
                            routing_key='stop_experiment_queue',
                            body=msg)
                        break

                    new_parameter_values = OrderedDict()
                    while not self.experiment.search_space.validate(
                            new_parameter_values, is_recursive=True):
                        self.experiment.search_space.generate(
                            new_parameter_values)
                    config = Configuration(new_parameter_values,
                                           Configuration.Type.FROM_SELECTOR,
                                           self.experiment.unique_id)
                temp_msg = f"Fully randomly sampled {config}."

            self.logger.info(temp_msg)
            self.sub.send('log', 'info', message=temp_msg)
            self.consume_channel.basic_publish(
                exchange='',
                routing_key='measure_new_configuration_queue',
                body=json.dumps({"configuration": config.to_json()}))

    def stop(self, ch=None, method=None, properties=None, body=None):
        """
        The function for stop main thread externally (e.g. from front-end)
        """
        with self.conf_lock:
            self.consume_channel.basic_publish(
                exchange='brise_termination_sender', routing_key='', body='')
            self.sub.send('log',
                          'info',
                          message=f"Terminating experiment. Reason: {body}")
            self._state = self.State.SHUTTING_DOWN
            self._is_interrupted = True
            optimal_configuration = self.experiment.get_final_report_and_result(
            )
            self._state = self.State.IDLE
            return optimal_configuration

    def get_state(self):
        return self._state
Example #24
import os
import pickle
import time
from threading import Thread

import pika
# USER
from logger.default_logger import BRISELogConfigurator
from main import MainThread
from tools.front_API import API
from tools.rabbit_API_class import RabbitApi

logger = BRISELogConfigurator().get_logger(__name__)
# from tools.main_mock import run as main_run

# Initialize the API singleton
API(api_object=RabbitApi(os.getenv("BRISE_EVENT_SERVICE_HOST"),
                         os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))


class ConsumerThread(Thread):
    """
    This class runs in a separate thread and handles requests client nodes (e.g. benchmark, front-end),
    connected to the `main_start_queue`, `main_status_queue`, `main_stop_queue`, `main_download_dump_queue`,
    produces results in specified queue with specified tag, works as server part of RPC
    """
    def __init__(self, host, port, *args, **kwargs):
        super(ConsumerThread, self).__init__(*args, **kwargs)

        self._host = host
        self._port = port
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._host, port=self._port))
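
Hypothetical usage (environment variable names taken from the API initialization above and
from Example #23): start the RPC consumer in its own thread.

consumer = ConsumerThread(os.getenv("BRISE_EVENT_SERVICE_HOST"),
                          int(os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")))
consumer.start()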
Example #25
class Experiment:
    def __init__(self, description: dict, search_space: Hyperparameter):
        """
        Initialization of Experiment class
        Following fields are declared:
        self.measured_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, it is taken from .json file
                           shape - dict with subdicts
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        self.evaluated_configurations: List[Configuration] = []  # repeater already evaluates these configurations
        self.measured_configurations: List[Configuration] = []  # the results for these configurations are already received
        self._default_configuration: Configuration = None
        self._description: Mapping = description
        self.search_space: Hyperparameter = search_space
        self.end_time = self.start_time = datetime.datetime.now()
        # An ID that is used to differentiate Experiments by descriptions.
        self.ed_id = hashlib.sha1(
            json.dumps(self.description,
                       sort_keys=True).encode("utf-8")).hexdigest()
        # A unique ID, different for every experiment (even with the same description)
        self.unique_id = str(uuid.uuid4())
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.ed_id}"
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False

        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

        # initialize connection to the database
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                os.getenv("BRISE_DATABASE_PORT"),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error(
                "Unable to update Experiment Description: Read-only property.")
            raise AttributeError(
                "Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error(
                "Unable to delete Experiment Description: Read-only property.")
            raise AttributeError(
                "Unable to delete Experiment Description: Read-only property.")

    description = property(_get_description, _set_description,
                           _del_description)
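
    # Illustrative behaviour of the read-only `description` property (the `experiment`
    # instance and the values below are assumptions, not from the original source):
    #
    #     snapshot = experiment.description   # returns a deep copy
    #     snapshot["General"] = {}             # mutating the copy leaves the Experiment untouched
    #     experiment.description = snapshot    # raises AttributeError: read-only property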

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        del space['measured_conf_lock']
        del space['evaluated_conf_lock']
        del space['database']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()

        # for thread-safe adding of values to the relevant list; protection against duplicate configurations
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    @property
    def default_configuration(self) -> Configuration:
        return self._default_configuration

    @default_configuration.setter
    def default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self._default_configuration:
                self._default_configuration = default_configuration
                self.api.send(
                    "default",
                    "configuration",
                    configurations=[default_configuration.parameters],
                    results=[default_configuration.results])
                self.measured_configurations.append(default_configuration)
                if not self.current_best_configurations:
                    self.current_best_configurations = [default_configuration]
                self.database.write_one_record(
                    "Measured_configurations",
                    default_configuration.get_configuration_record())
                self.database.write_one_record(
                    collection_name="warm_startup_info",
                    record={
                        "Exp_unique_ID": self.unique_id,
                        "wsi": default_configuration.warm_startup_info
                    })
            else:
                raise ValueError(
                    "The default Configuration was registered already.")

    def try_add_configuration(self, configuration: Configuration):
        """
        Add a Configuration object to the Experiment, if the Configuration was not added previously.
        :param configuration: Configuration instance.
        :return: bool flag, True if the Configuration was added to the list of either measured or evaluated configurations,
        False otherwise.
        """
        result = False
        if configuration.is_enabled:
            if self._try_put(configuration):
                # configuration will not be added to the Experiment if it is already there
                result = True
        return result
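    # Duplicate-protection sketch (illustrative, `measured_configuration` is assumed
    # to be an enabled Configuration with status MEASURED):
    #   >>> experiment.try_add_configuration(measured_configuration)  # first call -> True
    #   >>> experiment.try_add_configuration(measured_configuration)  # duplicate  -> False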

    def _try_put(self, configuration_instance: Configuration):
        """
        Takes an instance of the Configuration class and appends it to the relevant list of configuration instances.
        :param configuration_instance: Configuration class instance.
        :return: bool flag, True if the configuration was added to one of the lists, False otherwise.
        """
        if self._is_valid_configuration_instance(configuration_instance):
            if configuration_instance.status == Configuration.Status.MEASURED:
                with self.measured_conf_lock:
                    if configuration_instance not in self.measured_configurations:
                        self._add_measured_configuration_to_experiment(
                            configuration_instance)
                        return True
                    else:
                        return False
            elif configuration_instance.status == Configuration.Status.EVALUATED:
                with self.evaluated_conf_lock:
                    if configuration_instance not in self.evaluated_configurations:
                        self._add_evaluated_configuration_to_experiment(
                            configuration_instance)
                        return True
                    else:
                        return False
            else:
                raise ValueError(
                    f"Can not add Configuration with status {configuration_instance.status.name} to Experiment."
                )

    def get_any_configuration_by_parameters(
            self, parameters: tuple) -> Union[None, Configuration]:
        """
        Find and retrieve an instance of Configuration that was previously added to the Experiment by its parameters.
        :param parameters: tuple. Parameters of the desired Configuration.
        :return: instance of Configuration class or `None` if the Configuration instance was not found.
        """
        for configuration_instance in self.measured_configurations:
            if configuration_instance.parameters == parameters:
                return configuration_instance
        for configuration_instance in self.evaluated_configurations:
            if configuration_instance.parameters == parameters:
                return configuration_instance
        return None

    def get_current_status(self, serializable: bool):
        """
            Returns the current state of the Experiment, including the already elapsed time, the currently found solution
        Configuration, the Experiment description and all already measured Configurations.
        :param serializable: Boolean.
            Defines if the returned structure should be serializable or not. If True, all Configuration objects will be
        transformed to their string representation.
        :return: Dict with the following keys: ["Running time", "Best found Configuration",
                                        "Experiment description", "Evaluated Configurations"]
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration": self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Experiment description": self.description,
            "Evaluated Configurations": [conf.__getstate__() if serializable else conf for conf in self.measured_configurations]
        }
        return current_status
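    # Serialization sketch (hedged): with serializable=True every value is plain data,
    # so the result can be passed straight to json.dumps or yaml.safe_dump, e.g.
    #   >>> json.dumps(experiment.get_current_status(serializable=True), indent=4)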

    def summarize_results_to_file(self, report_format: str, folder_path: str):
        """
            Called before the BRISE proper termination. Aggregates current state of the Experiment and writes it as a
            json or yaml file.
        :param report_format: String. Format of output file, either 'yaml' or 'json'.
        :param folder_path: String. Folder, where results should be stored.
        :return: self
        """
        os.makedirs(folder_path, exist_ok=True)
        output_file_name = os.path.join(folder_path, self.name)
        data = ""
        if report_format.lower() == "yaml":
            from yaml import safe_dump
            output_file_name += '.yaml'
            data = safe_dump(self.get_current_status(serializable=True),
                             width=120,
                             indent=4)
        elif report_format.lower() == "json":
            from json import dumps
            output_file_name += '.json'
            data = dumps(self.get_current_status(serializable=True), indent=4)
        else:
            self.logger.error(
                "Wrong serialization format provided. Supported 'yaml' and 'json'."
            )
        with open(output_file_name, 'w') as output_file:
            output_file.write(data)
            self.logger.info(
                "Results of the Experiment have been writen to file: %s" %
                output_file_name)
        return self

    def is_configuration_evaluated(self, configuration):
        """
        Check whether the Configuration is in the evaluated_configurations list or not.
        Could be used to filter out outdated (not added to the current Experiment) Configurations.
        :param configuration: Configuration instance.
        :return: True if the Configuration instance was previously added to the Experiment, False otherwise.
        """
        return configuration in self.evaluated_configurations

    def get_final_report_and_result(self):
        self.end_time = datetime.datetime.now()
        if self.measured_configurations:
            performed_measurements = \
                self.database.get_last_record_by_experiment_id("Repeater_measurements", self.unique_id)["Performed_measurements"]
            self.logger.info("\n\nFinal report:")

            self.logger.info("ALL MEASURED CONFIGURATIONS:\n")
            for configuration in self.measured_configurations:
                self.logger.info(configuration)
            self.logger.info("Number of measured Configurations: %s" %
                             len(self.measured_configurations))
            self.logger.info("Number of Tasks: %s" % performed_measurements)
            self.logger.info("Best found Configuration: %s" %
                             self.get_current_solution())
            self.logger.info("BRISE running time: %s" %
                             str(self.get_running_time()))

            all_features = []
            for configuration in self.measured_configurations:
                all_features.append(configuration.parameters)
            results_folder = self.description["General"]["results_storage"]
            self.dump(folder_path=results_folder)  # Store instance of Experiment
            self.write_csv(folder_path=results_folder)  # Store Experiment metrics
            self.summarize_results_to_file(report_format="yaml",
                                           folder_path=results_folder)
            self.api.send(
                'final',
                'configuration',
                configurations=[self.get_current_solution().parameters],
                results=[self.get_current_solution().results],
                measured_points=[all_features],
                performed_measurements=[performed_measurements])
            return self.current_best_configurations
        else:
            self.logger.error(
                'No configuration was measured. Please, check your Experiment Description.'
            )

    def get_current_solution(self) -> Union[Configuration, None]:
        if self.current_best_configurations:
            return self.current_best_configurations[0]
        else:
            return None

    def _is_valid_configuration_instance(
            self, configuration_instance: Configuration) -> bool:
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error(
                'Current object is not a Configuration instance, but %s' %
                type(configuration_instance))
            return False

    def _add_measured_configuration_to_experiment(
            self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if configuration.is_better(self.get_objectives_minimization(),
                                   self.get_objectives_priorities(),
                                   self.current_best_configurations[0]):
            # we do not need warm_startup_info anymore, since a better configuration was found
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
            self.database.update_record(
                collection_name="warm_startup_info",
                query={"Exp_unique_ID": self.unique_id},
                new_val={"wsi": configuration.warm_startup_info})
        else:
            # this configuration did not improve the previous solution, so there is no need to keep its warm startup info
            configuration.warm_startup_info = {}

        self.database.write_one_record(
            "Measured_configurations",
            configuration.get_configuration_record())
        self.send_state_to_db()
        self.api.send("new",
                      "configuration",
                      configurations=[configuration.parameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def _add_evaluated_configuration_to_experiment(
            self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        :param configuration: Configuration object.
        :return: None
        """
        self.evaluated_configurations.append(configuration)

    def get_objectives(self) -> List[str]:
        return self.description["TaskConfiguration"]["Objectives"]

    def get_objectives_minimization(self) -> List[bool]:
        return self.description["TaskConfiguration"]["ObjectivesMinimization"]

    def get_objectives_priorities(self) -> List[int]:
        return self.description["TaskConfiguration"]["ObjectivesPriorities"]

    def get_models_objectives_priorities(self) -> List[int]:
        return self.description["TaskConfiguration"][
            "ObjectivesPrioritiesModels"]

    def dump(self, folder_path: str) -> None:
        """
            Save dump of experiment object. Later it could be uploaded through the web API.
        :param folder_path: str. Path to the folder where the dump file should be stored.
            The user running main.py must have permission to write into the specified folder.
        :return: None
        """
        if folder_path[-1] != "/" and folder_path[-1] != "\\":
            folder_path = folder_path + "/"
        os.makedirs(folder_path, exist_ok=True)
        dump_path = folder_path + self.name + ".pkl"

        with open(dump_path, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
            self.logger.info(f"Saved experiment instance. Path: {dump_path}")
        os.environ["EXP_DUMP_NAME"] = folder_path + self.name

        self.database.update_record(
            "Experiment_description", {"Exp_unique_ID": self.unique_id},
            {"ExperimentObject": pickle.dumps(self, pickle.HIGHEST_PROTOCOL)})

    def write_csv(self, folder_path: str) -> None:
        """save .csv file with main metrics of the experiment
        Args:
            folder_path (str, optional): Path to folder, where to store the csv report.
        """
        if self.search_space.get_size() == np.inf:
            search_space_coverage = "unknown (infinite search space)"
        else:
            search_space_coverage = str(
                round((len(self.measured_configurations) /
                       self.search_space.get_size()) * 100)) + '%'

        data = {
            'model': "_".join([model["Type"] for model in self.description["Predictor"]["models"]]),
            'default configuration': [' '.join(str(v) for v in self.default_configuration.parameters)],
            'solution configuration': [' '.join(str(v) for v in self.get_current_solution().parameters)],
            'default result': self.default_configuration.results,
            'solution result': self.get_current_solution().results,
            'number of measured configurations': len(self.measured_configurations),
            'search space coverage': search_space_coverage,
            'number of repetitions': len(self.get_all_repetition_tasks()),
            'execution time': (self.get_running_time()).seconds,
            'repeater': self.description['Repeater']['Type']
        }

        file_path = os.path.join(folder_path, f"{self.name}.csv")

        keys = list(data.keys())
        values = list(data.values())

        with open(file_path, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(keys)
            writer.writerow(values)
            self.logger.info("Saved csv file. Path: %s" % file_path)

    def get_name(self):
        return self.name

    def get_running_time(self):
        if self.end_time is self.start_time:
            return datetime.datetime.now() - self.start_time
        else:
            return self.end_time - self.start_time

    def get_all_repetition_tasks(self):
        """ List of results for all tasks that were received on workers
        Returns:
            [List] -- List with results for all atom-tasks
        """

        all_tasks = []
        result_key = self.description['TaskConfiguration']['Objectives'][0]
        for configuration in self.measured_configurations:
            for task in configuration.get_tasks().values():
                if 'result' in task:
                    all_tasks.append(task['result'][result_key])
        return all_tasks

    def get_number_of_measured_configurations(self):
        return len(self.measured_configurations)

    def get_stop_condition_parameters(self):
        return self.description["StopCondition"]

    def get_selection_algorithm_parameters(self):
        return self.description["SelectionAlgorithm"]

    def get_outlier_detectors_parameters(self):
        return self.description["OutliersDetection"]

    def get_repeater_parameters(self):
        return self.description["Repeater"]

    def increment_bad_configuration_number(self):
        self.bad_configurations_number = self.bad_configurations_number + 1
        return self

    def get_bad_configuration_number(self):
        return self.bad_configurations_number

    def update_model_state(self, model_state: bool):
        self.model_is_valid = model_state

    def get_model_state(self) -> bool:
        return self.model_is_valid

    def send_state_to_db(self) -> None:
        """
        Send the current experiment state information to the database, or create a state record if one does not exist.
        :return: None
        """
        if self.database.get_last_record_by_experiment_id(
                "Experiment_state", self.unique_id) is None:
            self.database.write_one_record("Experiment_state",
                                           self.get_experiment_state_record())
        else:
            self.database.update_record(
                "Experiment_state", {"Exp_unique_ID": self.unique_id}, {
                    "Number_of_measured_configs":
                    self.get_number_of_measured_configurations(),
                    "Number_of_bad_configs":
                    self.get_bad_configuration_number(),
                    "Current_solution":
                    self.get_current_solution().get_configuration_record(),
                    "is_model_valid":
                    self.get_model_state()
                })

    def get_experiment_description_record(self) -> Mapping:
        '''
        The helper method that formats an experiment description to be stored as a record in a Database
        :return: Mapping. Field names of the database collection with respective information
        '''
        record = {}
        # add this specific experiment information
        record["Exp_unique_ID"] = self.unique_id
        record["Exp_ID"] = self.ed_id
        record["DateStarted"] = str(datetime.datetime.now())
        # store experiment description fields
        record.update(self.description)
        # experiment description record will be updated at the end of the experiment
        record["ExperimentObject"] = None
        return record

    def get_experiment_state_record(self) -> Mapping:
        '''
        The helper method that formats current experiment state to be stored as a record in a Database
        :return: Mapping. Field names of the database collection with respective information
        '''
        record = {}
        record["Exp_unique_ID"] = self.unique_id
        record["Number_of_measured_configs"] = self.get_number_of_measured_configurations()
        record["Number_of_bad_configs"] = self.get_bad_configuration_number()
        current_solution = self.get_current_solution()
        if current_solution is not None:
            current_solution = current_solution.get_configuration_record()
        record["Current_solution"] = current_solution
        record["is_model_valid"] = self.get_model_state()
        return record
Example #26
socketio_logger.setLevel(logging.WARNING)
engineio_logger.setLevel(logging.WARNING)

socketIO = socketio.Server(ping_timeout=300,
                           logger=socketio_logger,
                           engineio_logger=engineio_logger)
# instance of Flask app
app = Flask(__name__, static_url_path='', template_folder="static")
CORS(app)
app.wsgi_app = socketio.WSGIApp(socketIO, app.wsgi_app)

# WebSocket
app.config['SECRET_KEY'] = 'galamaga'

# Initialize the API singleton
API(api_object=socketIO)

MAIN_PROCESS = None
data_header = {'version': '1.0'}

# front-end clients registered in the room
front_clients = []


# ---   START
@app.route('/main_start', methods=['GET', 'POST'])
def main_process_start():
    """
    Verifies that the Main node is not already running.
    If it is free, creates a new process with the socketio instance and starts it.
    This ensures that each run of main.py uses only one socketio instance.
    """
Example #27
class Experiment:

    def __init__(self, description: dict):
        """
        Initialization of Experiment class
        Following fields are declared:
        self.measured_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        # TODO: merge lists into a single one (https://github.com/dpukhkaiev/BRISEv2/pull/112#discussion_r371761149)
        self.evaluated_configurations: List[Configuration] = []  # configurations that the Repeater has already evaluated
        self.measured_configurations: List[Configuration] = []  # configurations whose results have already been received
        self._default_configuration: Configuration = None
        self._description: dict = description
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name: str = f"exp_{self.description['TaskConfiguration']['TaskName']}_{self.id}"
        # TODO MultiOpt: Currently we store only one solution configuration here,
        #  but it was made as a possible Hook for multidimensional optimization.
        self.current_best_configurations: List[Configuration] = []
        self.bad_configurations_number = 0
        self.model_is_valid = False

        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error("Unable to update Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error("Unable to delete Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    description = property(_get_description, _set_description, _del_description)

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        del space['measured_conf_lock']
        del space['evaluated_conf_lock']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()

        # for thread-safe adding of values to the relevant list; protection against duplicate configurations
        self.measured_conf_lock = Lock()
        self.evaluated_conf_lock = Lock()

    @property
    def default_configuration(self) -> Configuration:
        return self._default_configuration

    @default_configuration.setter
    def default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self._default_configuration:
                self._default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.hyperparameters],
                              results=[default_configuration.results])
                self.measured_configurations.append(default_configuration)
                if not self.current_best_configurations:
                    self.current_best_configurations = [default_configuration]
                temp_msg = f"Added Default Configuration: {default_configuration}"
                self.logger.info(temp_msg)
                self.api.send('log', 'info', message=temp_msg)
            else:
                raise ValueError("The default Configuration was registered already.")

    def try_add_configuration(self, configuration: Configuration):
        """
        Add a Configuration object to the Experiment, if the Configuration was not added previously.
        :param configuration: Configuration instance.
        :return: bool flag, True if the Configuration was added to the list of either measured or evaluated configurations,
        False otherwise.
        """
        result = False
        if configuration.is_enabled:
            if self._try_put(configuration):
                # configuration will not be added to the Experiment if it is already there
                result = True
        return result

    def _try_put(self, configuration_instance: Configuration):
        """
        Takes an instance of the Configuration class and appends it to the relevant list of configuration instances.
        :param configuration_instance: Configuration class instance.
        :return: bool flag, True if the configuration was added to one of the lists, False otherwise.
        """
        if self._is_valid_configuration_instance(configuration_instance):
            if configuration_instance.status == Configuration.Status.MEASURED:
                with self.measured_conf_lock:
                    if configuration_instance not in self.measured_configurations:
                        self._add_measured_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            elif configuration_instance.status == Configuration.Status.EVALUATED:
                with self.evaluated_conf_lock:
                    if configuration_instance not in self.evaluated_configurations:
                        self._add_evaluated_configuration_to_experiment(configuration_instance)
                        return True
                    else:
                        return False
            else:
                raise ValueError(
                    f"Can not add Configuration with status {configuration_instance.status} to Experiment.")

    def get_any_configuration_by_parameters(self, hyperparameters: tuple) -> Union[None, Configuration]:
        """
        Find and retrieve an instance of Configuration that was previously added to the Experiment by its hyperparameters.
        :param hyperparameters: tuple. Hyperparameters of the desired Configuration.
        :return: instance of Configuration class or `None` if the Configuration instance was not found.
        """
        for configuration_instance in self.measured_configurations:
            if configuration_instance.hyperparameters == hyperparameters:
                return configuration_instance
        for configuration_instance in self.evaluated_configurations:
            if configuration_instance.hyperparameters == hyperparameters:
                return configuration_instance
        return None

    def is_configuration_in_experiment(self, configuration: Configuration) -> bool:
        """
        Check if provided Configuration already in Experiment.

        :param configuration: BRISE Configuration instance with required 'hyperparameters' property.
        :type configuration: Configuration
        :return: boolean True if Configuration was previously added to Experiment, False otherwise.
        :rtype bool
        """
        found_config = self.get_any_configuration_by_parameters(configuration.hyperparameters)
        return True if found_config else False

    def is_configuration_evaluated(self, configuration):
        """
        Check whether the Configuration is in the evaluated_configurations list or not.
        Could be used to filter out outdated (not added to the current Experiment) Configurations.
        :param configuration: Configuration instance.
        :return: True if the Configuration instance was previously added to the Experiment, False otherwise.
        """
        return configuration in self.evaluated_configurations

    def get_final_report_and_result(self, repeater):
        self.end_time = datetime.datetime.now()
        if self.measured_configurations:
            self.logger.info("\n\nFinal report:")

            self.logger.info("ALL MEASURED CONFIGURATIONS:\n")
            for configuration in self.measured_configurations:
                self.logger.info(configuration)
            self.logger.info("Number of measured Configurations: %s" % len(self.measured_configurations))
            self.logger.info("Number of Tasks: %s" % repeater.performed_measurements)
            self.logger.info("Best found Configuration: %s" % self.get_current_solution())
            self.logger.info("BRISE running time: %s" % str(self.get_running_time()))

            all_features = []
            for configuration in self.measured_configurations:
                all_features.append(configuration.hyperparameters)
            self.dump()  # Store instance of Experiment
            self.api.send('final', 'configuration',
                          configurations=[self.get_current_solution().hyperparameters],
                          results=[self.get_current_solution().results],
                          measured_points=[all_features],
                          performed_measurements=[repeater.performed_measurements])
            return self.current_best_configurations
        else:
            self.logger.error('No configuration was measured. Please, check your Experiment Description.')

    def get_current_status(self, serializable: bool = False):
        """
            Returns current state of Experiment, including already elapsed time, currently found solution Configuration,
        default Configuration, Experiment description and all already evaluated Configurations.

        :param serializable: Boolean.
            Defines if returned structure should be serializable or not. If True - all Configuration objects will be
        transformed to their string representation.
        :return: Dict with following keys["Running time", "Best found Configuration",
                                        "Default configuration", "Experiment description",
                                        "Evaluated Configurations"]
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration": self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Default configuration": self.default_configuration.__getstate__() if serializable else self.default_configuration,
            "Experiment description": self.description,
            "Evaluated Configurations": [conf.__getstate__() if serializable else conf for conf in self.measured_configurations]
        }
        return current_status

    def get_current_solution(self) -> Configuration:
        return self.current_best_configurations[0]

    def _is_valid_configuration_instance(self, configuration_instance: Configuration) -> bool:
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error('Current object is not a Configuration instance, but %s' % type(configuration_instance))
            return False

    def _add_measured_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.measured_configurations.append(configuration)
        if not self.current_best_configurations:
            # first solution found
            self.current_best_configurations = [configuration]
        elif configuration.is_better_configuration(self.is_minimization(), self.current_best_configurations[0]):
            # new solution found
            self.current_best_configurations[0].warm_startup_info = {}
            self.current_best_configurations = [configuration]
        else:
            # this configuration did not improve the previous solution
            configuration.warm_startup_info = {}

        self.api.send("new", "configuration",
                      configurations=[configuration.hyperparameters],
                      results=[configuration.results])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def _add_evaluated_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        :param configuration: Configuration object.
        :return: None
        """
        self.evaluated_configurations.append(configuration)

    def is_minimization(self):
        return self.description["General"]["isMinimizationExperiment"]

    def dump(self, folder_path: str = 'Results/serialized/'):
        """ save instance of experiment class
        """
        # Used to upload Experiment dump through web API
        os.environ["EXP_DUMP_NAME"] = self.name

        create_folder_if_not_exists(folder_path)
        file_name = '{}.pkl'.format(self.name)
        # write pickle
        with open(folder_path + file_name, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
            self.logger.info("Saved experiment instance. Path: %s" % (folder_path + file_name))

    def get_name(self):
        return self.name

    def get_running_time(self):
        if self.end_time is self.start_time:
            return datetime.datetime.now() - self.start_time
        else:
            return self.end_time - self.start_time

    def get_all_repetition_tasks(self):
        """ List of results for all tasks that were received on workers
        
        Returns:
            [List] -- List with results for all atom-tasks
        """

        all_tasks = []
        result_key = self.description['TaskConfiguration']['ResultStructure'][0]
        for configuration in self.measured_configurations:
            for task in configuration.get_tasks().values():
                if 'result' in task:
                    all_tasks.append(task['result'][result_key])
        return all_tasks

    def get_number_of_measured_configurations(self):
        return len(self.measured_configurations)

    def get_stop_condition_parameters(self):
        return self.description["StopCondition"]

    def get_selection_algorithm_parameters(self):
        return self.description["SelectionAlgorithm"]

    def get_outlier_detectors_parameters(self):
        return self.description["OutliersDetection"]

    def increment_bad_configuration_number(self):
        self.bad_configurations_number = self.bad_configurations_number + 1
        return self

    def get_bad_configuration_number(self):
        return self.bad_configurations_number

    def update_model_state(self, model_state: bool):
        self.model_is_valid = model_state

    def get_model_state(self) -> bool:
        return self.model_is_valid
Example #29
class Experiment:

    def __init__(self, description: dict):
        """
        Initialization of Experiment class
        Following fields are declared:

        self.all_configurations - list of configuration instances
                                  shape - list, e.g. ``[config_instance_1, config_instance_2, ... ]``
        self.description - description of the current experiment, it is taken from .json file
                           shape - dict with subdicts
        """
        self.logger = logging.getLogger(__name__)
        self.api = API()

        self.default_configuration = []
        self.all_configurations = []
        self._description = description
        self.search_space = []
        self.end_time = self.start_time = datetime.datetime.now()
        # A unique ID that is used to differentiate Experiments by their descriptions.
        self.id = hashlib.sha1(json.dumps(self.description, sort_keys=True).encode("utf-8")).hexdigest()
        self.name = "exp_{task_name}_{experiment_hash}".format(
            task_name=self.description["TaskConfiguration"]["TaskName"],
            experiment_hash=self.id)
        self.current_best_configurations = []

        self.__generate_search_space()

    def _get_description(self):
        return deepcopy(self._description)

    def _set_description(self, description):
        if not self._description:
            self._description = description
        else:
            self.logger.error("Unable to update Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    def _del_description(self):
        if self._description:
            self.logger.error("Unable to delete Experiment Description: Read-only property.")
            raise AttributeError("Unable to update Experiment Description: Read-only property.")

    description = property(_get_description, _set_description, _del_description)

    def __getstate__(self):
        space = self.__dict__.copy()
        del space['api']
        del space['logger']
        return space

    def __setstate__(self, space):
        self.__dict__ = space
        self.logger = logging.getLogger(__name__)
        self.api = API()

    def put_default_configuration(self, default_configuration: Configuration):
        if self._is_valid_configuration_instance(default_configuration):
            if not self.default_configuration:
                self.default_configuration = default_configuration
                self.api.send("default", "configuration",
                              configurations=[default_configuration.get_parameters()],
                              results=[default_configuration.get_average_result()])
                if default_configuration not in self.all_configurations:
                    self.all_configurations.append(default_configuration)
                    self._calculate_current_best_configurations()
            else:
                raise ValueError("The default Configuration was registered already.")

    def add_configurations(self, configurations: List[Configuration]):
        """Takes the List of Configuration objects and adds it to Experiment state.
        :param configurations: List of Configuration instances.
        """
        for configuration in configurations:
            self._put(configuration)

    def _put(self, configuration_instance: Configuration):
        """
        Takes instance of Configuration class and appends it to the list with all configuration instances.
        :param configuration_instance: Configuration class instance.
        """
        if self._is_valid_configuration_instance(configuration_instance):
            if not self.all_configurations:
                self._add_configuration_to_experiment(configuration_instance)
            else:
                is_exists = False
                for value in self.all_configurations:
                    if value.get_parameters() == configuration_instance.get_parameters():
                        is_exists = True
                if not is_exists:
                    self._add_configuration_to_experiment(configuration_instance)
                else:
                    self.logger.warning("Attempt of adding Configuration that is already in Experiment: %s" %
                                        configuration_instance)

    def get_configuration_by_parameters(self, parameters):
        """
        Returns the instance of the Configuration class which contains the concrete configuration, if that configuration exists.

        :param parameters: list. Concrete experiment configuration,
               shape - list, e.g. [2900.0, 32]
        :return: instance of Configuration class, or None if the configuration was not found
        """
        for configuration_instance in self.all_configurations:
            if configuration_instance.get_parameters() == parameters:
                return configuration_instance
        return None

    def get_final_report_and_result(self, repeater):
        #   If the model predicted a final point with a smaller value than the default, but there is
        # a measured point with a smaller value than the predicted one, report that point instead of the predicted one.
        self.end_time = datetime.datetime.now()

        self.logger.info("\n\nFinal report:")

        self.logger.info("ALL MEASURED CONFIGURATIONS:\n")
        for configuration in self.all_configurations:
            self.logger.info(configuration)
        self.logger.info("Number of measured Configurations: %s" % len(self.all_configurations))
        self.logger.info("Number of Tasks: %s" % repeater.performed_measurements)
        self.logger.info("Best found Configuration: %s" % self.get_current_solution())
        self.logger.info("BRISE running time: %s" % str(self.get_running_time()))

        all_features = []
        for configuration in self.all_configurations:
            all_features.append(configuration.get_parameters())
        self.api.send('final', 'configuration',
                      configurations=[self.get_current_solution().get_parameters()],
                      results=[[round(self.get_current_solution().get_average_result()[0], 2)]],
                      measured_points=[all_features],
                      performed_measurements=[repeater.performed_measurements])
        self.dump()  # Store instance of Experiment
        self.summarize_results_to_file()
        self.write_csv()

        return self.current_best_configurations

    def get_current_status(self, serializable: bool = False):
        """
            Returns current state of Experiment, including already elapsed time, currently found solution Configuration,
        default Configuration, Experiment description and all already evaluated Configurations.

        :param serializable: Boolean.
            Defines if returned structure should be serializable or not. If True - all Configuration objects will be
        transformed to their string representation.
        :return: Dict with following keys["Running time", "Best found Configuration",
                                        "Default configuration", "Experiment description",
                                        "Evaluated Configurations"]
        """
        current_status = {
            "Running time": str(self.get_running_time()) if serializable else self.get_running_time(),
            "Best found Configuration": self.get_current_solution().__getstate__() if serializable else self.get_current_solution(),
            "Default configuration": self.default_configuration.__getstate__() if serializable else self.default_configuration,
            "Experiment description": self.description,
            "Evaluated Configurations": [conf.__getstate__() if serializable else conf for conf in self.all_configurations]
        }
        return current_status

    def get_current_solution(self):
        self._calculate_current_best_configurations()
        return self.current_best_configurations[0]

    def get_current_best_configurations(self):
        self._calculate_current_best_configurations()
        return self.current_best_configurations

    def _is_valid_configuration_instance(self, configuration_instance):
        if isinstance(configuration_instance, Configuration):
            return True
        else:
            self.logger.error('Current object is not a Configuration instance, but %s' % type(configuration_instance))
            return False

    def _calculate_current_best_configurations(self):

        best_configuration = [self.all_configurations[0]]
        for configuration in self.all_configurations:
            if configuration.is_better_configuration(self.is_minimization(),
                                                     best_configuration[0]):
                best_configuration = [configuration]
        self.current_best_configurations = best_configuration

    def _add_configuration_to_experiment(self, configuration: Configuration) -> None:
        """
        Save configuration after passing all checks.
        This method also sends an update to API (front-end).
        :param configuration: Configuration object.
        :return: None
        """
        self.all_configurations.append(configuration)
        self.api.send("new", "configuration",
                      configurations=[configuration.get_parameters()],
                      results=[configuration.get_average_result()])
        self.logger.info("Adding to Experiment: %s" % configuration)

    def is_minimization(self):
        return self.description["General"]["isMinimizationExperiment"]

    def get_number_of_configurations_per_iteration(self):
        if "ConfigurationsPerIteration" in self.description["General"]:
            return self.description["General"]["ConfigurationsPerIteration"]
        else:
            return 0

    def __generate_search_space(self):
        self.search_space = [list(configuration) for configuration in
                             itertools.product(*self.description["DomainDescription"]["AllConfigurations"])]
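    # Illustration (hedged; the values below are hypothetical): itertools.product
    # expands the per-dimension value lists into the full cartesian search space:
    #   >>> list(itertools.product([1200.0, 2900.0], [8, 16]))
    #   [(1200.0, 8), (1200.0, 16), (2900.0, 8), (2900.0, 16)]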

    def dump(self, folder_path: str = 'Results/serialized/'):
        """ save instance of experiment class
        """
        # Used to upload Experiment dump through web API
        os.environ["EXP_DUMP_NAME"] = self.name

        create_folder_if_not_exists(folder_path)
        file_name = '{}.pkl'.format(self.name)

        # write pickle
        with open(folder_path + file_name, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
            self.logger.info("Saved experiment instance. Path: %s" % (folder_path + file_name))

    def summarize_results_to_file(self, report_format: str = 'yaml', path: str = 'Results/'):
        """
            Called before the BRISE proper termination. Aggregates current state of the Experiment and writes it as a
            json or yaml file.
        :param report_format: String. Format of output file, either 'yaml' or 'json'.
        :param path: String. Folder, where results should be stored.
        :return: self
        """
        if report_format.lower() == "yaml":
            from yaml import safe_dump
            output_file_name = path + self.name + '.yaml'
            data = safe_dump(self.get_current_status(serializable=True), width=120, indent=4)
        elif report_format.lower() == "json":
            from json import dumps
            output_file_name = path + self.name + '.json'
            data = dumps(self.get_current_status(serializable=True), indent=4)
        else:
            raise TypeError("Wrong serialization format provided. Supported formats: 'yaml' and 'json'.")
        create_folder_if_not_exists(path)
        with open(output_file_name, 'w') as output_file:
            output_file.write(data)
            self.logger.info("Results of the Experiment have been writen to file: %s" % output_file_name)

        return self

    def write_csv(self, path='Results/serialized/'):
        """save .csv file with main metrics of the experiment

        Args:
            final (bool, optional): Is the Experiment finished?. Defaults to False.
        """

        search_space = 1
        for dim in self.description['DomainDescription']['AllConfigurations']:
            search_space *= len(dim)

        data = dict({
            'model': self.description['ModelConfiguration']['ModelType'],
            'default configuration': [' '.join(
                str(v) for v in self.default_configuration.get_parameters())],
            'solution configuration': [' '.join(
                str(v) for v in self.get_current_solution().get_parameters())],
            'default result': self.default_configuration.get_average_result()[0],
            'solution result': self.get_current_solution().get_average_result()[0],
            'number of measured configurations': len(self.all_configurations),
            'search space coverage': str(round((len(self.all_configurations) / search_space) * 100)) + '%',
            'number of repetitions': len(self.get_all_repetition_tasks()),
            'execution time': (self.get_running_time()).seconds,
            'repeater': self.description['Repeater']['Type']
        })

        file_path = '{0}{1}.csv'.format(path, self.name)

        keys = list(data.keys())
        values = list(data.values())

        with open(file_path, 'a') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(keys)
            writer.writerow(values)
            self.logger.info("Saved csv file. Path: %s" % file_path)

    def get_name(self):
        return self.name

    def get_running_time(self):
        if self.end_time is self.start_time:
            return datetime.datetime.now() - self.start_time
        else:
            return self.end_time - self.start_time

    def get_all_repetition_tasks(self):
        """ List of results for all tasks that were received on workers
        
        Returns:
            [List] -- List with results for all atom-tasks
        """

        all_tasks = []
        result_key = self.description['TaskConfiguration']['ResultStructure'][0]
        for configuration in self.all_configurations:
            for task in configuration.get_tasks().values():
                if 'result' in task:
                    all_tasks.append(task['result'][result_key])
        return all_tasks

    def get_number_of_measured_configurations(self):
        return len(self.all_configurations)

    def get_search_space_size(self):
        return len(self.search_space)

    def get_stop_condition_parameters(self):
        return self.description["StopCondition"]

    def get_selection_algorithm_parameters(self):
        return self.description["SelectionAlgorithm"]
Example #30
class TestFrontApi:
    # This test set covers the functionality of the 'front_API' tools and the API singleton

    api = API()

    def test_0_build_log_message(self):
        # Test #0. Build log message according to the API message format
        # Expected result: the message of type 'string' is built independently of its content
        expected_result = "This is log"
        msg = "This is log"
        actual_result = APIMessageBuilder.build('LOG', message=msg)
        assert actual_result == expected_result

    def test_1_build_log_message_of_multiple_parts(self):
        # Test #1. Build log message according to the API message format. Message consists of several parts
        # Expected result: the single message of type 'string' is built by concatenation
        expected_result = "This is log"
        msg1 = "This is "
        msg2 = "log"
        actual_result = APIMessageBuilder.build('LOG',
                                                message1=msg1,
                                                message2=msg2)
        assert actual_result == expected_result

    def test_2_build_empty_log_message(self):
        # Test #2. Build log message according to the API message format. Message is empty
        # Expected result: the single empty message of type 'string' is built
        expected_result = ""
        actual_result = APIMessageBuilder.build('LOG')
        assert actual_result == expected_result

    def test_3_build_new_task_message(self):
        # Test #3. Build task message according to the API message format. The 'result' is an empty list
        # Expected result: the task message of type 'list' is built. It contains the "configuration-result" pair as a dictionary.
        expected_result = [{'configurations': [2200.0, 8], 'results': None}]
        configurations = [[2200.0, 8]]
        actual_result = APIMessageBuilder.build('NEW',
                                                configurations=configurations,
                                                results=[None])
        assert actual_result == expected_result

    def test_4_build_task_message_with_result(self):
        # Test #4. Build task message according to the API message format. Use different supported message types
        # Expected result: the task message of type 'list' is built. It contains the "configuration-result" pair as a dictionary.
        expected_result = [{'configurations': [2200.0, 8], 'results': 123}]
        configurations = [[2200.0, 8]]
        results = [123]
        message_types = ['DEFAULT', 'PREDICTIONS', 'FINAL']
        for message_type in message_types:
            actual_result = APIMessageBuilder.build(
                message_type, configurations=configurations, results=results)
            assert actual_result == expected_result

    def test_5_build_invalid_task_message(self):
        # Test #5. Build task message with the invalid values according to the API message format
        # Expected result: error is raised, that invalid parameters are passed
        expected_result = (
            "Invalid parameters passed to send message via API: "
            "The configurations(key parameter) are not provided or have invalid format!"
        )
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW',
                                    configurations="Invalid string",
                                    results="Invalid string")
        assert expected_result in str(excinfo.value)

    def test_6_build_incomplete_task_message(self):
        # Test #6. Build task message with incomplete parameters (without 'results')
        # Expected result: error is raised
        configurations = [[2200.0, 8]]
        expected_result = (
            "Invalid parameters passed to send message via API: "
            "The results(key parameter) are not provided or have invalid format!"
        )
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW', configurations=configurations)
        assert expected_result in str(excinfo.value)

    def test_7_build_inconsistent_task_message(self):
        # Test #7. Build task message with inconsistent dimensionality of 'configurations' and 'results'
        # Expected result: error is raised
        configurations = [[2200.0, 8]]
        results = [123, 234]
        expected_result = "Different sizes of provided parameters!"
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build('NEW',
                                    configurations=configurations,
                                    results=results)
        assert expected_result in str(excinfo.value)

    def test_8_build_experiment_message(self):
        # Test #8. Build experiment message according to the API message format
        # Expected result: the message of type 'dictionary' is built and contains all needed fields
        from tools.initial_config import load_experiment_setup
        experiment_description, searchspace_description = \
            load_experiment_setup("./Resources/EnergyExperiment/EnergyExperiment.json")
        global_config = experiment_description["General"]
        actual_result = APIMessageBuilder.build(
            'EXPERIMENT',
            global_config=global_config,
            experiment_description=experiment_description,
            searchspace_description=searchspace_description)
        assert actual_result["global_configuration"]
        assert actual_result["experiment_description"]
        assert actual_result["searchspace_description"]

    def test_9_build_incomplete_experiment_message(self):
        # Test #9. Build experiment message from incomplete parameters
        # Expected result: error is raised
        from tools.initial_config import load_experiment_setup
        expected_result = "Invalid parameters passed to send message via API: The search space description is not provided!"
        experiment_description, _ = load_experiment_setup(
            "./Resources/EnergyExperiment/EnergyExperiment.json")
        global_config = experiment_description["General"]
        with pytest.raises(KeyError) as excinfo:
            APIMessageBuilder.build(
                'EXPERIMENT',
                global_config=global_config,
                experiment_description=experiment_description)
        assert expected_result in str(excinfo.value)

    def test_10_send_valid_message(self):
        # Test #10. Send messages of all supported types and subtypes via the dummy API
        # Expected result: no errors are raised; sending is simulated
        expected_type = "log"
        expected_payload = "This is msg"
        msg = "This is msg"
        type_subtype = {
            "LOG": ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        }
        for msg_type in type_subtype:
            for msg_subtype in type_subtype[msg_type]:
                expected_subtype = msg_subtype.lower()
                actual_result = TestFrontApi.api.send(msg_type,
                                                      msg_subtype,
                                                      message=msg)
                assert actual_result[0][0] == expected_type
                assert actual_result[0][1] == expected_subtype
                assert actual_result[0][2] == expected_payload

    def test_11_send_unknown_type_of_msg(self, caplog):
        # Test #11. Try to send an unknown (unsupported) type of message
        # Expected result: an error is logged stating that the message type is not supported
        expected_result = "Message type is not supported!"
        msg = "This is msg"
        TestFrontApi.api.send('DUMMY', 'INFO', message=msg)
        for record in caplog.records:
            assert record.levelname == "ERROR"
            assert expected_result in str(record)

    def test_12_send_unknown_subtype_of_msg(self, caplog):
        # Test #12. Try to send an unknown (unsupported) subtype of message
        # Expected result: an error is logged stating that the message subtype is not supported
        expected_result = "Message subtype is not supported!"
        msg = "This is msg"
        msg_types = [
            "LOG", "NEW", "DEFAULT", "PREDICTIONS", "FINAL", "EXPERIMENT"
        ]
        for msg_type in msg_types:
            TestFrontApi.api.send(msg_type, 'dummy', message=msg)
            for record in caplog.records:
                assert record.levelname == "ERROR"
                assert expected_result in str(record)

    def test_13_send_invalid_format_of_message(self, caplog):
        # Test #13. Try to send a supported message type, but specified in an unexpected format (a list instead of a string)
        # Expected result: an error about the wrong message type object is logged
        expected_result = "Wrong API message type object!"
        msg = "This is msg"
        TestFrontApi.api.send(['LOG'], 'dummy', message=msg)
        for record in caplog.records:
            assert record.levelname == "ERROR"
            assert expected_result in str(record)

    def test_14_singleton_front_api(self):
        # Test #14. Try to create 2 instances of the API class
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API()
        assert api1 is api2

    def test_15_singleton_front_api_different_objects(self):
        # Test #15. Try to create 2 instances of the API class, initialized with different api objects
        # Expected result: only a single instance exists (due to the singleton)
        API._instance = None
        api1 = API()
        api2 = API(api_object=RabbitApi("event-service", 49153))
        assert api1 is api2
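
    # Tests #14-#15 above depend on API behaving as a singleton. The nested class below
    # is an illustrative sketch of how such a singleton is commonly implemented via
    # __new__ and a cached _instance attribute; it is an assumption made for clarity,
    # not necessarily the project's actual implementation.
    class _SingletonSketch:
        _instance = None

        def __new__(cls):
            # Reuse the already-created instance, if any; otherwise create and cache it.
            if cls._instance is None:
                cls._instance = super().__new__(cls)
            return cls._instance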

    def test_16_api_without_emit(self):
        # Test #16. Try to create an instance of the API class with an api object that lacks an emit() method
        # Expected result: AttributeError is raised, informing about the requirements for the api object
        class BadAPI:
            def no_emit(self):
                pass

        expected_result = "Provided API object doesn't contain 'emit()' method"
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)

    def test_17_api_with_wrong_emit_parameter_types(self):
        # Test #17. Try to create an instance of the API class with an api object whose emit() parameters have wrong types
        # Expected result: AttributeError is raised, informing about the requirements for the api object
        class BadAPI:
            def emit(self, message_type: dict, message_subtype: dict,
                     message: dict):
                pass

        expected_result = (
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
        )
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)

    def test_18_api_with_unexpected_emit_parameter_names(self, caplog):
        # Test #18. Try to create an instance of the API class with an api object whose emit() parameters have unexpected names
        # Expected result: the object is created, but the user is warned about the advisable parameter names
        import logging
        caplog.set_level(logging.WARNING)

        class BadAPI:
            def emit(self, dummy: str, message_subtype: str, message: str):
                pass

        expected_result = (
            "Parameter names of the emit() method are untypical for your API object."
            "It is advisable to check emit() parameters."
            "Expected parameters are: 'message_type', 'message_subtype', 'message'"
        )
        # cleanup
        API._instance = None
        API(BadAPI())
        for record in caplog.records:
            assert record.levelname == "WARNING"
            assert expected_result in str(record)

    def test_19_api_with_missing_emit_parameters(self):
        # Test #19. Try to create an instance of the API class with an api object whose emit() method is missing a parameter
        # Expected result: AttributeError is raised, informing about the requirements for the api object
        class BadAPI:
            def emit(self, message_type: str, message_subtype: str):
                pass

        expected_result = (
            "Provided API object has unsupported 'emit()' method."
            "Its parameters do not correspond to the required!"
            "Expected parameters are: 'message_type: str', 'message_subtype: str', 'message: str'"
        )
        # cleanup
        API._instance = None
        with pytest.raises(AttributeError) as excinfo:
            API(BadAPI())
        assert expected_result in str(excinfo.value)
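
    # Tests #16-#19 above exercise the validation of the injected api object. The helper
    # below is a hedged sketch, based on the standard `inspect` module, of how such a
    # check could look (presence of emit() plus its annotated parameter types); it is an
    # assumption for illustration, not necessarily the API class's real implementation.
    @staticmethod
    def _emit_validation_sketch(api_object):
        import inspect
        if not hasattr(api_object, "emit"):
            raise AttributeError("Provided API object doesn't contain 'emit()' method")
        annotations = [
            p.annotation
            for p in inspect.signature(api_object.emit).parameters.values()
        ]
        if annotations != [str, str, str]:
            raise AttributeError("Provided API object has unsupported 'emit()' method.")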

    def test_20_correct_api_object(self):
        # Test #20. Try to create an instance of the API class with a valid api object
        # Expected result: an instance is created
        class GoodAPI:
            def emit(self, message_type: str, message_subtype: str,
                     message: str):
                pass

        API._instance = None
        test_api = API(GoodAPI())
        assert isinstance(test_api, API)