Example #1
    def load_model(self, folder_path, file_name = None, is_earlystopping_format = False):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Loading model from file '{}'".format(folder_path + file_name))

        if not is_earlystopping_format:
            shutil.unpack_archive(folder_path + file_name + ".zip",
                                  folder_path + file_name + "/",
                                  "zip")

        dataIO = DataIO(folder_path=folder_path + file_name + "/")
        data_dict = dataIO.load_data(file_name="fit_attributes")

        for attrib_name in data_dict.keys():
            self.__setattr__(attrib_name, data_dict[attrib_name])

        tf.compat.v1.reset_default_graph()

        q_dims = self.p_dims[::-1]

        self.vae = _MultVAE_original(self.p_dims, q_dims=q_dims, lr=self.learning_rate, lam=self.l2_reg, random_seed=98765)
        self.saver, self.logits_var, self.loss_var, self.train_op_var, self.merged_var = self.vae.build_graph()

        self.sess = tf.compat.v1.Session()
        self.sess.run(tf.compat.v1.global_variables_initializer())

        self.saver.restore(self.sess, folder_path + file_name + "/session")

        # self.summary_writer = tf.compat.v1.summary.FileWriter(self.log_dir, graph=tf.compat.v1.get_default_graph())

        shutil.rmtree(folder_path + file_name + "/", ignore_errors=True)

        self._print("Loading complete")
Example #2
    def save_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path +
                                                       file_name))

        data_dict_to_save = {
            "item_embeddings": self.lightFM_model.item_embeddings,
            "item_embedding_gradients": self.lightFM_model.item_embedding_gradients,
            "item_embedding_momentum": self.lightFM_model.item_embedding_momentum,
            "item_biases": self.lightFM_model.item_biases,
            "item_bias_gradients": self.lightFM_model.item_bias_gradients,
            "item_bias_momentum": self.lightFM_model.item_bias_momentum,
            "user_embeddings": self.lightFM_model.user_embeddings,
            "user_embedding_gradients": self.lightFM_model.user_embedding_gradients,
            "user_embedding_momentum": self.lightFM_model.user_embedding_momentum,
            "user_biases": self.lightFM_model.user_biases,
            "user_bias_gradients": self.lightFM_model.user_bias_gradients,
            "user_bias_momentum": self.lightFM_model.user_bias_momentum,
        }

        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name,
                         data_dict_to_save=data_dict_to_save)

        self._print("Saving complete")
Example #3
    def load_data(self, save_folder_path):

        dataIO = DataIO(folder_path=save_folder_path)

        global_attributes_dict = dataIO.load_data(
            file_name="dataset_global_attributes")

        for attrib_name, attrib_object in global_attributes_dict.items():
            self.__setattr__(attrib_name, attrib_object)

        self.AVAILABLE_URM = dataIO.load_data(file_name="dataset_URM")

        if self._HAS_ICM:
            self.AVAILABLE_ICM = dataIO.load_data(file_name="dataset_ICM")
            self.AVAILABLE_ICM_feature_mapper = dataIO.load_data(
                file_name="dataset_ICM_mappers")

        if self._HAS_UCM:
            self.AVAILABLE_UCM = dataIO.load_data(file_name="dataset_UCM")
            self.AVAILABLE_UCM_feature_mapper = dataIO.load_data(
                file_name="dataset_UCM_mappers")

        if self._HAS_additional_mapper:
            self.dataset_additional_mappers = dataIO.load_data(
                file_name="dataset_additional_mappers")
Example #4
    def _set_search_attributes(self, recommender_input_args,
                               recommender_input_args_last_test,
                               hyperparameter_names, metric_to_optimize,
                               cutoff_to_optimize, output_folder_path,
                               output_file_name_root, resume_from_saved,
                               save_metadata, save_model, evaluate_on_test,
                               n_cases, terminate_on_memory_error):

        if save_model not in self._SAVE_MODEL_VALUES:
            raise ValueError(
                "{}: argument save_model must be in '{}', provided was '{}'.".
                format(self.ALGORITHM_NAME, self._SAVE_MODEL_VALUES,
                       save_model))

        if evaluate_on_test not in self._EVALUATE_ON_TEST_VALUES:
            raise ValueError(
                "{}: argument evaluate_on_test must be in '{}', provided was '{}'."
                .format(self.ALGORITHM_NAME, self._EVALUATE_ON_TEST_VALUES,
                        evaluate_on_test))

        self.output_folder_path = output_folder_path
        self.output_file_name_root = output_file_name_root

        # If directory does not exist, create
        if not os.path.exists(self.output_folder_path):
            os.makedirs(self.output_folder_path)

        self.log_file = open(
            self.output_folder_path + self.output_file_name_root +
            "_{}.txt".format(self.ALGORITHM_NAME), "a")

        if save_model == "last" and recommender_input_args_last_test is None:
            self._write_log(
                "{}: argument save_model is 'last' but no recommender_input_args_last_test provided, saving best model on train data alone."
                .format(self.ALGORITHM_NAME))
            save_model = "best"

        self.recommender_input_args = recommender_input_args
        self.recommender_input_args_last_test = recommender_input_args_last_test
        self.metric_to_optimize = metric_to_optimize
        self.cutoff_to_optimize = cutoff_to_optimize
        self.resume_from_saved = resume_from_saved
        self.terminate_on_memory_error = terminate_on_memory_error
        self.save_metadata = save_metadata
        self.save_model = save_model
        self.evaluate_on_test = "no" if self.evaluator_test is None else evaluate_on_test

        self.model_counter = 0
        self.n_cases = n_cases
        self._init_metadata_dict(n_cases=n_cases,
                                 hyperparameter_names=hyperparameter_names)

        if self.save_metadata:
            self.dataIO = DataIO(folder_path=self.output_folder_path)
Example #5
    def save_model(self, folder_path, file_name = None, is_earlystopping_format = False):

        #https://cv-tricks.com/tensorflow-tutorial/save-restore-tensorflow-models-quick-complete-tutorial/

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path + file_name))

        # Save the TF session inside a temporary folder named after the desired file_name
        saver = tf.compat.v1.train.Saver()
        saver.save(self.sess, folder_path + file_name + "/session")

        data_dict_to_save = {
            "batch_size": self.batch_size,
            "dropout": self.dropout,
            "learning_rate": self.learning_rate,
            "l2_reg": self.l2_reg,
            "total_anneal_steps": self.total_anneal_steps,
            "anneal_cap": self.anneal_cap,
            "update_count": self.update_count,
            "p_dims": self.p_dims,
            "batches_per_epoch": self.batches_per_epoch,
            # "log_dir": self.log_dir,
            # "chkpt_dir": self.chkpt_dir,
        }

        dataIO = DataIO(folder_path=folder_path + file_name + "/")
        dataIO.save_data(file_name="fit_attributes", data_dict_to_save = data_dict_to_save)

        # Create a zip archive containing fit_attributes and the saved session
        if not is_earlystopping_format:
            # Unfortunately compression cannot be avoided here, which makes this too slow for earlystopping
            shutil.make_archive(
              folder_path + file_name,          # name of the file to create
              'zip',                            # archive format - or tar, bztar, gztar
              root_dir = folder_path + file_name + "/",     # root for archive
              base_dir = None)                  # start archiving from the root_dir

            shutil.rmtree(folder_path + file_name + "/", ignore_errors=True)
        #
        # else:
        #
        #     with zipfile.ZipFile(folder_path + file_name + ".zip", 'w', compression=zipfile.ZIP_STORED) as myzip:
        #         for file_to_compress in os.listdir(folder_path + file_name + "/"):
        #             myzip.write(folder_path + file_name + "/" + file_to_compress, arcname = file_to_compress)
        #



        self._print("Saving complete")
Example #6
    def load_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Loading model from file '{}'".format(folder_path +
                                                          file_name))

        dataIO = DataIO(folder_path=folder_path)
        data_dict = dataIO.load_data(file_name=file_name)

        for attrib_name in data_dict.keys():
            self.__setattr__(attrib_name, data_dict[attrib_name])

        self._print("Loading complete")
Example #7
    def save_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path +
                                                       file_name))

        data_dict_to_save = {"item_pop": self.item_pop}

        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name,
                         data_dict_to_save=data_dict_to_save)

        self._print("Saving complete")
Example #8
    def save_model(self, folder_path, file_name=None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        print("{}: Saving model in file '{}'".format(self.RECOMMENDER_NAME,
                                                     folder_path + file_name))

        data_dict_to_save = {
            "D_best": self.D_best,
            "topK": self.topK,
            "W_sparse": self.W_sparse
        }

        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name,
                         data_dict_to_save=data_dict_to_save)

        print("{}: Saving complete".format(self.RECOMMENDER_NAME))
Example #9
    def _load_previously_built_split_and_attributes(self, save_folder_path):
        """
        Loads all URM and ICM
        :return:
        """

        if self.allow_cold_users:
            allow_cold_users_suffix = "allow_cold_users"
        else:
            allow_cold_users_suffix = "only_warm_users"

        if self.user_wise:
            user_wise_string = "user_wise"
        else:
            user_wise_string = "global_sample"

        name_suffix = "_{}_{}".format(allow_cold_users_suffix,
                                      user_wise_string)

        dataIO = DataIO(folder_path=save_folder_path)

        split_parameters_dict = dataIO.load_data(file_name="split_parameters" +
                                                 name_suffix)

        for attrib_name in split_parameters_dict.keys():
            self.__setattr__(attrib_name, split_parameters_dict[attrib_name])

        self.SPLIT_GLOBAL_MAPPER_DICT = dataIO.load_data(
            file_name="split_mappers" + name_suffix)

        self.SPLIT_URM_DICT = dataIO.load_data(file_name="split_URM" +
                                               name_suffix)

        if len(self.dataReader_object.get_loaded_ICM_names()) > 0:
            self.SPLIT_ICM_DICT = dataIO.load_data(file_name="split_ICM" +
                                                   name_suffix)

            self.SPLIT_ICM_MAPPER_DICT = dataIO.load_data(
                file_name="split_ICM_mappers" + name_suffix)

        if len(self.dataReader_object.get_loaded_UCM_names()) > 0:
            self.SPLIT_UCM_DICT = dataIO.load_data(file_name="split_UCM" +
                                                   name_suffix)

            self.SPLIT_UCM_MAPPER_DICT = dataIO.load_data(
                file_name="split_UCM_mappers" + name_suffix)
Example #10
    def save_model(self, folder_path, file_name = None):

        if file_name is None:
            file_name = self.RECOMMENDER_NAME

        self._print("Saving model in file '{}'".format(folder_path + file_name))

        data_dict_to_save = {"USER_factors": self.USER_factors,
                              "ITEM_factors": self.ITEM_factors,
                              "use_bias": self.use_bias,
                            }

        if self.use_bias:
            data_dict_to_save["ITEM_bias"] = self.ITEM_bias
            data_dict_to_save["USER_bias"] = self.USER_bias
            data_dict_to_save["GLOBAL_bias"] = self.GLOBAL_bias

        dataIO = DataIO(folder_path=folder_path)
        dataIO.save_data(file_name=file_name, data_dict_to_save = data_dict_to_save)


        self._print("Saving complete")
Example #11
    def save_data(self, save_folder_path):

        dataIO = DataIO(folder_path = save_folder_path)

        global_attributes_dict = {
            "item_original_ID_to_index": self.item_original_ID_to_index,
            "user_original_ID_to_index": self.user_original_ID_to_index,
            "DATASET_NAME": self.DATASET_NAME,
            "_IS_IMPLICIT": self._IS_IMPLICIT,
            "_HAS_ICM": self._HAS_ICM,
            "_HAS_UCM": self._HAS_UCM,
            "_HAS_additional_mapper": self._HAS_additional_mapper
        }

        dataIO.save_data(data_dict_to_save = global_attributes_dict,
                         file_name = "dataset_global_attributes")

        dataIO.save_data(data_dict_to_save = self.AVAILABLE_URM,
                         file_name = "dataset_URM")

        if self._HAS_ICM:
            dataIO.save_data(data_dict_to_save = self.AVAILABLE_ICM,
                             file_name = "dataset_ICM")

            dataIO.save_data(data_dict_to_save = self.AVAILABLE_ICM_feature_mapper,
                             file_name = "dataset_ICM_mappers")

        if self._HAS_UCM:
            dataIO.save_data(data_dict_to_save = self.AVAILABLE_UCM,
                             file_name = "dataset_UCM")

            dataIO.save_data(data_dict_to_save = self.AVAILABLE_UCM_feature_mapper,
                             file_name = "dataset_UCM_mappers")

        if self._HAS_additional_mapper:
            dataIO.save_data(data_dict_to_save = self.additional_data_mapper,
                             file_name = "dataset_additional_mappers")
Example #12
    def _save_split(self, save_folder_path):

        if save_folder_path:

            if self.allow_cold_users:
                allow_cold_users_suffix = "allow_cold_users"

            else:
                allow_cold_users_suffix = "only_warm_users"

            if self.user_wise:
                user_wise_string = "user_wise"
            else:
                user_wise_string = "global_sample"

            name_suffix = "_{}_{}".format(allow_cold_users_suffix,
                                          user_wise_string)

            split_parameters_dict = {
                "input_split_interaction_quota_list": self.input_split_interaction_quota_list,
                "actual_split_interaction_quota_list": self.actual_split_interaction_quota_list,
                "allow_cold_users": self.allow_cold_users
            }

            dataIO = DataIO(folder_path=save_folder_path)

            dataIO.save_data(data_dict_to_save=split_parameters_dict,
                             file_name="split_parameters" + name_suffix)

            dataIO.save_data(data_dict_to_save=self.SPLIT_GLOBAL_MAPPER_DICT,
                             file_name="split_mappers" + name_suffix)

            dataIO.save_data(data_dict_to_save=self.SPLIT_URM_DICT,
                             file_name="split_URM" + name_suffix)

            if len(self.SPLIT_ICM_DICT) > 0:
                dataIO.save_data(data_dict_to_save=self.SPLIT_ICM_DICT,
                                 file_name="split_ICM" + name_suffix)

                dataIO.save_data(data_dict_to_save=self.SPLIT_ICM_MAPPER_DICT,
                                 file_name="split_ICM_mappers" + name_suffix)

            if len(self.SPLIT_UCM_DICT) > 0:
                dataIO.save_data(data_dict_to_save=self.SPLIT_UCM_DICT,
                                 file_name="split_UCM" + name_suffix)

                dataIO.save_data(data_dict_to_save=self.SPLIT_UCM_MAPPER_DICT,
                                 file_name="split_UCM_mappers" + name_suffix)
Example #13
    def _save_split(self, save_folder_path):

        if save_folder_path:

            if self.allow_cold_users:
                allow_cold_users_suffix = "allow_cold_users"

            else:
                allow_cold_users_suffix = "only_warm_users"

            if self.use_validation_set:
                validation_set_suffix = "use_validation_set"
            else:
                validation_set_suffix = "no_validation_set"

            name_suffix = "_{}_{}".format(allow_cold_users_suffix,
                                          validation_set_suffix)

            split_parameters_dict = {
                "k_out_value": self.k_out_value,
                "allow_cold_users": self.allow_cold_users,
                "removed_cold_users": self.removed_cold_users,
            }

            dataIO = DataIO(folder_path=save_folder_path)

            dataIO.save_data(data_dict_to_save=split_parameters_dict,
                             file_name="split_parameters" + name_suffix)

            dataIO.save_data(data_dict_to_save=self.SPLIT_GLOBAL_MAPPER_DICT,
                             file_name="split_mappers" + name_suffix)

            dataIO.save_data(data_dict_to_save=self.SPLIT_URM_DICT,
                             file_name="split_URM" + name_suffix)

            if len(self.SPLIT_ICM_DICT) > 0:
                dataIO.save_data(data_dict_to_save=self.SPLIT_ICM_DICT,
                                 file_name="split_ICM" + name_suffix)

                dataIO.save_data(data_dict_to_save=self.SPLIT_ICM_MAPPER_DICT,
                                 file_name="split_ICM_mappers" + name_suffix)

            if len(self.SPLIT_UCM_DICT) > 0:
                dataIO.save_data(data_dict_to_save=self.SPLIT_UCM_DICT,
                                 file_name="split_UCM" + name_suffix)

                dataIO.save_data(data_dict_to_save=self.SPLIT_UCM_MAPPER_DICT,
                                 file_name="split_UCM_mappers" + name_suffix)
Example #14
class SearchAbstractClass(object):

    ALGORITHM_NAME = "SearchAbstractClass"

    # Available values for the save_model attribute
    _SAVE_MODEL_VALUES = ["all", "best", "last", "no"]

    # Available values for the evaluate_on_test attribute
    _EVALUATE_ON_TEST_VALUES = ["all", "best", "last", "no"]

    # Value to be assigned to an invalid configuration or if an Exception is raised
    INVALID_CONFIG_VALUE = np.finfo(np.float16).max

    def __init__(self,
                 recommender_class,
                 evaluator_validation=None,
                 evaluator_test=None,
                 verbose=True):

        super(SearchAbstractClass, self).__init__()

        self.recommender_class = recommender_class
        self.verbose = verbose
        self.log_file = None
        self.evaluator_validation = evaluator_validation

        self.evaluator_test = evaluator_test

    def search(
        self,
        recommender_input_args,
        hyperparameter_search_space,
        metric_to_optimize="MAP",
        cutoff_to_optimize=None,
        n_cases=None,
        output_folder_path=None,
        output_file_name_root=None,
        parallelize=False,
        save_model="best",
        evaluate_on_test="best",
        save_metadata=True,
        terminate_on_memory_error=True,
    ):

        raise NotImplementedError(
            "Function search not implemented for this class")

    def _was_already_evaluated_check(self, current_fit_hyperparameters_dict):
        """
        Check if the current hyperparameter configuration was already evaluated
        :param current_fit_hyperparameters_dict:
        :return:
        """

        raise NotImplementedError(
            "Function _was_already_evaluated_check not implemented for this class")

    def _set_search_attributes(self, recommender_input_args,
                               recommender_input_args_last_test,
                               hyperparameter_names, metric_to_optimize,
                               cutoff_to_optimize, output_folder_path,
                               output_file_name_root, resume_from_saved,
                               save_metadata, save_model, evaluate_on_test,
                               n_cases, terminate_on_memory_error):

        if save_model not in self._SAVE_MODEL_VALUES:
            raise ValueError(
                "{}: argument save_model must be in '{}', provided was '{}'.".
                format(self.ALGORITHM_NAME, self._SAVE_MODEL_VALUES,
                       save_model))

        if evaluate_on_test not in self._EVALUATE_ON_TEST_VALUES:
            raise ValueError(
                "{}: argument evaluate_on_test must be in '{}', provided was '{}'."
                .format(self.ALGORITHM_NAME, self._EVALUATE_ON_TEST_VALUES,
                        evaluate_on_test))

        self.output_folder_path = output_folder_path
        self.output_file_name_root = output_file_name_root

        # If directory does not exist, create
        if not os.path.exists(self.output_folder_path):
            os.makedirs(self.output_folder_path)

        self.log_file = open(
            self.output_folder_path + self.output_file_name_root +
            "_{}.txt".format(self.ALGORITHM_NAME), "a")

        if save_model == "last" and recommender_input_args_last_test is None:
            self._write_log(
                "{}: argument save_model is 'last' but no recommender_input_args_last_test provided, saving best model on train data alone."
                .format(self.ALGORITHM_NAME))
            save_model = "best"

        self.recommender_input_args = recommender_input_args
        self.recommender_input_args_last_test = recommender_input_args_last_test
        self.metric_to_optimize = metric_to_optimize
        self.cutoff_to_optimize = cutoff_to_optimize
        self.resume_from_saved = resume_from_saved
        self.terminate_on_memory_error = terminate_on_memory_error
        self.save_metadata = save_metadata
        self.save_model = save_model
        self.evaluate_on_test = "no" if self.evaluator_test is None else evaluate_on_test

        self.model_counter = 0
        self.n_cases = n_cases
        self._init_metadata_dict(n_cases=n_cases,
                                 hyperparameter_names=hyperparameter_names)

        if self.save_metadata:
            self.dataIO = DataIO(folder_path=self.output_folder_path)

    def _init_metadata_dict(self, n_cases, hyperparameter_names):

        self.metadata_dict = {
            "algorithm_name_search": self.ALGORITHM_NAME,
            "algorithm_name_recommender": self.recommender_class.RECOMMENDER_NAME,
            "metric_to_optimize": self.metric_to_optimize,
            "cutoff_to_optimize": self.cutoff_to_optimize,
            "exception_list": [None] * n_cases,
            "hyperparameters_df": pd.DataFrame(columns=hyperparameter_names,
                                               index=np.arange(n_cases),
                                               dtype=object),
            "hyperparameters_best": None,
            "hyperparameters_best_index": None,
            "result_on_validation_df": None,
            "result_on_validation_best": None,
            "result_on_test_df": None,
            "result_on_test_best": None,
            "time_df": pd.DataFrame(columns=["train", "validation", "test"],
                                    index=np.arange(n_cases)),
            "time_on_train_total": 0.0,
            "time_on_train_avg": 0.0,
            "time_on_validation_total": 0.0,
            "time_on_validation_avg": 0.0,
            "time_on_test_total": 0.0,
            "time_on_test_avg": 0.0,
            "result_on_last": None,
            "time_on_last_df": pd.DataFrame(columns=["train", "test"], index=[0]),
        }

    def _print(self, string):
        if self.verbose:
            print(string)

    def _write_log(self, string):

        self._print(string)

        if self.log_file is not None:
            self.log_file.write(string)
            self.log_file.flush()

    def _fit_model(self, current_fit_hyperparameters):

        start_time = time.time()

        # Construct a new recommender instance
        recommender_instance = self.recommender_class(
            *self.recommender_input_args.CONSTRUCTOR_POSITIONAL_ARGS,
            **self.recommender_input_args.CONSTRUCTOR_KEYWORD_ARGS)

        recommender_instance.fit(
            *self.recommender_input_args.FIT_POSITIONAL_ARGS,
            **self.recommender_input_args.FIT_KEYWORD_ARGS,
            **self.recommender_input_args.EARLYSTOPPING_KEYWORD_ARGS,
            **current_fit_hyperparameters)

        train_time = time.time() - start_time

        return recommender_instance, train_time

    def _evaluate_on_validation(self, current_fit_hyperparameters,
                                was_already_evaluated_flag,
                                was_already_evaluated_index):
        """
        Fit and evaluate the model with the given hyperparameter configuration on the validation set,
        or load a previously explored configuration
        :param current_fit_hyperparameters:
        :param was_already_evaluated_flag:
        :param was_already_evaluated_index:
        :return:
        """

        if not was_already_evaluated_flag:
            # Add hyperparameter values into dataframe iteratively because the simple solution
            # hyperparameters_df.loc[self.model_counter] = current_fit_hyperparameters
            # would (sometimes?) automatically convert integers to floats, which is undesirable (e.g., for the topK value)
            # This occurs when the dictionary contains only numerical data (int, floats) but not when it contains also strings or booleans
            for key in current_fit_hyperparameters.keys():
                self.metadata_dict["hyperparameters_df"].loc[
                    self.model_counter, key] = current_fit_hyperparameters[key]

            recommender_instance, train_time = self._fit_model(
                current_fit_hyperparameters)
            start_time = time.time()

            # Evaluate recommender and get results for the first cutoff
            result_df, _ = self.evaluator_validation.evaluateRecommender(
                recommender_instance)

            evaluation_time = time.time() - start_time

            # If the recommender uses Earlystopping, get the selected number of epochs instead of the maximum
            if isinstance(recommender_instance,
                          Incremental_Training_Early_Stopping):
                for epoch_key, epoch_value in recommender_instance.get_early_stopping_final_epochs_dict(
                ).items():
                    self.metadata_dict["hyperparameters_df"].loc[
                        self.model_counter, epoch_key] = int(epoch_value)

        else:
            # If it was already evaluated load the data
            recommender_instance = None

            self.metadata_dict["hyperparameters_df"].loc[
                self.
                model_counter] = self.metadata_dict["hyperparameters_df"].loc[
                    was_already_evaluated_index].copy()
            result_df = self.metadata_dict["result_on_validation_df"].loc[
                was_already_evaluated_index].copy()
            train_time = self.metadata_dict["time_df"].loc[
                was_already_evaluated_index, "train"]
            evaluation_time = self.metadata_dict["time_df"].loc[
                was_already_evaluated_index, "validation"]

        if self.metadata_dict["result_on_validation_df"] is None:
            # The dataframe will have the case number and cutoff as index, the metric name as column
            self.metadata_dict[
                "result_on_validation_df"] = create_result_multiindex_dataframe(
                    self.n_cases, result_df)

        add_result_to_multiindex_dataframe(
            self.metadata_dict["result_on_validation_df"], result_df,
            self.model_counter)

        self.metadata_dict["time_df"].loc[self.model_counter,
                                          "train"] = train_time
        self.metadata_dict["time_df"].loc[self.model_counter,
                                          "validation"] = evaluation_time

        self.metadata_dict["time_on_train_avg"] = self.metadata_dict[
            "time_df"]["train"].mean(axis=0, skipna=True)
        self.metadata_dict["time_on_train_total"] = self.metadata_dict[
            "time_df"]["train"].sum(axis=0, skipna=True)
        self.metadata_dict["time_on_validation_avg"] = self.metadata_dict[
            "time_df"]["validation"].mean(axis=0, skipna=True)
        self.metadata_dict["time_on_validation_total"] = self.metadata_dict[
            "time_df"]["validation"].sum(axis=0, skipna=True)

        return result_df, recommender_instance

    def _evaluate_on_test(self,
                          recommender_instance,
                          current_fit_hyperparameters_dict,
                          was_already_evaluated_flag,
                          was_already_evaluated_index,
                          print_log=True):

        if was_already_evaluated_flag:
            result_df_test = self.metadata_dict['result_on_test_df'].loc[
                was_already_evaluated_index].copy()
            evaluation_test_time = self.metadata_dict["time_df"].loc[
                was_already_evaluated_index, "test"]

        else:
            start_time = time.time()
            result_df_test, _ = self.evaluator_test.evaluateRecommender(
                recommender_instance)
            evaluation_test_time = time.time() - start_time

        result_string = get_result_string_df(result_df_test)

        if print_log:
            self._write_log(
                "{}: Config evaluated with evaluator_test. Config: {} - results:\n{}\n"
                .format(self.ALGORITHM_NAME, current_fit_hyperparameters_dict,
                        result_string))

        if self.metadata_dict["result_on_test_df"] is None:
            # The dataframe will have the case number and cutoff as index, the metric name as column
            self.metadata_dict[
                "result_on_test_df"] = create_result_multiindex_dataframe(
                    self.n_cases, result_df_test)

        add_result_to_multiindex_dataframe(
            self.metadata_dict["result_on_test_df"], result_df_test,
            self.model_counter)

        self.metadata_dict["time_df"].loc[self.model_counter,
                                          "test"] = evaluation_test_time
        self.metadata_dict["time_on_test_avg"] = self.metadata_dict["time_df"][
            "test"].mean(axis=0, skipna=True)
        self.metadata_dict["time_on_test_total"] = self.metadata_dict[
            "time_df"]["test"].sum(axis=0, skipna=True)

        return result_df_test

    def _evaluate_on_test_with_data_last(self):

        start_time = time.time()

        # Construct a new recommender instance
        recommender_instance = self.recommender_class(
            *self.recommender_input_args_last_test.CONSTRUCTOR_POSITIONAL_ARGS,
            **self.recommender_input_args_last_test.CONSTRUCTOR_KEYWORD_ARGS)

        # Check if last was already evaluated
        if self.resume_from_saved and self.metadata_dict[
                "result_on_last"] is not None:
            self._print(
                "{}: Resuming '{}'... Result on last already available.".
                format(self.ALGORITHM_NAME, self.output_file_name_root))
            return

        self._print(
            "{}: Evaluation with constructor data for final test. Using best config: {}"
            .format(self.ALGORITHM_NAME,
                    self.metadata_dict["hyperparameters_best"]))

        # Use the hyperparameters that have been saved
        assert self.metadata_dict[
            "hyperparameters_best"] is not None, "{}: Best hyperparameters not available, the search might have failed.".format(
                self.ALGORITHM_NAME)
        hyperparameters_best_args = self.metadata_dict[
            "hyperparameters_best"].copy()

        recommender_instance.fit(
            *self.recommender_input_args_last_test.FIT_POSITIONAL_ARGS,
            **self.recommender_input_args.FIT_KEYWORD_ARGS,
            **hyperparameters_best_args)

        train_time = time.time() - start_time
        self.metadata_dict["time_on_last_df"].loc[0, "train"] = train_time

        if self.evaluate_on_test in ["all", "best", "last"]:
            start_time = time.time()
            result_df_test, _ = self.evaluator_test.evaluateRecommender(
                recommender_instance)
            evaluation_test_time = time.time() - start_time

            self._write_log(
                "{}: Best config evaluated with evaluator_test with constructor data for final test. Config: {} - results:\n{}\n"
                .format(self.ALGORITHM_NAME,
                        self.metadata_dict["hyperparameters_best"],
                        get_result_string_df(result_df_test)))
            self.metadata_dict["result_on_last"] = result_df_test
            self.metadata_dict["time_on_last_df"].loc[
                0, "test"] = evaluation_test_time

        if self.save_metadata:
            self.dataIO.save_data(data_dict_to_save=self.metadata_dict.copy(),
                                  file_name=self.output_file_name_root +
                                  "_metadata")

        if self.save_model in ["all", "best", "last"]:
            self._print("{}: Saving model in {}\n".format(
                self.ALGORITHM_NAME,
                self.output_folder_path + self.output_file_name_root))
            recommender_instance.save_model(
                self.output_folder_path,
                file_name=self.output_file_name_root + "_best_model_last")

    def _objective_function(self, current_fit_hyperparameters_dict):

        try:
            self._print("{}: Testing config: {}".format(
                self.ALGORITHM_NAME, current_fit_hyperparameters_dict))

            was_already_evaluated_flag, was_already_evaluated_index = self._was_already_evaluated_check(
                current_fit_hyperparameters_dict)
            result_df, recommender_instance = self._evaluate_on_validation(
                current_fit_hyperparameters_dict, was_already_evaluated_flag,
                was_already_evaluated_index)

            result_series = result_df.loc[
                self.metadata_dict["cutoff_to_optimize"]]
            current_result = -result_series[self.metric_to_optimize]

            current_fit_hyperparameters_dict = self.metadata_dict[
                "hyperparameters_df"].loc[self.model_counter].to_dict()

            # Save current model if "all" is chosen
            if self.save_model in ["all"] and not was_already_evaluated_flag:
                self._print("{}: Saving model in {}\n".format(
                    self.ALGORITHM_NAME,
                    self.output_folder_path + self.output_file_name_root))
                recommender_instance.save_model(
                    self.output_folder_path,
                    file_name=self.output_file_name_root +
                    "_model_{}".format(self.model_counter))

            # Check if this is a new best hyperparameter configuration
            if self.metadata_dict["result_on_validation_best"] is None:
                new_best_config_found = True
            else:
                best_solution_val = self.metadata_dict[
                    "result_on_validation_best"][self.metric_to_optimize]
                new_best_config_found = best_solution_val < result_series[
                    self.metric_to_optimize]

            if new_best_config_found:
                self._write_log(
                    "{}: New best config found. Config {}: {} - results: {}\n".
                    format(
                        self.ALGORITHM_NAME, self.model_counter,
                        current_fit_hyperparameters_dict,
                        get_result_string_prettyprint(result_series,
                                                      n_decimals=7)))

                if self.evaluate_on_test in ["all", "best"]:
                    result_df_test = self._evaluate_on_test(
                        recommender_instance,
                        current_fit_hyperparameters_dict,
                        was_already_evaluated_flag,
                        was_already_evaluated_index,
                        print_log=True)

            else:

                # Config is either suboptimal or was already explored previously
                self._write_log(
                    "{}: Config {} {}. Config: {} - results: {}\n".format(
                        self.ALGORITHM_NAME, self.model_counter,
                        "is suboptimal" if not was_already_evaluated_flag else
                        "was already explored at index {}".format(
                            was_already_evaluated_index),
                        current_fit_hyperparameters_dict,
                        get_result_string_prettyprint(result_series,
                                                      n_decimals=7)))

                if self.evaluate_on_test in ["all"]:
                    result_df_test = self._evaluate_on_test(
                        recommender_instance,
                        current_fit_hyperparameters_dict,
                        was_already_evaluated_flag,
                        was_already_evaluated_index,
                        print_log=True)

            if current_result >= self.INVALID_CONFIG_VALUE:
                self._write_log(
                    "{}: WARNING! Config {} returned a value equal or worse than the default value to be assigned to invalid configurations."
                    " If no better valid configuration is found, this hyperparameter search may produce an invalid result.\n"
                    .format(self.ALGORITHM_NAME, self.model_counter))

            if new_best_config_found:
                self.metadata_dict[
                    "hyperparameters_best"] = current_fit_hyperparameters_dict.copy(
                    )
                self.metadata_dict[
                    "hyperparameters_best_index"] = self.model_counter
                self.metadata_dict[
                    "result_on_validation_best"] = result_series.to_dict()

                if self.evaluate_on_test in ["all", "best"]:
                    self.metadata_dict[
                        "result_on_test_best"] = result_df_test.copy()

                # Clean any previous data about the "last"
                # If the search has been extended then the "last" is recomputed only if a better solution is found
                self.metadata_dict["result_on_last"] = None
                self.metadata_dict["time_on_last_df"] = pd.DataFrame(
                    columns=["train", "test"], index=[0])

                # Save the best model if "all" or "best" is chosen
                if self.save_model in ["all", "best"]:
                    self._print("{}: Saving model in {}\n".format(
                        self.ALGORITHM_NAME,
                        self.output_folder_path + self.output_file_name_root))
                    recommender_instance.save_model(
                        self.output_folder_path,
                        file_name=self.output_file_name_root + "_best_model")

        except (KeyboardInterrupt, SystemExit) as e:
            # If getting an interrupt, terminate without saving the exception
            raise e

        # Catch memory errors here (terminating the search) only if terminate_on_memory_error is True;
        # otherwise this clause matches NeverMatch, i.e. nothing, and the generic handler below applies
        except (_ArrayMemoryError,
                MemoryError) if self.terminate_on_memory_error else (
                    NeverMatch) as e:
            self._print(
                "{}: Search for '{}' interrupted due to MemoryError.".format(
                    self.ALGORITHM_NAME,
                    self.metadata_dict["algorithm_name_recommender"]))
            return

        except:
            # Catch any error: Exception, Tensorflow errors etc...
            traceback_string = traceback.format_exc()
            self._write_log(
                "{}: Config {} Exception. Config: {} - Exception: {}\n".format(
                    self.ALGORITHM_NAME, self.model_counter,
                    current_fit_hyperparameters_dict, traceback_string))

            self.metadata_dict["exception_list"][
                self.model_counter] = traceback_string

            # Assign to this configuration the worst possible score
            # Being a minimization problem, set it to the max value of a float
            current_result = +self.INVALID_CONFIG_VALUE
            traceback.print_exc()

        if self.save_metadata:
            self.dataIO.save_data(data_dict_to_save=self.metadata_dict.copy(),
                                  file_name=self.output_file_name_root +
                                  "_metadata")

        self.model_counter += 1

        return current_result
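The conditional except clause in _objective_function references NeverMatch, which is not defined in this listing. The intent is an exception type that is never raised anywhere, so that when terminate_on_memory_error is False the memory-error branch matches nothing and the generic handler takes over; a minimal sketch (an assumption, not the framework's actual definition) would be:

class NeverMatch(Exception):
    # Sketch: never raised anywhere; used only so that the conditional
    # "except (...) if terminate_on_memory_error else (NeverMatch)" clause can match nothing.
    pass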