Example #1
0
    def feature_training_folding(self,
                                 initialize_model=False,
                                 handle_bias=False,
                                 verbose=False):
        """Run one SGD training pass used when folding new data into the model.

        The heavy per-rating loop is delegated to the Cython helpers
        ``estimator_loop_with_bias`` / ``estimator_loop_without_bias``;
        afterwards the per-component means are refreshed.

            * initialize_model: If True, reset the feature matrices to the
              constant ``feature_init`` before training; otherwise resume
              from the parameters already stored. [False]
            * handle_bias: Use the bias-aware estimator loop if True. [False]
            * verbose: Print progress information if True. [False]
        """
        # Start from constant-valued feature matrices when a fresh model
        # is requested; otherwise keep the previously trained parameters.
        if initialize_model:
            self.svd_v = self.feature_init + np.zeros(
                (self.dimensionality, self.nbr_users))
            self.svd_u = self.feature_init + np.zeros(
                (self.dimensionality, self.nbr_items))

        idx, vals = self.get_ratings(randomize_order=True)
        verbosity = int(verbose)

        if handle_bias:
            estimator_loop_with_bias(
                self.min_epochs, self.max_epochs, self.min_improvement,
                self.dimensionality, self.feature_init, self.learning_rate,
                self.K, self.overall_bias, self.svd_u, self.svd_v, idx, vals,
                self.items_bias, self.users_bias, self.nbr_users,
                self.nbr_items, verbosity)
        else:
            estimator_loop_without_bias(
                self.min_epochs, self.max_epochs, self.min_improvement,
                self.dimensionality, self.feature_init, self.learning_rate,
                self.K, self.svd_u, self.svd_v, idx, vals, self.nbr_users,
                self.nbr_items, verbosity)

        # Keep the cached component means in sync with the new features.
        self.compute_components_mean()
Example #2
0
    def feature_training(self,
                         initialize_model=True,
                         handle_bias=False,
                         verbose=False):
        """Train the recommender's model parameters with Stochastic Gradient
        Descent (SGD).

        This method is the core of the recommender: given the user/item
        rating data, it fits the latent feature matrices that the predictor
        later uses to compute a rating for each user-item pair. The
        loop-intensive part runs in the Cython-optimized
        ``estimator_loop_with_bias`` / ``estimator_loop_without_bias``
        functions.

            * initialize_model: If True, the model parameters are reset to
              the constant ``feature_init`` before training. If False, the
              parameters already stored are used as the starting point. [True]
            * handle_bias: Also fit per-user/per-item bias terms if True. [False]
            * verbose: Print progress information if True. [False]
        """
        # Reset the feature matrices unless the caller wants to resume
        # from the previously trained parameters.
        if initialize_model:
            self.svd_v = self.feature_init + np.zeros(
                (self.dimensionality, self.nbr_users))
            self.svd_u = self.feature_init + np.zeros(
                (self.dimensionality, self.nbr_items))

        idx, vals = self.get_ratings(randomize_order=True)
        verbosity = int(verbose)

        if handle_bias:
            # Pre-compute the overall average and the per-item / per-user
            # bias terms needed by the bias-aware estimator loop.
            self.compute_overall_avg()
            self.compute_items_bias_bk()
            self.compute_users_bias_bk()

            estimator_loop_with_bias(
                self.min_epochs, self.max_epochs, self.min_improvement,
                self.dimensionality, self.feature_init, self.learning_rate,
                self.learning_rate_users, self.learning_rate_items, self.K,
                self.overall_bias, self.svd_u, self.svd_v, idx, vals,
                self.items_bias, self.users_bias, self.nbr_users,
                self.nbr_items, verbosity)
        else:
            estimator_loop_without_bias(
                self.min_epochs, self.max_epochs, self.min_improvement,
                self.dimensionality, self.feature_init, self.learning_rate,
                self.K, self.svd_u, self.svd_v, idx, vals, self.nbr_users,
                self.nbr_items, verbosity)
Example #3
0
 def feature_training_folding(self, initialize_model = False, handle_bias = False, verbose = False):
     '''
     Run one SGD training pass used when folding new ratings into the model.

     The per-rating loop is delegated to the Cython helpers
     estimator_loop_with_bias() / estimator_loop_without_bias(); afterwards
     the per-component means are recomputed.

         * initialize_model: If True, reset the feature matrices to the
           constant feature_init before training; otherwise resume from the
           parameters already stored. [False]
         * handle_bias: Use the bias-aware estimator loop if True. [False]
         * verbose: Print progress info if True. [False]
     '''
     # Initialize the model with previous results if available
     if initialize_model:
         self.svd_v = np.zeros([self.dimensionality, self.nbr_users]) + self.feature_init
         self.svd_u = np.zeros([self.dimensionality, self.nbr_items]) + self.feature_init
     
     # Ratings are visited in random order, as expected by SGD.
     ratings_index, ratings = self.get_ratings(randomize_order = True)
     
     if handle_bias:
         
         estimator_loop_with_bias(self.min_epochs, self.max_epochs, self.min_improvement, self.dimensionality, self.feature_init, self.learning_rate,
                                  self.K, self.overall_bias, self.svd_u, self.svd_v, ratings_index, ratings, self.items_bias, self.users_bias, self.nbr_users,
                                  self.nbr_items, int(verbose))
     else:
         estimator_loop_without_bias(self.min_epochs, self.max_epochs, self.min_improvement, self.dimensionality, self.feature_init, self.learning_rate,
                                     self.K, self.svd_u, self.svd_v, ratings_index, ratings, self.nbr_users,
                                     self.nbr_items, int(verbose))
         
     # Refresh the cached per-component means after training.
     self.compute_components_mean()
Example #4
0
    def feature_training(self, initialize_model = True, handle_bias = False, verbose = False):
        '''
        Compute the features using a Gradient Descent approach. This method is the core of the recommender. Once we have
        the matrix containing the ratings of the users, we run this method to train the recommendation engine. More precisely,
        this method uses Stochastic Gradient Descent (SGD) to determine the right model parameters. Those model parameters
        are then used by the predictor for computing the ratings for each user-item pair.
        
        The loop-intensive part of the code is done in the estimator_loop_without_bias() / estimator_loop_with_bias()
        functions, which are optimized with Cython.
        
            * initialize_model: If True, the model parameters are initialized to a constant value before the training. If False
              the model parameters already stored are used as the initial value for the training process. [True]
            * handle_bias: Handle the bias if True [False]
            * verbose: Print some info on the terminal if True [False]

        '''
        
        # Initialize the model with previous results if available
        if initialize_model:
            self.svd_v = np.zeros([self.dimensionality, self.nbr_users]) + self.feature_init
            self.svd_u = np.zeros([self.dimensionality, self.nbr_items]) + self.feature_init
        
        # Ratings are visited in random order, as expected by SGD.
        ratings_index, ratings = self.get_ratings(randomize_order = True)
        
        if handle_bias:
            # NOTE(review): constant bias initialization was superseded by the
            # compute_*_bias_bk() calls below.
            #self.items_bias = np.zeros(self.nbr_items) + 0.1
            #self.users_bias = np.zeros(self.nbr_users) + 0.1
            self.compute_overall_avg()
            self.compute_items_bias_bk()
            self.compute_users_bias_bk()

            estimator_loop_with_bias(self.min_epochs, self.max_epochs, self.min_improvement, self.dimensionality, self.feature_init, self.learning_rate,
                                     self.learning_rate_users, self.learning_rate_items, self.K, self.overall_bias, self.svd_u, self.svd_v, ratings_index,
                                     ratings, self.items_bias, self.users_bias, self.nbr_users, self.nbr_items, int(verbose))
        else:
            estimator_loop_without_bias(self.min_epochs, self.max_epochs, self.min_improvement, self.dimensionality, self.feature_init, self.learning_rate,
                                        self.K, self.svd_u, self.svd_v, ratings_index, ratings, self.nbr_users,
                                        self.nbr_items, int(verbose))