Code example #1
    def shape(self):
        # Matrix dimensions come from the global parameters of the dataset.
        nb_users = global_parameters(self.sets_parameters)[0]
        nb_movies = global_parameters(self.sets_parameters)[1]

        # For 'V' learning the matrix is transposed, so swap the dimensions.
        if self.sets_parameters['learning_type'] == 'V':
            nb_users, nb_movies = permute(nb_users, nb_movies)

        shape = (nb_users, nb_movies)
        return shape

    def __init__(self, dataset, sets_parameters, stability_parameters):
        super().__init__(dataset)

        self.nb_users, self.nb_movies = global_parameters(
            sets_parameters=sets_parameters)[0:2]

        self.differences = stability_parameters['differences']
        self.probability = stability_parameters['probability']
        self.rmse = stability_parameters['rmse']

        # Sanity check: the rmse handed in must not be zero.
        print('check self.rmse different from zero:')
        print(self.rmse)

        self.subsets_number = stability_parameters['subsets_number']
        self.landa_array = stability_parameters['landa_array']

        # One regularisation weight (landa) per subset, plus one extra entry.
        assert np.size(self.landa_array) == self.subsets_number + 1

        self.coefficients = self.run()

        # Dispatch tables: each category maps to an index, a source matrix,
        # and the permutation to apply to that matrix.
        self.category_indices = {'user': 0, 'rmse': 1, 'coefficients': 2}
        self.category_matrix = {
            'user': self.ratings,
            'rmse': self.ratings,
            'coefficients': self.coefficients
        }
        self.category_permute = {
            'user': self.permute,
            'rmse': self.identity,
            'coefficients': self.permute
        }
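All of the snippets on this page lean on a global_parameters helper, and code example #1 also on permute, neither of which is shown. A minimal sketch of what they might look like; the tuple layout is inferred only from how the callers index it, and the catalogue entry is made up:

    def global_parameters(sets_parameters):
        # Hypothetical stand-in. The callers only reveal that [0] is nb_users,
        # [1] is nb_movies and [3] is the data file path (see code example #12).
        catalogue = {
            'ml-100k': (943, 1682, None, 'ml-100k/ratings.csv'),  # made-up entry
        }
        return catalogue[sets_parameters['database_id']]

    def permute(a, b):
        # Swap two values; used when learning_type == 'V'.
        return b, a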
Code example #4
    def __init__(self, sets_parameters, Train_set, batch_size, learning_rate0, learning_decay):
        self.nb_users, self.nb_movies = global_parameters(sets_parameters=sets_parameters)[0:2]
        self.Train_set = Train_set
        self.batch_size = batch_size
        # Keep both the initial and the current learning rate.
        self.learning_rate0 = learning_rate0
        self.learning_rate = learning_rate0
        self.learning_decay = learning_decay
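The constructor stores the initial rate separately from the current one, which suggests the current rate is recomputed as training progresses. How learning_decay enters is not shown; one common scheme, offered purely as an assumption, is inverse-time decay per step:

    def update_learning_rate(self, step):
        # Assumed inverse-time decay; the real schedule is not in the snippet.
        self.learning_rate = self.learning_rate0 / (1 + self.learning_decay * step)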
Code example #5
    def __init__(self, parameters, sets):

        self.database = parameters['sets']['database_id']
        self.hidden1_units = parameters['autoencoder']['hidden1_units']
        self.regularisation = parameters['autoencoder']['regularisation']
        self.learning_rate0 = parameters['autoencoder']['learning_rate0']
        self.learning_decay = parameters['autoencoder']['learning_decay']
        self.batch_size_evaluate = parameters['autoencoder']['batch_size_evaluate']
        self.batch_size_train = parameters['autoencoder']['batch_size_train']
        self.is_test = parameters['autoencoder']['is_test']
        self.nb_users, self.nb_movies = global_parameters(sets_parameters=parameters['sets'])[0:2]

        # Placeholders, filled in during training and evaluation.
        self.difference_matrix = 0
        self.rmse = 0
        self.rmse_train = 0

        # Steps per epoch: number of users divided by the training batch size.
        self.epoch_steps = int(self.nb_users / self.batch_size_train)
        self.nb_steps = parameters['autoencoder']['nb_epoch'] * self.epoch_steps

        self.Train_set = Dataset(sets['autoencoder'][0])
        self.Validation_set = Dataset(sets['autoencoder'][1])
        self.Test_set = Dataset(sets['autoencoder'][2])

        self.Loss = Loss()

        self.Train = Train(sets_parameters=parameters['sets'],
                           Train_set=self.Train_set,
                           batch_size=self.batch_size_train,
                           learning_decay=self.learning_decay,
                           learning_rate0=self.learning_rate0)

        self.Evaluation = Evaluation(sets_parameters=parameters['sets'],
                                     batch_size_evaluate=self.batch_size_evaluate,
                                     Train_set=self.Train_set)
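The keys read by this constructor pin down the shape of its two arguments. A sketch of the expected structure; every value below is a placeholder, and train_data/validation_data/test_data stand for whatever Dataset accepts:

    parameters = {
        'sets': {
            'database_id': 'ml-100k',   # whatever global_parameters recognises
        },
        'autoencoder': {
            'hidden1_units': 500,
            'regularisation': 0.01,
            'learning_rate0': 0.001,
            'learning_decay': 0.95,
            'batch_size_evaluate': 128,
            'batch_size_train': 32,
            'is_test': False,
            'nb_epoch': 20,
        },
    }
    sets = {'autoencoder': [train_data, validation_data, test_data]}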
Code example #6
    def __init__(self, sets_parameters):
        self.sets_parameters = sets_parameters
        self.validation_ratio = sets_parameters['validation_ratio']
        self.test_ratio = sets_parameters['test_ratio']
        self.nb_users, self.nb_movies = global_parameters(sets_parameters=sets_parameters)[0:2]
        self.database_id = sets_parameters['database_id']
        self.learning_type = sets_parameters['learning_type']

        self.train_val, self.test = self.first_split()
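first_split itself is not shown above. A plausible sketch under the assumption that it randomly holds out a test_ratio fraction of the imported ratings (using full_import from code example #12); the real splitting strategy may differ:

    def first_split(self):
        # Hypothetical: random hold-out at the rating level.
        database = full_import(self.sets_parameters)      # code example #12
        indices = np.random.permutation(database.shape[0])
        nb_test = int(self.test_ratio * database.shape[0])
        test = database[indices[:nb_test]]
        train_val = database[indices[nb_test:]]
        return train_val, test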
Code example #7
    def __init__(self, factorisation_sets, factorisation_parameters, sets_parameters):

        # 'U' learning keeps the users x movies orientation;
        # any other learning_type swaps the two dimensions.
        if sets_parameters['learning_type'] == 'U':
            self.nb_users, self.nb_movies = global_parameters(sets_parameters=sets_parameters)[0:2]
        else:
            self.nb_movies, self.nb_users = global_parameters(sets_parameters=sets_parameters)[0:2]

        self.dimension = factorisation_parameters['dimension']
        self.iterations = factorisation_parameters['iterations']
        self.landa = factorisation_parameters['landa']

        self.train_set = factorisation_sets[0]
        # The training matrix transposed to CSR for fast per-movie row access.
        self.TrainSet_movies = factorisation_sets[0].transpose(copy=True).tocsr()
        self.validation_set = factorisation_sets[1]

        # Factors: U is (dimension x nb_users), V is (nb_movies x dimension);
        # R will hold the reconstructed (nb_users x nb_movies) ratings.
        self.R = np.empty((self.nb_users, self.nb_movies))
        self.U = np.random.rand(self.dimension, self.nb_users)
        self.V = np.random.rand(self.nb_movies, self.dimension)

        self.rmse, self.difference_matrix = self.run()
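run() is not part of the snippet. The attribute shapes and the landa regulariser are consistent with alternating least squares; the sketch below is hypothetical, and assumes train_set and validation_set are scipy CSR matrices:

    def run(self):
        # Hypothetical ALS loop; the real method may differ substantially.
        eye = self.landa * np.eye(self.dimension)
        for _ in range(self.iterations):
            # Fix V, solve a ridge problem per user.
            for u in range(self.nb_users):
                rated = self.train_set[u].indices          # movies rated by user u
                if rated.size == 0:
                    continue
                Vu = self.V[rated]                         # (n_rated, dimension)
                target = self.train_set[u].data
                self.U[:, u] = np.linalg.solve(Vu.T @ Vu + eye, Vu.T @ target)
            # Fix U, solve a ridge problem per movie.
            for m in range(self.nb_movies):
                raters = self.TrainSet_movies[m].indices   # users who rated movie m
                if raters.size == 0:
                    continue
                Um = self.U[:, raters].T                   # (n_raters, dimension)
                target = self.TrainSet_movies[m].data
                self.V[m] = np.linalg.solve(Um.T @ Um + eye, Um.T @ target)
        self.R = self.U.T @ self.V.T                       # predicted ratings
        # Errors on the observed validation entries; the real difference_matrix
        # may have another shape.
        val = self.validation_set.tocoo()
        differences = val.data - self.R[val.row, val.col]
        return np.sqrt(np.mean(differences ** 2)), differences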
Code example #10
    def __init__(self, batch_size_evaluate, sets_parameters, Train_set):
        self.batch_size_evaluate = batch_size_evaluate
        self.Train_set = Train_set
        self.nb_users, self.nb_movies = global_parameters(sets_parameters=sets_parameters)[0:2]
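The evaluation step itself is not included. A minimal sketch of what batched RMSE evaluation could look like here; model and its predict method are pure assumptions, and only observed (non-zero) ratings are scored:

    def rmse_in_batches(self, model, data_matrix):
        # Hypothetical batched evaluation over CSR rows.
        squared_error, count = 0.0, 0
        for start in range(0, self.nb_users, self.batch_size_evaluate):
            batch = data_matrix[start:start + self.batch_size_evaluate]
            target = batch.toarray()
            mask = target != 0                     # score observed entries only
            prediction = model.predict(batch)      # assumed to return a dense array
            squared_error += np.sum(((prediction - target) * mask) ** 2)
            count += np.sum(mask)
        return np.sqrt(squared_error / count)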
Code example #12
def full_import(sets_parameters):
    # The data file path is the fourth entry of the global parameters.
    data_file = global_parameters(sets_parameters)[3]
    # Keep only the first three columns: user id, movie id, rating.
    database = np.genfromtxt(data_file, delimiter=',')[:, 0:3]
    # Shift the id columns down by one (1-based ids on disk, 0-based in memory).
    database[:, 0:2] -= 1
    return database
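A usage sketch: the function expects a comma-separated file whose first three columns are user id, movie id and rating, with 1-based ids on disk. The database id, stub helper and file below are made up for illustration:

    import numpy as np

    # A tiny made-up ratings file: user,movie,rating.
    with open('toy_ratings.csv', 'w') as f:
        f.write('1,1,5\n1,2,3\n2,1,4\n')

    # Stand-in for the real helper; only position [3] matters here.
    def global_parameters(sets_parameters):
        return (2, 2, None, 'toy_ratings.csv')

    ratings = full_import({'database_id': 'toy'})
    print(ratings)   # ids are now 0-based: [[0. 0. 5.] [0. 1. 3.] [1. 0. 4.]]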