def __init__(self, data, config, params, *args, **kwargs):
    """Set up the denoising-autoencoder recommender.

    Resolves hyper-parameters through ``autoset_params`` and builds the
    underlying ``DenoisingAutoEncoder`` model.

    Args:
        data:
        config:
        params:
        *args:
        **kwargs:
    """
    self._random = np.random
    self._random_p = random

    self._ratings = self._data.train_dict
    self._sampler = sp.Sampler(self._data.sp_i_train)
    self._iteration = 0

    # A non-positive batch size means "one full batch of users".
    if self._batch_size < 1:
        self._batch_size = self._num_users

    ######################################
    # Hyper-parameters consumed by autoset_params().
    self._params_list = [
        ("_intermediate_dim", "intermediate_dim", "intermediate_dim", 600, None, None),
        ("_latent_dim", "latent_dim", "latent_dim", 200, None, None),
        ("_lambda", "reg_lambda", "reg_lambda", 0.01, None, None),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_dropout_rate", "dropout_pkeep", "dropout_pkeep", 1, None, None),
    ]
    self.autoset_params()

    # NOTE(review): 'dropout_pkeep' appears to be a keep probability;
    # the model receives the complementary drop rate.
    self._dropout_rate = 1. - self._dropout_rate

    self._model = DenoisingAutoEncoder(self._num_items,
                                       self._intermediate_dim,
                                       self._latent_dim,
                                       self._learning_rate,
                                       self._dropout_rate,
                                       self._lambda)
def __init__(self, data, config, params, *args, **kwargs):
    """AutoRec: Autoencoders Meet Collaborative Filtering.

    Link: https://users.cecs.anu.edu.au/~akmenon/papers/autorec/autorec-paper.pdf

    Args:
        data:
        config:
        params:
        *args:
        **kwargs:
    """
    self._random = np.random

    # Hyper-parameters consumed by autoset_params().
    self._params_list = [
        ("_lr", "lr", "lr", 0.0001, None, None),
        ("_hidden_neuron", "hidden_neuron", "hidden_neuron", 500, None, None),
        ("_l_w", "l_w", "l_w", 0.001, None, None),
    ]
    self.autoset_params()

    # A non-positive batch size means "all transactions in one batch".
    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    # Feed the sampler the transposed interaction matrix —
    # presumably item-major sampling for this item-based model.
    self._data.sp_u_train = self._data.sp_i_train.transpose()
    self._sampler = sp.Sampler(self._data.sp_u_train)

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))

    self._model = ItemAutoRecModel(self._data,
                                   self._num_users,
                                   self._num_items,
                                   self._lr,
                                   self._hidden_neuron,
                                   self._l_w)
def __init__(self, data, config, params, *args, **kwargs):
    """Set up the variational-autoencoder recommender.

    Resolves hyper-parameters through ``autoset_params``, seeds both the
    NumPy and stdlib RNGs for reproducibility, and builds the underlying
    ``VariationalAutoEncoder`` model.

    Args:
        data:
        config:
        params:
        *args:
        **kwargs:
    """
    self._ratings = self._data.train_dict
    self._sampler = sp.Sampler(self._data.sp_i_train)

    # A non-positive batch size means "one full batch of users".
    if self._batch_size < 1:
        self._batch_size = self._num_users

    ######################################
    # Hyper-parameters consumed by autoset_params().
    self._params_list = [
        ("_intermediate_dim", "intermediate_dim", "intermediate_dim", 600, int, None),
        ("_latent_dim", "latent_dim", "latent_dim", 200, int, None),
        ("_lambda", "reg_lambda", "reg_lambda", 0.01, None, None),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_dropout_rate", "dropout_pkeep", "dropout_pkeep", 1, None, None),
        ("_seed", "seed", "seed", 42, None, None),
    ]
    self.autoset_params()

    # Seed both RNG sources before any stochastic work happens.
    np.random.seed(self._seed)
    random.seed(self._seed)
    self._random = np.random
    self._random_p = random

    # NOTE(review): 'dropout_pkeep' appears to be a keep probability;
    # the model receives the complementary drop rate.
    self._dropout_rate = 1. - self._dropout_rate

    self._model = VariationalAutoEncoder(self._num_items,
                                         self._intermediate_dim,
                                         self._latent_dim,
                                         self._learning_rate,
                                         self._dropout_rate,
                                         self._lambda)

    # The total number of gradient updates used for annealing.
    self._total_anneal_steps = 200000
    # The largest annealing parameter reached.
    self._anneal_cap = 0.2