# These snippets assume the usual module-level imports of their source files:
# `import numpy as np`, `import tensorflow as tf`, a tuple parser such as
# `from ast import literal_eval as make_tuple`, and the framework's pointwise
# sampler module aliased as `pws`.

def __init__(self, data, config, params, *args, **kwargs):
    # `self._data`, `self._num_users`, `self._num_items`, `self._batch_size`,
    # and `autoset_params` are assumed to be provided by the framework's
    # recommender base class/mixin.
    self._random = np.random
    self._sampler = pws.Sampler(self._data.i_train_dict)
    # Each entry: (attribute, config key, short name, default, reader, writer).
    self._params_list = [
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_mf_factors", "mf_factors", "mffactors", 10, int, None),
        ("_mlp_factors", "mlp_factors", "mlpfactors", 10, int, None),
        ("_mlp_hidden_size", "mlp_hidden_size", "mlpunits", "(64,32)",
         lambda x: list(make_tuple(str(x))),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_dropout", "dropout", "drop", 0, None, None),
        ("_is_mf_train", "is_mf_train", "mftrain", True, None, None),
        ("_is_mlp_train", "is_mlp_train", "mlptrain", True, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))

    self._model = NeuralMatrixFactorizationModel(self._num_users, self._num_items,
                                                 self._mf_factors, self._mlp_factors,
                                                 self._mlp_hidden_size, self._dropout,
                                                 self._is_mf_train, self._is_mlp_train,
                                                 self._learning_rate)

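# A minimal sketch of what `autoset_params` presumably does with the six-field
# tuples above (attribute name, params key, short name for run labels, default,
# reader, writer). This is inferred from the tuple layout, not the framework's
# actual implementation; the `params` object is hypothetical:
def autoset_params_sketch(obj, params):
    for attr, key, _short, default, reader, _writer in obj._params_list:
        raw = getattr(params, key, default)           # fall back to the default
        setattr(obj, attr, reader(raw) if reader else raw)
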
def __init__(self, data, learning_rate=0.001, l_w=0, l_b=0, l_gan=0,
             num_users=100, num_items=100, name="CFGAN-DIS", **kwargs):
    super().__init__(name=name, **kwargs)
    tf.random.set_seed(42)

    self._learning_rate = learning_rate
    self._l_w = l_w
    self._l_b = l_b
    self._l_gan = l_gan
    self._num_items = num_items
    self._num_users = num_users
    self.data = data

    self.initializer = tf.initializers.GlorotUniform()
    self.sampler = pws.Sampler(self.data.i_train_dict)

    # Discriminator model parameters: a single linear layer over an input of
    # width `num_items * 2` (presumably a purchase vector concatenated with
    # its condition vector, as in CFGAN), plus a scalar bias.
    self.B = tf.Variable(tf.zeros(shape=[1]), name='B_dis', dtype=tf.float32)
    self.G = tf.Variable(self.initializer(shape=[self._num_items * 2, 1]),
                         name='G_dis', dtype=tf.float32)

    self.optimizer = tf.optimizers.Adam(self._learning_rate)

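# A minimal sketch of the forward pass these parameters imply: the
# discriminator scores a (vector, condition) pair with a single logistic unit.
# The function name and the concatenation order are assumptions.
def discriminate_sketch(self, vector, condition):
    x = tf.concat([vector, condition], axis=1)        # [batch, 2 * num_items]
    return tf.sigmoid(tf.matmul(x, self.G) + self.B)  # [batch, 1]
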
def __init__(self, data, config, params, *args, **kwargs):
    self._random = np.random
    self._params_list = [
        ("_factors", "factors", "factors", 100, None, None),
        ("_hidden_neurons", "hidden_neurons", "hidden_neurons", "(64,32)",
         lambda x: list(make_tuple(x)),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_hidden_activations", "hidden_activations", "hidden_activations", "('relu','relu')",
         lambda x: list(make_tuple(x)),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_l_w", "reg", "reg", 0.0001, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))
    self._sampler = pws.Sampler(self._data.i_train_dict)

    self._model = DeepFMModel(self._num_users, self._num_items, self._factors,
                              # pair each layer width with its activation,
                              # e.g. ((64, 'relu'), (32, 'relu'))
                              tuple(m for m in zip(self._hidden_neurons,
                                                   self._hidden_activations)),
                              self._l_w, self._learning_rate)

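# For context, a minimal sketch of the factorization-machine half of DeepFM
# (Guo et al., 2017): the second-order interaction term computed with the
# usual "square of sum minus sum of squares" identity. The embedding tensor
# shape is an assumption for illustration.
def fm_second_order_sketch(embeddings):
    # embeddings: [batch, num_fields, factors]
    square_of_sum = tf.square(tf.reduce_sum(embeddings, axis=1))
    sum_of_square = tf.reduce_sum(tf.square(embeddings), axis=1)
    return 0.5 * tf.reduce_sum(square_of_sum - sum_of_square, axis=1)  # [batch]
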
def __init__(self, data, config, params, *args, **kwargs):
    self._random = np.random
    self._params_list = [
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_factors", "factors", "factors", 10, None, None),
        ("_l_w", "reg", "reg", 0.1, None, None),
        ("_alpha", "alpha", "alpha", 0.5, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))
    self._sampler = pws.Sampler(self._data.i_train_dict)

    self._model = LogisticMatrixFactorizationModel(self._num_users, self._num_items,
                                                   self._factors, self._l_w,
                                                   self._alpha, self._learning_rate)

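# Assuming the model follows logistic matrix factorization in the style of
# Johnson (2014), `_alpha` weights the confidence of observed interactions and
# `_l_w` regularizes the latent factors; per interaction the (to-be-maximized)
# log-likelihood term would look roughly like
#
#     alpha * r_ui * s_ui - (1 + alpha * r_ui) * log(1 + exp(s_ui))
#
# with s_ui = p_u . q_i (plus biases). This is an inference from the class
# name and parameters, not a verified reading of the model code.
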
def __init__(self, data, config, params, *args, **kwargs):
    """
    Create an IRGAN instance
    (see https://arxiv.org/abs/1705.10513 for details about the algorithm design choices).

    Args:
        data: data loader object
        params: model parameters {embed_k: embedding size,
                                  [l_w, l_b]: regularization,
                                  lr: learning rate}
    """
    self._random = np.random
    self._params_list = [
        ("_predict_model", "predict_model", "predict_model", "generator", None, None),
        ("_factors", "factors", "factors", 10, None, None),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_l_w", "l_w", "l_w", 0.1, None, None),
        ("_l_b", "l_b", "l_b", 0.001, None, None),
        ("_l_gan", "l_gan", "l_gan", 0.001, None, None),
        ("_g_epochs", "g_epochs", "g_epochs", 5, None, None),
        ("_d_epochs", "d_epochs", "d_epochs", 1, None, None),
        ("_g_pretrain_epochs", "g_pretrain_epochs", "g_pt_ep", 1, None, None),
        ("_d_pretrain_epochs", "d_pretrain_epochs", "d_pt_ep", 1, None, None),
        ("_sample_lambda", "sample_lambda", "sample_lambda", 0.2, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    if self._predict_model not in ["generator", "discriminator"]:
        raise ValueError("The model component used as recommender must be "
                         "either 'generator' or 'discriminator'.")

    self._ratings = self._data.train_dict
    self._sampler = pws.Sampler(self._data.i_train_dict)
    self._model = IRGAN_model(self._predict_model, self._data, self._batch_size,
                              self._factors, self._learning_rate, self._l_w,
                              self._l_b, self._l_gan, self._num_users,
                              self._num_items, self._g_pretrain_epochs,
                              self._d_pretrain_epochs, self._g_epochs,
                              self._d_epochs, self._sample_lambda)

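# A minimal sketch of the adversarial schedule the epoch parameters above
# suggest: pretrain each component, then alternate discriminator and generator
# updates every outer epoch. All method names on `model` are hypothetical;
# IRGAN's actual training loop lives inside the model class.
def irgan_schedule_sketch(model, epochs, g_pt_ep, d_pt_ep, g_epochs, d_epochs):
    for _ in range(g_pt_ep):
        model.pretrain_generator()       # hypothetical helper
    for _ in range(d_pt_ep):
        model.pretrain_discriminator()   # hypothetical helper
    for _ in range(epochs):
        for _ in range(d_epochs):
            model.train_discriminator_step()
        for _ in range(g_epochs):
            model.train_generator_step()
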
def __init__(self, data, config, params, *args, **kwargs):
    """
    Convolutional Matrix Factorization
    (see http://dm.postech.ac.kr/~cartopy/ConvMF/ConvMF_RecSys16_for_public.pdf).

    Args:
        data: data loader object
        config: experiment configuration
        params: model parameters
        *args: additional positional arguments
        **kwargs: additional keyword arguments
    """
    self._random = np.random
    self._sampler = pws.Sampler(self._data.i_train_dict)
    self._params_list = [
        ("_lr", "lr", "lr", 0.001, None, None),
        ("_embedding_size", "embedding_size", "embedding_size", 100, None, None),
        ("_cnn_channels", "cnn_channels", "cnn_channels", "(1, 32, 32)",
         lambda x: list(make_tuple(str(x))),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_cnn_kernels", "cnn_kernels", "cnn_kernels", "(2,2)",
         lambda x: list(make_tuple(str(x))),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_cnn_strides", "cnn_strides", "cnn_strides", "(2,2)",
         lambda x: list(make_tuple(str(x))),
         lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
        ("_dropout_prob", "dropout_prob", "dropout_prob", 0, None, None),
        ("_l_w", "l_w", "l_w", 0.005, None, None),
        ("_l_b", "l_b", "l_b", 0.0005, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))

    self._model = ConvMatrixFactorizationModel(self._num_users, self._num_items,
                                               self._embedding_size, self._lr,
                                               self._cnn_channels, self._cnn_kernels,
                                               self._cnn_strides, self._dropout_prob,
                                               self._l_w, self._l_b)

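# The channel/kernel/stride tuples above pair off naturally into a conv stack:
# with channels (1, 32, 32), kernels (2, 2), and strides (2, 2), consecutive
# channel counts give each layer's input/output width. A minimal sketch of
# that inferred pairing (the layer API and activation are illustrative only):
def build_cnn_sketch(channels, kernels, strides):
    layers = []
    for _in_ch, out_ch, k, s in zip(channels[:-1], channels[1:], kernels, strides):
        layers.append(tf.keras.layers.Conv2D(filters=out_ch, kernel_size=k,
                                             strides=s, activation='relu'))
    return layers
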
def __init__(self, data, config, params, *args, **kwargs):
    self._random = np.random
    self._params_list = [
        ("_predict_model", "predict_model", "predict_model", "generator", None, None),
        ("_factors", "factors", "factors", 10, None, None),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_l_w", "l_w", "l_w", 0.1, None, None),
        ("_l_b", "l_b", "l_b", 0.001, None, None),
        ("_l_gan", "l_gan", "l_gan", 0.001, None, None),
        ("_g_epochs", "g_epochs", "g_epochs", 5, None, None),
        ("_d_epochs", "d_epochs", "d_epochs", 1, None, None),
        ("_g_pretrain_epochs", "g_pretrain_epochs", "g_pt_ep", 10, None, None),
        ("_d_pretrain_epochs", "d_pretrain_epochs", "d_pt_ep", 10, None, None),
        ("_sample_lambda", "sample_lambda", "sample_lambda", 0.2, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    if self._predict_model not in ["generator", "discriminator"]:
        raise ValueError("The model component used as recommender must be "
                         "either 'generator' or 'discriminator'.")

    self._ratings = self._data.train_dict
    self._sampler = pws.Sampler(self._data.i_train_dict)
    self._model = IRGAN_model(self._predict_model, self._data, self._batch_size,
                              self._factors, self._learning_rate, self._l_w,
                              self._l_b, self._l_gan, self._num_users,
                              self._num_items, self._g_pretrain_epochs,
                              self._d_pretrain_epochs, self._g_epochs,
                              self._d_epochs, self._sample_lambda)

def __init__(self, data, factors=200, learning_rate=0.001, l_w=0, l_b=0, l_gan=0,
             num_users=100, num_items=100, name="IRGAN-GEN", **kwargs):
    super().__init__(name=name, **kwargs)
    tf.random.set_seed(42)

    self._factors = factors
    self._learning_rate = learning_rate
    self._l_w = l_w
    self._l_b = l_b
    self._l_gan = l_gan
    self._num_items = num_items
    self._num_users = num_users
    self.data = data

    self.initializer = tf.random_uniform_initializer(minval=-0.05, maxval=0.05, seed=1234)
    self.sampler = pws.Sampler(self.data.i_train_dict)

    # Generator parameters: item biases plus user/item latent factor matrices.
    self.Bi = tf.Variable(tf.zeros(self._num_items), name='Bi_gen', dtype=tf.float32)
    self.Gu = tf.Variable(self.initializer(shape=[self._num_users, self._factors]),
                          name='Gu_gen', dtype=tf.float32)
    self.Gi = tf.Variable(self.initializer(shape=[self._num_items, self._factors]),
                          name='Gi_gen', dtype=tf.float32)

    self.optimizer = tf.optimizers.Adam(self._learning_rate)

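# A minimal sketch of the scoring these parameters imply: a biased dot product
# over all items for a batch of users, from which the IRGAN generator would
# derive its sampling distribution. The softmax step and the function name
# are assumptions.
def all_item_scores_sketch(self, user_ids):
    logits = tf.matmul(tf.gather(self.Gu, user_ids), self.Gi,
                       transpose_b=True) + self.Bi   # [batch, num_items]
    return tf.nn.softmax(logits, axis=1)
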
def __init__(self, data, config, params, *args, **kwargs):
    self._random = np.random
    self._sampler = pws.Sampler(self._data.i_train_dict)
    self._params_list = [
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_mf_factors", "mf_factors", "mffactors", 10, None, None),
        ("_is_edge_weight_train", "is_edge_weight_train", "isedgeweighttrain", True, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))

    self._model = GeneralizedMatrixFactorizationModel(self._num_users, self._num_items,
                                                      self._mf_factors,
                                                      self._is_edge_weight_train,
                                                      self._learning_rate)

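# In the GMF formulation from the NeuMF paper (He et al., 2017), the score is
# sigma(h^T (p_u * q_i)) with an elementwise product; presumably
# `_is_edge_weight_train` toggles whether the output weights h are trainable
# or fixed. A sketch under that assumption:
def gmf_score_sketch(p_u, q_i, h):
    # p_u, q_i: [batch, factors]; h: [factors, 1]
    return tf.sigmoid(tf.matmul(p_u * q_i, h))  # [batch, 1]
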
def __init__(self, data, config, params, *args, **kwargs):
    self._random = np.random
    self._params_list = [
        ("_factors", "factors", "factors", 10, None, None),
        ("_learning_rate", "lr", "lr", 0.001, None, None),
        ("_lambda_weights", "reg_w", "reg_w", 0.1, None, None),
        ("_lambda_bias", "reg_b", "reg_b", 0.001, None, None),
    ]
    self.autoset_params()

    if self._batch_size < 1:
        self._batch_size = self._data.transactions

    self._ratings = self._data.train_dict
    self._sp_i_train = self._data.sp_i_train
    self._i_items_set = list(range(self._num_items))
    self._sampler = pws.Sampler(self._data.i_train_dict)

    self._model = FunkSVDModel(self._num_users, self._num_items, self._factors,
                               self._lambda_weights, self._lambda_bias,
                               self._learning_rate)

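# For reference, the classic Funk-style SVD objective that the separate
# weight/bias regularizers above map onto; whether this implementation
# includes bias terms at all is an assumption from the `reg_b` parameter:
#
#     min  sum_(u,i) (r_ui - (b_u + b_i + p_u . q_i))^2
#          + reg_w * (||p_u||^2 + ||q_i||^2) + reg_b * (b_u^2 + b_i^2)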