def __init__(self, sess, dataset, conf):
    """Configure a DeepICF model from ``conf`` and cache dataset handles.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix and entity counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(DeepICF, self).__init__(dataset, conf)
    # --- training setup ---
    self.pretrain_file = conf["pretrain_file"]
    self.verbose = conf["verbose"]
    self.batch_size = conf["batch_size"]
    self.use_batch_norm = conf["batch_norm"]
    self.num_epochs = conf["epochs"]
    self.num_negatives = conf["num_neg"]
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    # --- model architecture ---
    self.weight_size = conf["weight_size"]
    self.embedding_size = conf["embedding_size"]
    self.n_hidden = conf["layers"]
    self.activation = conf["activation"]
    self.algorithm = conf["algorithm"]
    self.alpha = conf["alpha"]
    self.beta = conf["beta"]
    # --- regularization: regs = [lambda, gamma, eta] bilinear terms ---
    regs = conf["regs"]
    self.reg_W = conf["regw"]
    self.lambda_bilinear, self.gamma_bilinear, self.eta_bilinear = regs[0], regs[1], regs[2]
    # --- parameter initialization ---
    self.embed_init_method = conf["embed_init_method"]
    self.weight_init_method = conf["weight_init_method"]
    self.bias_init_method = conf["bias_init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.dataset = dataset
    self.num_items = dataset.num_items
    self.num_users = dataset.num_users
    self.train_dict = csr_to_user_dict(dataset.train_matrix)
    self.sess = sess
def get_user_train_dict(self, by_time=False):
    """Return the training interactions as a ``{user_id: [item_id, ...]}`` dict.

    :param by_time: if True, order each user's items by interaction time
        (requires ``self.time_matrix``); otherwise use matrix order.
    :return: dict mapping user ids to their interacted item lists.
    """
    if by_time:
        return csr_to_user_dict_bytime(self.time_matrix, self.train_matrix)
    return csr_to_user_dict(self.train_matrix)
def __init__(self, sess, dataset, conf):
    """Configure a NAIS model from ``conf`` and cache dataset handles.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix and entity counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(NAIS, self).__init__(dataset, conf)
    # --- training setup ---
    self.pretrain = conf["pretrain"]
    self.pretrain_file = conf["pretrain_file"]
    self.verbose = conf["verbose"]
    self.batch_size = conf["batch_size"]
    self.num_epochs = conf["epochs"]
    self.num_negatives = conf["num_neg"]
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.loss_function = conf["loss_function"]
    self.is_pairwise = conf["is_pairwise"]
    self.topK = conf["topk"]
    # --- model architecture ---
    self.weight_size = conf["weight_size"]
    self.embedding_size = conf["embedding_size"]
    self.data_alpha = conf["data_alpha"]
    self.activation = conf["activation"]
    self.algorithm = conf["algorithm"]
    self.alpha = conf["alpha"]
    self.beta = conf["beta"]
    # --- regularization: regs = [lambda, gamma, eta] bilinear terms ---
    self.regs = conf["regs"]
    self.lambda_bilinear = self.regs[0]
    self.gamma_bilinear = self.regs[1]
    self.eta_bilinear = self.regs[2]
    # --- parameter initialization ---
    self.embed_init_method = conf["embed_init_method"]
    self.weight_init_method = conf["weight_init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.dataset = dataset
    self.num_items = dataset.num_items
    self.num_users = dataset.num_users
    self.train_dict = csr_to_user_dict(self.dataset.train_matrix)
    self.sess = sess
def __init__(self, sess, dataset, conf):
    """Configure a DiffNet model from ``conf`` and cache dataset handles.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix, id mappings and counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(DiffNet, self).__init__(dataset, conf)
    # --- training setup ---
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.loss_function = conf["loss_function"]
    self.num_epochs = conf["epochs"]
    self.reg_mf = conf["reg_mf"]
    self.batch_size = conf["batch_size"]
    self.num_negatives = conf["num_negatives"]
    self.verbose = conf["verbose"]
    # --- model / feature setup ---
    self.embedding_size = conf["embedding_size"]
    self.user_feature_file = conf["user_feature_file"]
    self.item_feature_file = conf["item_feature_file"]
    self.feature_dimension = conf["feature_dimension"]
    self.init_method = conf["init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.num_users = dataset.num_users
    self.num_items = dataset.num_items
    self.userids = dataset.userids
    self.itemids = dataset.itemids
    self.dataset = dataset
    self.trainMatrix = dataset.trainMatrix
    self.trainDict = csr_to_user_dict(self.trainMatrix)
    # Symmetrize the social adjacency so every link is bidirectional.
    # NOTE(review): self.social_matrix is read before any assignment in this
    # method — presumably set by the base-class __init__; confirm.
    self.social_matrix = self.social_matrix + self.social_matrix.transpose()
    self.consumed_items_sparse_matrix, self.social_neighbors_sparse_matrix = self.input_supply()
    self.sess = sess
def __init__(self, sess, dataset, conf):
    """Configure a DAE (denoising autoencoder) model from ``conf``.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix and entity counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(DAE, self).__init__(dataset, conf)
    # --- training setup ---
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.reg = conf["reg"]
    self.num_epochs = conf["epochs"]
    self.batch_size = conf["batch_size"]
    self.verbose = conf["verbose"]
    # --- autoencoder architecture ---
    self.hidden_neuron = conf["hidden_neuron"]
    self.h_act = conf["h_act"]                          # hidden-layer activation
    self.g_act = conf["g_act"]                          # output-layer activation
    self.corruption_level = conf["corruption_level"]    # input-noise ratio
    self.init_method = conf["init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.num_users = dataset.num_users
    self.num_items = dataset.num_items
    self.dataset = dataset
    self.train_dict = csr_to_user_dict(dataset.train_matrix)
    self.sess = sess
def __init__(self, sess, dataset, conf):
    """Configure a MultiDAE model from ``conf`` and derive its layer dims.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix and entity counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(MultiDAE, self).__init__(dataset, conf)
    # --- training setup ---
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.batch_size = conf["batch_size"]
    self.num_epochs = conf["epochs"]
    self.verbose = conf["verbose"]
    # --- dataset handles ---
    self.dataset = dataset
    self.num_users = dataset.num_users
    self.num_items = dataset.num_items
    # --- network dimensions: decoder p_dims ends at num_items,
    #     encoder q_dims mirrors it; dims is the full stack. ---
    self.p_dims = conf["p_dim"] + [self.num_items]
    self.q_dims = self.p_dims[::-1]
    self.dims = self.q_dims + self.p_dims[1:]
    self.act = conf["activation"]
    self.reg = conf["reg"]
    # --- parameter initialization ---
    self.weight_init_method = conf["weight_init_method"]
    self.bias_init_method = conf["bias_init_method"]
    self.stddev = conf["stddev"]
    self.train_dict = csr_to_user_dict(dataset.train_matrix)
    self.sess = sess
def __init__(self, sess, dataset, conf):
    """Configure an SBPR model from ``conf`` and precompute social item sets.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix, id mappings and counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(SBPR, self).__init__(dataset, conf)
    # --- training setup ---
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.loss_function = conf["loss_function"]
    self.num_epochs = conf["num_epochs"]
    self.reg_mf = conf["reg_mf"]
    self.batch_size = conf["batch_size"]
    self.verbose = conf["verbose"]
    # --- model setup ---
    self.embedding_size = conf["embedding_size"]
    self.init_method = conf["init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.dataset = dataset
    self.num_users = dataset.num_users
    self.num_items = dataset.num_items
    self.userids = self.dataset.userids
    self.train_dict = csr_to_user_dict(self.dataset.train_matrix)
    # self.social_matrix = self._get_social_matrix()
    # Per-user sets of items consumed by social connections.
    self.userSocialItemsSetList = self._get_SocialItemsSet()
    self.sess = sess
def __init__(self, sess, dataset, conf):
    """Configure a FISM model from ``conf`` and cache dataset handles.

    Parameters
    ----------
    sess : tf.Session
        TensorFlow session used for training/evaluation.
    dataset : Dataset
        Data source providing the train matrix and entity counts.
    conf : dict
        Hyperparameter configuration (keys read below).
    """
    super(FISM, self).__init__(dataset, conf)
    # --- training setup ---
    self.batch_size = conf["batch_size"]
    self.num_epochs = conf["epochs"]
    self.learning_rate = conf["learning_rate"]
    self.learner = conf["learner"]
    self.loss_function = conf["loss_function"]
    self.is_pairwise = conf["is_pairwise"]
    # FIX: the original assigned self.num_negatives = conf["num_neg"] twice;
    # the redundant second assignment has been removed (same value either way).
    self.num_negatives = conf["num_neg"]
    self.verbose = conf["verbose"]
    # --- model setup ---
    self.embedding_size = conf["embedding_size"]
    self.lambda_bilinear = conf["lambda"]   # item-embedding regularization
    self.gamma_bilinear = conf["gamma"]     # bias regularization
    self.alpha = conf["alpha"]              # neighborhood-size normalization exponent
    self.topK = conf["topk"]
    self.init_method = conf["init_method"]
    self.stddev = conf["stddev"]
    # --- dataset handles ---
    self.num_users = dataset.num_users
    self.num_items = dataset.num_items
    self.dataset = dataset
    self.train_dict = csr_to_user_dict(self.dataset.train_matrix)
    self.sess = sess
def get_user_test_neg_dict(self):
    """Return test negatives as ``{user_id: [item_id, ...]}``.

    :return: dict built from ``self.negative_matrix``, or ``None`` when no
        negative matrix was provided.
    """
    if self.negative_matrix is None:
        return None
    return csr_to_user_dict(self.negative_matrix)
def get_user_test_dict(self):
    """Return the test interactions as a ``{user_id: [item_id, ...]}`` dict."""
    return csr_to_user_dict(self.test_matrix)
tf.set_random_seed(2017) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' if __name__ == "__main__": is_windows = sys.platform.startswith('win') if is_windows: root_folder = 'XXXXXX/PythonProjects/SGL/' else: root_folder = 'XXXXXX/PythonProjects/SGL/' conf = Configurator(root_folder + "NeuRec.properties", default_section="hyperparameters") dataset = Dataset(conf) num_users = dataset.num_users num_items = dataset.num_items train_dict = tool.csr_to_user_dict(dataset.train_matrix) test_dict = tool.csr_to_user_dict(dataset.test_matrix) num_trainings = dataset.train_matrix.nnz count = 0 while count < num_trainings * conf.ratio: u_id = np.random.randint(num_users) i_id = np.random.randint(num_items) if i_id not in train_dict[u_id]: if u_id not in test_dict: train_dict[u_id].append(i_id) count += 1 else: if i_id not in test_dict[u_id]: train_dict[u_id].append(i_id) count += 1 with open(
def get_user_val_neg_dict(self):
    """Return validation negatives as ``{user_id: [item_id, ...]}``.

    :return: dict built from ``self.negative_matrix``, or ``None`` when no
        negative matrix was provided.
    """
    if self.negative_matrix is None:
        return None
    return csr_to_user_dict(self.negative_matrix)