Code example #1
    def _forward(self, item_embs, user_emb):  # (b,l,d), (b,d)
        gate = tf.sigmoid(
            self.feature_gate_item(item_embs) + tf.expand_dims(
                self.feature_gate_user(user_emb), axis=1))  # (b,l,d)

        self._reg_loss += l2_loss(*self.feature_gate_item.trainable_weights)
        self._reg_loss += l2_loss(*self.feature_gate_user.trainable_weights)

        # feature gating
        gated_item = tf.multiply(item_embs, gate)  # (b,l,d)

        # instance gating
        term1 = tf.matmul(gated_item,
                          tf.expand_dims(self.instance_gate_item,
                                         axis=0))  # (b,l,d)x(1,d,1)->(b,l,1)
        term2 = tf.matmul(user_emb,
                          self.instance_gate_user)  # (b,d)x(d,l)->(b,l)
        self._reg_loss += l2_loss(self.instance_gate_user,
                                  self.instance_gate_item)

        instance_score = tf.sigmoid(tf.squeeze(term1) + term2)  # (b,l)

        union_out = tf.multiply(gated_item,
                                tf.expand_dims(instance_score,
                                               axis=2))  # (b,l,d)
        union_out = tf.reduce_sum(union_out, axis=1)  # (b,d)
        instance_score = tf.reduce_sum(instance_score, axis=1, keepdims=True)
        union_out = union_out / instance_score  # (b,d)
        return union_out  # (b,d)
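
Every example on this page calls an l2_loss helper from the repositories' shared utility code rather than tf.nn.l2_loss directly. A minimal sketch of that helper, assuming (as in NeuRec-style util modules) it simply sums tf.nn.l2_loss over its arguments:

import tensorflow as tf

def l2_loss(*params):
    # tf.nn.l2_loss(w) is 0.5 * sum(w ** 2); accumulate it over
    # every tensor passed in and return a single scalar
    return tf.add_n([tf.nn.l2_loss(w) for w in params])

Under this definition, a call such as l2_loss(p1, q1) returns a scalar that the callers scale by a regularization coefficient and add to the training loss.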
Code example #2
File: CFGAN.py Project: zjfng1733/NeuRec-1
    def build_graph(self):
        self._create_layer()

        # generator
        self.condition = tf.placeholder(tf.float32, [None, self.num_items])
        self.g_zr_dims = tf.placeholder(tf.float32, [None, self.num_items])
        self.g_output = self.gen(self.condition)
        # ZR (zero reconstruction) term: pushes generator outputs toward 0
        # on the sampled unobserved items selected by g_zr_dims
        self.g_zr_loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(self.g_output) * self.g_zr_dims,
                          1,
                          keepdims=True))

        # discriminator
        self.mask = tf.placeholder(
            tf.float32, [None, self.num_items])  # purchased = 1, otherwise 0
        fake_data = self.g_output * self.mask
        fake_data = tf.concat([self.condition, fake_data], 1)

        self.real_data = tf.placeholder(tf.float32, [None, self.num_items])
        real_data = tf.concat([self.condition, self.real_data], 1)

        d_fake = self.dis(fake_data)
        d_real = self.dis(real_data)

        g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope='gen')
        d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope='dis')

        # define loss & optimizer for G
        g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.ones_like(d_fake)))

        g_loss = g_loss + self.reg_G * l2_loss(*g_vars)
        g_loss = g_loss + self.ZR_coefficient * self.g_zr_loss

        if self.opt_g == 'sgd':
            self.trainer_g = tf.train.GradientDescentOptimizer(
                self.lr_G).minimize(g_loss, var_list=g_vars)
        elif self.opt_g == 'adam':
            self.trainer_g = tf.train.AdamOptimizer(self.lr_G).minimize(
                g_loss, var_list=g_vars)

        # define loss & optimizer for D
        d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_real, labels=tf.ones_like(d_real)))
        d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.zeros_like(d_fake)))
        d_loss = d_loss_real + d_loss_fake + self.reg_D * l2_loss(*d_vars)

        if self.opt_d == 'sgd':
            self.trainer_d = tf.train.GradientDescentOptimizer(
                self.lr_D).minimize(d_loss, var_list=d_vars)
        elif self.opt_d == 'adam':
            self.trainer_d = tf.train.AdamOptimizer(self.lr_D).minimize(
                d_loss, var_list=d_vars)
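
A hedged sketch of how these placeholders could be driven during alternating G/D training; the function, its arguments, and the ZR ("zero reconstruction") and PM ("partial masking") masks are illustrative assumptions, not code from CFGAN.py:

def train_step(sess, model, condition, zr_mask, pm_mask, real_data,
               g_steps=1, d_steps=1):
    # Alternate generator / discriminator updates (hypothetical driver;
    # sess is a tf.Session, model exposes the tensors built above).
    for _ in range(g_steps):
        sess.run(model.trainer_g,
                 feed_dict={model.condition: condition,
                            model.g_zr_dims: zr_mask,  # ZR negative items
                            model.mask: pm_mask})      # PM mask
    for _ in range(d_steps):
        sess.run(model.trainer_d,
                 feed_dict={model.condition: condition,
                            model.mask: pm_mask,
                            model.real_data: real_data})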
Code example #3
    def _create_loss(self):
        with tf.name_scope("loss"):
            self.loss = tf.losses.log_loss(self.labels, self.output) + \
                        self.lambda_bilinear * l2_loss(self.embedding_Q) + \
                        self.gamma_bilinear * l2_loss(self.embedding_Q_) + \
                        self.eta_bilinear * l2_loss(self.W)

            for i in range(min(len(self.n_hidden), len(self.reg_W))):
                if self.reg_W[i] > 0:
                    self.loss = self.loss + self.reg_W[i] * l2_loss(
                        self.weights['h%d' % i])
Code example #4
 def _create_loss(self):
     with tf.name_scope("loss"):
         p1, q1, self.output = self._create_inference(self.item_input)
         if self.is_pairwise:
             _, q2, self.output_neg = self._create_inference(
                 self.item_input_neg)
             result = self.output - self.output_neg
             self.loss = learner.pairwise_loss(
                 self.loss_function,
                 result) + self.reg_mlp * l2_loss(p1, q2, q1)
         else:
             self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                         self.reg_mlp * l2_loss(p1, q1)
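
Many _create_loss methods on this page delegate to learner.pairwise_loss and learner.pointwise_loss. A minimal sketch of what the pairwise dispatcher could look like (TensorFlow 1.x, tf imported as above), assuming string-keyed loss names; the actual learner module may support different or additional options:

def pairwise_loss(loss_function, y):
    # y holds score(positive) - score(negative) for each training pair
    name = loss_function.lower()
    if name == "bpr":
        return -tf.reduce_sum(tf.log_sigmoid(y))   # BPR: -log sigmoid(y)
    if name == "hinge":
        return tf.reduce_sum(tf.nn.relu(1.0 - y))  # hinge with margin 1
    if name == "square":
        return tf.reduce_sum(tf.square(1.0 - y))   # squared margin loss
    raise ValueError("unknown pairwise loss: %s" % loss_function)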
Code example #5
 def _create_loss(self):
     with tf.name_scope("loss"):
         # loss for L(Theta)
         p1, r1, q1, b1, self.output = self._create_inference(
             self.item_input)
         if self.is_pairwise:
             _, _, q2, b2, output_neg = self._create_inference(
                 self.item_input_neg)
             self.result = self.output - output_neg
             self.loss = learner.pairwise_loss(self.loss_function, self.result) + \
                         self.reg_mf * l2_loss(p1, r1, q2, q1, b1, b2, self.global_embedding)
         else:
             self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                         self.reg_mf * l2_loss(p1, r1, q1, b1, self.global_embedding)
Code example #6
 def _create_loss(self):
     with tf.name_scope("loss"):
         UI_u, IU_i, IL_i, LI_l, self.output = self._create_inference(
             self.item_input)
         if self.is_pairwise:
             _, IU_j, IL_j, _, output_neg = self._create_inference(
                 self.item_input_neg)
             self.result = self.output - output_neg
             self.loss = learner.pairwise_loss(self.loss_function, self.result) + \
                         self.reg_mf * l2_loss(UI_u, IU_i, IL_i, LI_l, IU_j, IL_j) + \
                         self.reg_w * l2_loss(self.W, self.h)
         else:
             self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                         self.reg_mf * l2_loss(UI_u, IU_i, IL_i, LI_l)
Code example #7
 def _create_loss(self):
     with tf.name_scope("loss"):
         p1, q1, self.output = self._create_inference(self.user_input, self.item_input, self.num_idx)
         if self.is_pairwise:
             _, q2, output_neg = self._create_inference(self.user_input_neg, self.item_input_neg, self.num_idx_neg)
             self.result = self.output - output_neg
             self.loss = learner.pairwise_loss(self.loss_function, self.result) + \
                         self.lambda_bilinear * l2_loss(p1) + \
                         self.gamma_bilinear * l2_loss(q2, q1)
         
         else:
             self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                         self.lambda_bilinear * l2_loss(p1) + \
                         self.gamma_bilinear * l2_loss(q1)
Code example #8
    def build_graph(self):
        self._create_placeholder()
        self._create_variable()

        item_embs = tf.nn.embedding_lookup(self.item_embeddings,
                                           self.item_seqs_ph)  # (b,l,d)
        user_emb = tf.nn.embedding_lookup(self.user_embeddings,
                                          self.user_ph)  # (b,d)
        self._reg_loss += l2_loss(item_embs, user_emb)

        union_out = self._forward(item_embs, user_emb)  # (b,d)
        train_ratings = self._train_rating(item_embs, user_emb,
                                           union_out)  # (b,2t)

        pos_ratings, neg_ratings = tf.split(train_ratings,
                                            [self.seq_T, self.seq_T],
                                            axis=1)
        loss = tf.reduce_sum(log_loss(pos_ratings - neg_ratings))

        final_loss = loss + self.reg * self._reg_loss

        train_opt = tf.train.AdamOptimizer(self.lr).minimize(final_loss)
        weights = self.feature_gate_item.trainable_weights + self.feature_gate_user.trainable_weights
        weights.extend([self.instance_gate_item, self.instance_gate_user])
        with tf.control_dependencies([train_opt]):
            self.train_opt = [
                tf.assign(weight, tf.clip_by_norm(weight, 1.0))
                for weight in weights
            ]

        # for testing
        self.bat_ratings = self._test_rating(item_embs, user_emb,
                                             union_out)  # (b,n)
Code example #9
File: NGCF.py Project: GuoTong96/LocalGCN
    def _create_loss(self):
        with tf.name_scope("loss"):

            self.pos_scores = tf.reduce_sum(tf.multiply(
                self.u_g_embeddings, self.pos_i_g_embeddings),
                                            axis=1)
            neg_scores = tf.reduce_sum(tf.multiply(self.u_g_embeddings,
                                                   self.neg_i_g_embeddings),
                                       axis=1)

            embedding_regularizer = l2_loss(self.u_g_embeddings,
                                            self.pos_i_g_embeddings,
                                            self.neg_i_g_embeddings)

            maxi = tf.nn.softplus(-(self.pos_scores - neg_scores))
            mf_loss = tf.reduce_sum(maxi)

            emb_loss = self.reg * embedding_regularizer

            #             for k in range(self.n_layers):
            #                 emb_loss = emb_loss + self.reg_w*(tf.reduce_sum(tf.square(self.weights['W_gc_%d' % k])) +
            #                                                   tf.reduce_sum(tf.square(self.weights['W_bi_%d' % k])) +
            #                                                   tf.reduce_sum(tf.square(self.weights['b_gc_%d' % k])) +
            #                                                   tf.reduce_sum(tf.square(self.weights['b_bi_%d' % k])))

            self.loss = mf_loss + emb_loss
Code example #10
File: GRU4Rec.py Project: zjfng1733/NeuRec-1
    def build_graph(self):
        self._create_variable()
        # get embedding and bias
        # b: batch size
        # l1: the dim of the first layer
        # ln: the dim of the last layer
        # size_y: the length of Y_ph, i.e., n_sample+batch_size

        cells = [tf.nn.rnn_cell.GRUCell(size, activation=self.hidden_act) for size in self.layers]
        drop_cell = [tf.nn.rnn_cell.DropoutWrapper(cell) for cell in cells]
        stacked_cell = tf.nn.rnn_cell.MultiRNNCell(drop_cell)
        inputs = tf.nn.embedding_lookup(self.input_embeddings, self.X_ph)  # (b, l1)
        outputs, state = stacked_cell(inputs, state=self.state_ph)
        self.u_emb = outputs  # outputs: (b, ln)
        self.final_state = state  # [(b, l1), (b, l2), ..., (b, ln)]

        # for training
        items_embed = tf.nn.embedding_lookup(self.item_embeddings, self.Y_ph)  # (size_y, ln)
        items_bias = tf.gather(self.item_biases, self.Y_ph)  # (size_y,)

        logits = tf.matmul(outputs, items_embed, transpose_b=True) + items_bias  # (b, size_y)
        logits = self.final_act(logits)

        loss = self.loss_fun(logits)

        # reg loss

        reg_loss = l2_loss(inputs, items_embed, items_bias)
        final_loss = loss + self.reg*reg_loss
        self.update_opt = tf.train.AdamOptimizer(self.lr).minimize(final_loss)
Code example #11
File: JCA.py Project: zjfng1733/NeuRec-1
    def _create_loss(self):
        with tf.name_scope("loss"):

            cost1 = tf.reduce_sum(self.pre_cost1)  # prediction squared error
            pre_cost2 = l2_loss(self.UW, self.UV, self.IW, self.IV, self.Ib1,
                                self.Ib2, self.Ub1, self.Ub2)
            cost2 = self.reg * 0.5 * pre_cost2  # regularization term

            self.cost = cost1 + cost2  # the loss function
Code example #12
    def _create_loss(self):
        with tf.name_scope("loss"):
            
            self.loss = - tf.reduce_sum(self.input_R*tf.log(self.output) + (1 - self.input_R)*tf.log(1 - self.output))

            self.reg_loss = self.reg * l2_loss(self.weights['encoder'], self.weights['decoder'],
                                               self.biases['encoder'], self.biases['decoder'],
                                               self.user_latent)
            self.loss = self.loss + self.reg_loss
Code example #13
File: DiffNet.py Project: zjfng1733/NeuRec-1
 def _create_loss(self):
     with tf.name_scope("loss"):
         # loss for L(Theta)
         # reg = l2_regularizer(self.reg_mf)
         #
         # reg_var = apply_regularization(reg, self.user_embedding + self.item_embedding)
         
         self.loss = tf.losses.sigmoid_cross_entropy(self.labels, self.output) + \
                     self.reg_mf * l2_loss(self.latest_user_latent, self.latest_item_latent)
Code example #14
    def __init__(self, sess, input_size, total_pasts, num_components,
                 component_size, belief_depth):
        self.sess = sess
        self.input_initializer = tf.placeholder(tf.float32, [1, input_size])
        self.pasts_initializer = tf.placeholder(tf.float32,
                                                [total_pasts, input_size])

        self.input = tf.Variable(self.input_initializer,
                                 trainable=False,
                                 collections=[])
        self.pasts = tf.Variable(self.pasts_initializer,
                                 trainable=False,
                                 collections=[])

        self.generated_thoughts = tf.zeros([1, input_size], dtype=tf.float32)
        self.backward_thoughts = tf.zeros([1, input_size], dtype=tf.float32)
        self.improve_focus_operations = []
        self.memorize_operations = []
        self.reset_memory_operations = []
        self.reseed_memory_operations = []
        self.components = []

        self.components.append(
            temporal_lobe.Temporal_Component(component_size, input_size,
                                             total_pasts, 3, "C"))
        for i in range(1, num_components):
            self.components.append(
                lobe.Component(component_size, input_size, total_pasts,
                               belief_depth, "C" + str(i)))

        for i in range(num_components):
            u, v = self.components[i].build_graphs(
                self.input,
                tf.reshape(self.pasts, [-1, total_pasts * input_size]))
            self.generated_thoughts = self.generated_thoughts + u
            self.backward_thoughts = self.backward_thoughts + u
            self.improve_focus_operations.append(
                self.components[i].get_improve_focus_operation())
            self.memorize_operations.append(
                self.components[i].get_memorize_operation())
            self.reset_memory_operations.append(
                self.components[i].get_reset_memory_operation())
            self.reseed_memory_operations.append(
                self.components[i].get_reseed_memory_operation())

        variables = [
            var for var in tf.global_variables() if "content" in var.name
        ]
        # print [var.name for var in variables]
        self.learn_content_operation = util.l2_loss(self.backward_thoughts,
                                                    self.input,
                                                    variables,
                                                    rate=0.01)

        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
Code example #15
File: IRGAN.py Project: zjfng1733/NeuRec-1
    def __init__(self, item_num, user_num, emb_dim, lamda, param=None, init_delta=0.05, learning_rate=0.05):
        self.itemNum = item_num
        self.userNum = user_num
        self.emb_dim = emb_dim
        self.lamda = lamda  # regularization parameters
        self.param = param
        self.init_delta = init_delta
        self.learning_rate = learning_rate
        self.d_params = []

        with tf.variable_scope('discriminator'):
            if self.param is None:
                self.user_embeddings = tf.Variable(
                    tf.random_uniform([self.userNum, self.emb_dim], minval=-self.init_delta, maxval=self.init_delta,
                                      dtype=tf.float32))
                self.item_embeddings = tf.Variable(
                    tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.init_delta, maxval=self.init_delta,
                                      dtype=tf.float32))
                self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
            else:
                self.user_embeddings = tf.Variable(self.param[0])
                self.item_embeddings = tf.Variable(self.param[1])
                self.item_bias = tf.Variable(self.param[2])

        self.d_params = [self.user_embeddings, self.item_embeddings, self.item_bias]

        # placeholder definition
        self.u = tf.placeholder(tf.int32)
        self.i = tf.placeholder(tf.int32)
        self.label = tf.placeholder(tf.float32)

        self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
        self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
        self.i_bias = tf.gather(self.item_bias, self.i)

        self.pre_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding), 1) + self.i_bias
        self.pre_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.label, logits=self.pre_logits) + \
                        self.lamda * l2_loss(self.u_embedding, self.i_embedding, self.i_bias)

        d_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.d_updates = d_opt.minimize(self.pre_loss, var_list=self.d_params)

        self.reward_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding),
                                           1) + self.i_bias
        self.reward = 2 * (tf.sigmoid(self.reward_logits) - 0.5)

        # for test stage, self.u: [batch_size]
        self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_b=True) + self.item_bias

        self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
        self.NLL = -tf.reduce_mean(tf.log(
            tf.gather(tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]), self.i))
        )
        # for dns sample
        self.dns_rating = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
Code example #16
File: LightGCN.py Project: zjfng1733/NeuRec-1
    def create_bpr_loss(self, users, pos_items, neg_items):
        pos_scores = inner_product(users, pos_items)
        neg_scores = inner_product(users, neg_items)

        regularizer = l2_loss(self.u_g_embeddings_pre,
                              self.pos_i_g_embeddings_pre,
                              self.neg_i_g_embeddings_pre)

        mf_loss = tf.reduce_sum(log_loss(pos_scores - neg_scores))

        emb_loss = self.reg * regularizer

        return mf_loss, emb_loss
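
inner_product and log_loss here are again repository helpers. Assuming the conventional definitions (a row-wise dot product, and log_loss(x) = softplus(-x) = -log sigmoid(x), which makes tf.reduce_sum(log_loss(pos_scores - neg_scores)) the standard BPR objective), minimal sketches would be:

def inner_product(a, b):
    # row-wise dot product: (b,d), (b,d) -> (b,)
    return tf.reduce_sum(tf.multiply(a, b), axis=-1)

def log_loss(x):
    # numerically stable -log(sigmoid(x))
    return tf.nn.softplus(-x)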
Code example #17
 def _create_loss(self):
     with tf.name_scope("loss"):
         # loss for L(Theta)
         p1, q1, b1, self.output = self._create_inference(
             self.item_input_pos)
         _, q2, b2, output_social = self._create_inference(
             self.item_input_social)
         _, q3, b3, output_neg = self._create_inference(self.item_input_neg)
         result1 = tf.divide(self.output - output_social, self.suk)
         result2 = output_social - output_neg
         self.loss = learner.pairwise_loss(self.loss_function, result1) + \
                     learner.pairwise_loss(self.loss_function, result2) + \
                     self.reg_mf * l2_loss(p1, q2, q1, q3, b1, b2, b3)
Code example #18
File: ConvNCF.py Project: zjfng1733/NeuRec-1
    def _create_loss(self):
        with tf.name_scope("loss"):
            # BPR loss for L(Theta)
            self.p1, self.q1, self.output = self._create_inference(
                self.item_input_pos)
            self.p2, self.q2, self.output_neg = self._create_inference(
                self.item_input_neg)
            self.result = self.output - self.output_neg
            self.loss = learner.pairwise_loss(self.loss_function, self.result)

            self.opt_loss = self.loss + self.lambda_bilinear * l2_loss(self.p1, self.q2, self.q1) + \
                            self.gamma_bilinear * self._regular([(self.W, self.b)]) + \
                            self.lambda_weight * (self._regular(self.P) + self._regular([(self.W, self.b)]))
Code example #19
    def _create_loss(self):
        with tf.name_scope("loss"):
            self.output = tf.reduce_sum(tf.multiply(self.u_embeddings,
                                                    self.pos_i_embeddings),
                                        axis=1)
            output_neg = tf.reduce_sum(tf.multiply(self.u_embeddings,
                                                   self.neg_j_embeddings),
                                       axis=1)
            regularizer = self.reg * l2_loss(self.u_embeddings,
                                             self.pos_i_embeddings,
                                             self.neg_j_embeddings)

            self.loss = learner.pairwise_loss(
                self.loss_function, self.output - output_neg) + regularizer
Code example #20
File: IRGAN.py Project: zjfng1733/NeuRec-1
    def __init__(self, item_num, user_num, emb_dim, lamda, param=None, init_delta=0.05, learning_rate=0.05):
        self.itemNum = item_num
        self.userNum = user_num
        self.emb_dim = emb_dim
        self.lamda = lamda  # regularization parameters
        self.param = param
        self.init_delta = init_delta
        self.learning_rate = learning_rate
        self.g_params = []

        with tf.variable_scope('generator'):
            if self.param is None:
                self.user_embeddings = tf.Variable(
                    tf.random_uniform([self.userNum, self.emb_dim], minval=-self.init_delta, maxval=self.init_delta,
                                      dtype=tf.float32))
                self.item_embeddings = tf.Variable(
                    tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.init_delta, maxval=self.init_delta,
                                      dtype=tf.float32))
                self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
            else:
                self.user_embeddings = tf.Variable(self.param[0])
                self.item_embeddings = tf.Variable(self.param[1])
                self.item_bias = tf.Variable(self.param[2])

            self.g_params = [self.user_embeddings, self.item_embeddings, self.item_bias]

        self.u = tf.placeholder(tf.int32)
        self.i = tf.placeholder(tf.int32)
        self.reward = tf.placeholder(tf.float32)

        self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
        self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
        self.i_bias = tf.gather(self.item_bias, self.i)

        self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
        self.i_prob = tf.gather(
            tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]),
            self.i)

        self.gan_loss = -tf.reduce_mean(tf.log(self.i_prob) * self.reward) + \
                        self.lamda * l2_loss(self.u_embedding, self.i_embedding, self.i_bias)

        g_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.gan_updates = g_opt.minimize(self.gan_loss, var_list=self.g_params)

        # for test stage, self.u: [batch_size]
        self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_a=False,
                                    transpose_b=True) + self.item_bias
Code example #21
    def _create_loss(self):
        with tf.name_scope("loss"):
            # loss for L(Theta)
            self.output = self._create_inference(self.item_input_pos)
            self.output_neg = self._create_inference(self.item_input_neg)
            self.result = self.output - self.output_neg
            # self.loss = tf.reduce_sum(tf.log(1 + tf.exp(-self.result))) # this is numerically unstable
            self.loss = tf.reduce_sum(tf.nn.softplus(-self.result))

            # loss to be optimized
            self.opt_loss = self.loss + self.reg * l2_loss(
                self.embedding_P, self.embedding_Q)

            if self.adver:
                # loss for L(Theta + adv_Delta)
                self.output_adv = self._create_inference_adv(
                    self.item_input_pos)
                self.output_neg_adv = self._create_inference_adv(
                    self.item_input_neg)
                self.result_adv = self.output_adv - self.output_neg_adv
                # self.loss_adv = tf.reduce_sum(tf.log(1 + tf.exp(-self.result_adv)))
                self.loss_adv = tf.reduce_sum(tf.nn.softplus(-self.result_adv))
                self.opt_loss += self.reg_adv * self.loss_adv
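
The adver branch above follows the APR (Adversarial Personalized Ranking) pattern: _create_inference_adv presumably scores with perturbed embeddings P + delta_P and Q + delta_Q. A sketch of how such FGSM-style perturbations are typically constructed; delta_P, delta_Q, and eps are assumed names, not taken from this file:

        # Hypothetical FGSM-style update for the adversarial perturbations:
        # delta_P / delta_Q would be non-trainable variables shaped like the
        # embedding tables, and eps bounds the perturbation norm.
        grad_P, grad_Q = tf.gradients(self.loss,
                                      [self.embedding_P, self.embedding_Q])
        self.update_delta = [
            tf.assign(self.delta_P, self.eps * tf.nn.l2_normalize(
                tf.stop_gradient(grad_P), axis=1)),
            tf.assign(self.delta_Q, self.eps * tf.nn.l2_normalize(
                tf.stop_gradient(grad_Q), axis=1)),
        ]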
Code example #22
    def _train_rating(self, item_embs, user_emb, union_out):
        w2 = tf.nn.embedding_lookup(self.W2, self.predict_item)  # (b,2t,d)
        b2 = tf.gather(self.b2, self.predict_item)  # (b,2t)
        self._reg_loss += l2_loss(w2, b2)

        # MF
        term3 = tf.squeeze(tf.matmul(w2, tf.expand_dims(
            user_emb, axis=2)))  # (b,2t,d)x(b,d,1)->(b,2t,1)->(b,2t)
        res = b2 + term3  # (b,2t)

        # union-level
        term4 = tf.matmul(tf.expand_dims(union_out, axis=1),
                          w2,
                          transpose_b=True)  # (b,1,d)x(b,d,2t)->(b,1,2t)
        res += tf.squeeze(term4)  # (b,2t)

        # item-item product
        rel_score = tf.matmul(item_embs, w2,
                              transpose_b=True)  # (b,l,d)x(b,d,2t)->(b,l,2t)
        rel_score = tf.reduce_sum(rel_score, axis=1)  # (b,2t)

        res += rel_score  # (b,2t)
        return res
Code example #23
    def build_graph(self):
        self._create_variable()
        hidden_ori = self._encoding(self.users_ph, self.sp_mat_ph)  # (b,d)

        # decoding
        de_item_embs = tf.nn.embedding_lookup(self.de_embeddings,
                                              self.items_ph)  # (l,d)
        de_bias = tf.gather(self.de_bias, self.items_ph)  # (l,)
        hidden = tf.nn.embedding_lookup(hidden_ori, self.remap_idx_ph)
        ratings = inner_product(hidden, de_item_embs) + de_bias

        # reg loss
        item_ids, _ = tf.unique(self.items_ph)
        reg_loss = l2_loss(
            tf.nn.embedding_lookup(self.en_embeddings, item_ids),
            self.en_offset,
            tf.nn.embedding_lookup(self.user_embeddings, self.users_ph),
            tf.nn.embedding_lookup(self.de_embeddings, item_ids),
            tf.gather(self.de_bias, item_ids))

        if self.loss_func == "square":
            model_loss = tf.squared_difference(ratings, self.labels_ph)
        elif self.loss_func == "sigmoid_cross_entropy":
            model_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=ratings, labels=self.labels_ph)
        else:
            raise ValueError("%s is an invalid loss function." %
                             self.loss_func)

        final_loss = tf.reduce_sum(model_loss) + self.reg * reg_loss

        self.train_opt = tf.train.AdamOptimizer(self.lr).minimize(
            final_loss, name="train_opt")

        # for evaluation
        self.batch_ratings = tf.matmul(
            hidden_ori, self.de_embeddings, transpose_b=True) + self.de_bias
Code example #24
    def create_bpr_loss(self):
        batch_u_embeddings = tf.nn.embedding_lookup(self.ua_embeddings,
                                                    self.users)
        batch_pos_i_embeddings = tf.nn.embedding_lookup(
            self.ia_embeddings, self.pos_items)
        batch_neg_i_embeddings = tf.nn.embedding_lookup(
            self.ia_embeddings, self.neg_items)
        batch_u_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.users)
        batch_pos_i_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.pos_items)
        batch_neg_i_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.neg_items)
        regularizer = l2_loss(batch_u_embeddings_pre,
                              batch_pos_i_embeddings_pre,
                              batch_neg_i_embeddings_pre)
        emb_loss = self.reg * regularizer

        pos_scores = inner_product(batch_u_embeddings, batch_pos_i_embeddings)
        neg_scores = inner_product(batch_u_embeddings, batch_neg_i_embeddings)
        bpr_loss = tf.reduce_sum(log_loss(pos_scores - neg_scores))
        # self.score_sigmoid = tf.sigmoid(pos_scores)

        self.grad_score = 1 - tf.sigmoid(pos_scores - neg_scores)
        self.grad_user_embed = (
            1 - tf.sigmoid(pos_scores - neg_scores)) * tf.sqrt(
                tf.reduce_sum(tf.multiply(batch_u_embeddings,
                                          batch_u_embeddings),
                              axis=1))
        self.grad_item_embed = (
            1 - tf.sigmoid(pos_scores - neg_scores)) * tf.sqrt(
                tf.reduce_sum(tf.multiply(batch_pos_i_embeddings,
                                          batch_pos_i_embeddings),
                              axis=1))

        return bpr_loss, emb_loss
Code example #25
    def build_graph(self):
        """
        *********************************************************
        Create Placeholders for Input Data & Dropout.
        """
        # placeholder definition
        self.users_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.pos_items_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.anchor = tf.placeholder(tf.float32,
                                     shape=(self.localmodel, self.localmodel))

        for i in range(self.localmodel):
            exec(
                "self.model_adj%d = tf.sparse_placeholder(tf.float32,name='model_adj%d')"
                % (i, i))

        self.node_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        """
        *********************************************************
        Create Model Parameters (i.e., Initialize Weights).
        """
        # initialization of model parameters
        self.weights = self._init_weights()
        self._init_constant()
        """
        *********************************************************
        Compute Graph-based Representations of all users & items via Message-Passing Mechanism of Graph Neural Networks.
        Different Convolutional Layers:
            1. ngcf: defined in 'Neural Graph Collaborative Filtering', SIGIR2019;
            2. gcn:  defined in 'Semi-Supervised Classification with Graph Convolutional Networks', ICLR2017;
            3. gcmc: defined in 'Graph Convolutional Matrix Completion', KDD2018;
        """
        ego_embeddings = tf.concat(
            [self.weights['user_embedding'], self.weights['item_embedding']],
            axis=0)
        for n_cluster in range(self.localmodel):
            self.all_embeddings_temp = self._create_lightgcn_embed(
                self.norm_adj[n_cluster], ego_embeddings)
            if n_cluster == 0:
                self.all_embeddings = [self.all_embeddings_temp]
                continue
            self.all_embeddings += [self.all_embeddings_temp]
        self.all_embeddings = tf.stack(self.all_embeddings, 1)
        self.latent = tf.reduce_mean(self.all_embeddings,
                                     axis=0,
                                     keepdims=False)

        self.all_embeddings = tf.reduce_mean(self.all_embeddings,
                                             axis=1,
                                             keepdims=False)
        #         self.all_embeddings = tf.reduce_max(self.all_embeddings, reduction_indices=[1])
        self.ua_embeddings, self.ia_embeddings = tf.split(
            tf.reshape(self.all_embeddings,
                       (self.n_users + self.n_items, self.emb_dim)),
            [self.n_users, self.n_items], 0)

        local_embeddings = tf.concat([
            self.weights['local_user_embedding'],
            self.weights['local_item_embedding']
        ],
                                     axis=0)
        for n_cluster in range(self.localmodel):
            self.local_embeddings_temp = self._create_lightgcn_embed(
                self.norm_adj[n_cluster], local_embeddings)
            if n_cluster == 0:
                self.local_embeddings_all = [self.local_embeddings_temp]
                continue
            self.local_embeddings_all += [self.local_embeddings_temp]
        self.local_embeddings_all = tf.stack(self.local_embeddings_all, 0)
        self.local_embeddings_all = tf.matmul(
            self.anchor,
            tf.reshape(self.local_embeddings_all, [self.localmodel, -1]))
        self.local_embeddings_all = tf.reduce_mean(self.local_embeddings_all,
                                                   axis=0,
                                                   keepdims=False)
        self.local_u, self.local_i = tf.split(
            tf.reshape(self.local_embeddings_all,
                       (self.n_users + self.n_items, self.emb_dim)),
            [self.n_users, self.n_items], 0)
        """
        *********************************************************
        Inference for the testing phase.
        """
        # for prediction
        self.item_embeddings_final = tf.Variable(tf.zeros(
            [self.n_items, self.emb_dim]),
                                                 dtype=tf.float32,
                                                 name="item_embeddings_final",
                                                 trainable=False)
        self.user_embeddings_final = tf.Variable(tf.zeros(
            [self.n_users, self.emb_dim]),
                                                 dtype=tf.float32,
                                                 name="user_embeddings_final",
                                                 trainable=False)

        self.assign_opt = [
            tf.assign(self.user_embeddings_final, self.ua_embeddings),
            tf.assign(self.item_embeddings_final, self.ia_embeddings)
        ]

        u_embed = tf.nn.embedding_lookup(self.user_embeddings_final,
                                         self.users_ph)
        self.batch_ratings = tf.matmul(u_embed,
                                       self.item_embeddings_final,
                                       transpose_a=False,
                                       transpose_b=True)
        self.u_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.user_idx)
        self.i_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.item_idx)
        """
        *********************************************************
        Generate Predictions & Optimize via fast loss.
        """
        term1 = tf.matmul(self.ua_embeddings,
                          self.ua_embeddings,
                          transpose_a=True)
        term2 = tf.matmul(self.ia_embeddings,
                          self.ia_embeddings,
                          transpose_a=True)
        loss1 = tf.reduce_sum(tf.multiply(term1, term2))

        user_embed = tf.nn.embedding_lookup(self.ua_embeddings, self.user_idx)
        item_embed = tf.nn.embedding_lookup(self.ia_embeddings, self.item_idx)
        pos_ratings = inner_product(user_embed, item_embed)

        loss1 += tf.reduce_sum((self.r_alpha - 1) * tf.square(pos_ratings) -
                               2.0 * self.r_alpha * pos_ratings)
        # reg
        reg_loss = l2_loss(self.u_g_embeddings_pre, self.i_g_embeddings_pre)

        self.loss = loss1 + self.fast_reg * reg_loss

        self.opt = tf.train.AdagradOptimizer(learning_rate=self.lr).minimize(
            self.loss)

        self.local_u_pre = tf.nn.embedding_lookup(
            self.weights['local_user_embedding'], self.user_idx)
        self.local_i_pre = tf.nn.embedding_lookup(
            self.weights['local_item_embedding'], self.item_idx)
        local_term1 = tf.matmul(self.local_u, self.local_u, transpose_a=True)
        local_term2 = tf.matmul(self.local_i, self.local_i, transpose_a=True)
        loss2 = tf.reduce_sum(tf.multiply(local_term1, local_term2))

        local_user_embed = tf.nn.embedding_lookup(self.local_u, self.user_idx)
        local_item_embed = tf.nn.embedding_lookup(self.local_i, self.item_idx)
        local_pos_ratings = inner_product(local_user_embed, local_item_embed)

        loss2 += tf.reduce_sum((self.r_alpha - 1) *
                               tf.square(local_pos_ratings) -
                               2.0 * self.r_alpha * local_pos_ratings)
        # reg
        reg_loss = l2_loss(self.local_u_pre, self.local_i_pre)

        self.local_loss = loss2 + self.fast_reg * reg_loss

        self.local_opt = tf.train.AdagradOptimizer(
            learning_rate=self.lr).minimize(self.local_loss)
Code example #26
File: FastLightGCN.py Project: GuoTong96/LocalGCN
    def build_graph(self):
        """
        *********************************************************
        Create Placeholders for Input Data & Dropout.
        """
        # placeholder definition
        self.users_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.pos_items_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.neg_items_ph = tf.placeholder(tf.int32, shape=(None, ))

        # dropout: node dropout (applied to the ego-networks);
        #          since node dropout has a higher computational cost,
        #          use the 'node_dropout_flag' to indicate whether to apply it;
        #          message dropout (applied to the convolution operations).

        self.node_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        """
        *********************************************************
        Create Model Parameters (i.e., Initialize Weights).
        """
        # initialization of model parameters
        self.weights = self._init_weights()
        self._init_constant()
        """
        *********************************************************
        Compute Graph-based Representations of all users & items via Message-Passing Mechanism of Graph Neural Networks.
        Different Convolutional Layers:
            1. ngcf: defined in 'Neural Graph Collaborative Filtering', SIGIR2019;
            2. gcn:  defined in 'Semi-Supervised Classification with Graph Convolutional Networks', ICLR2017;
            3. gcmc: defined in 'Graph Convolutional Matrix Completion', KDD2018;
        """
        if self.alg_type in ['lightgcn']:
            self.ego_embeddings = tf.concat([
                self.weights['user_embedding'], self.weights['item_embedding']
            ],
                                            axis=0)

            self.all_embeddings = self._create_lightgcn_embed(
                self.ego_embeddings)

            self.ua_embeddings, self.ia_embeddings = tf.split(
                self.all_embeddings, [self.n_users, self.n_items], 0)

        elif self.alg_type in ['ngcf']:
            self.ua_embeddings, self.ia_embeddings = self._create_ngcf_embed()

        elif self.alg_type in ['gcn']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcn_embed()

        elif self.alg_type in ['gcmc']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcmc_embed()
        """
        *********************************************************
        Inference for the testing phase.
        """
        # for prediction
        self.item_embeddings_final = tf.Variable(tf.zeros(
            [self.n_items, self.emb_dim]),
                                                 dtype=tf.float32,
                                                 name="item_embeddings_final",
                                                 trainable=False)
        self.user_embeddings_final = tf.Variable(tf.zeros(
            [self.n_users, self.emb_dim]),
                                                 dtype=tf.float32,
                                                 name="user_embeddings_final",
                                                 trainable=False)

        self.assign_opt = [
            tf.assign(self.user_embeddings_final, self.ua_embeddings),
            tf.assign(self.item_embeddings_final, self.ia_embeddings)
        ]
        self.emb = tf.concat(
            [self.user_embeddings_final, self.item_embeddings_final], 0)
        u_embed = tf.nn.embedding_lookup(self.user_embeddings_final,
                                         self.users_ph)
        self.batch_ratings = tf.matmul(u_embed,
                                       self.item_embeddings_final,
                                       transpose_a=False,
                                       transpose_b=True)

        self.u_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.user_idx)
        self.i_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.item_idx)
        """
        *********************************************************
        Generate Predictions & Optimize via BPR loss.
        """
        # rating
        term1 = tf.matmul(self.ua_embeddings,
                          self.ua_embeddings,
                          transpose_a=True)
        term2 = tf.matmul(self.ia_embeddings,
                          self.ia_embeddings,
                          transpose_a=True)
        loss1 = tf.reduce_sum(tf.multiply(term1, term2))

        user_embed = tf.nn.embedding_lookup(self.ua_embeddings, self.user_idx)
        item_embed = tf.nn.embedding_lookup(self.ia_embeddings, self.item_idx)
        pos_ratings = inner_product(user_embed, item_embed)

        loss1 += tf.reduce_sum((self.r_alpha - 1) * tf.square(pos_ratings) -
                               2.0 * self.r_alpha * pos_ratings)
        # reg
        reg_loss = l2_loss(self.u_g_embeddings_pre, self.i_g_embeddings_pre)

        self.loss = loss1 + self.fast_reg * reg_loss

        # self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss)
        self.opt = tf.train.AdagradOptimizer(learning_rate=self.lr).minimize(
            self.loss)
Code example #27
File: NPE.py Project: zjfng1733/NeuRec-1
 def _create_loss(self):
     with tf.name_scope("loss"):
         UI_u, IU_i, LI_l, self.output = self._create_inference()
         self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                     self.reg * l2_loss(UI_u, IU_i, LI_l)
Code example #28
File: ConvNCF.py Project: zjfng1733/NeuRec-1
 def _regular(self, params):
     res = 0
     for param in params:
         res += l2_loss(param[0], param[1])
     return res
Code example #29
 def _create_loss(self):
     with tf.name_scope("loss"):
         # loss for L(Theta)
         p1, q1, r1, self.output = self._create_inference()
         self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
                     self.reg_mf * l2_loss(p1, r1, q1)
Code example #30
File: MF.py Project: JaireYu/NeuRec
#!/usr/local/bin/python