def _create_loss(self):
    """Build the social-BPR training loss.

    Scores positive, social, and negative items with shared parameters,
    forms the two SBPR preference differences (the pos-vs-social gap is
    scaled by ``self.suk``), and adds an L2 penalty over every embedding
    and bias pulled in by the three inference calls.

    Side effects: sets ``self.output`` and ``self.loss``.
    """
    with tf.name_scope("loss"):
        # Three forward passes over the same model parameters.
        p1, q1, b1, self.output = self._create_inference(self.item_input_pos)
        _, q2, b2, output_social = self._create_inference(self.item_input_social)
        _, q3, b3, output_neg = self._create_inference(self.item_input_neg)

        # SBPR-style ranking differences; suk rescales the pos/social gap.
        diff_pos_social = tf.divide(self.output - output_social, self.suk)
        diff_social_neg = output_social - output_neg

        # Squared-L2 helper; terms kept in the original summation order.
        sq = lambda t: tf.reduce_sum(tf.square(t))
        reg = self.reg_mf * (sq(p1) + sq(q2) + sq(q1) + sq(q3)
                             + sq(b1) + sq(b2) + sq(b3))

        self.loss = (learner.pairwise_loss(self.loss_function, diff_pos_social)
                     + learner.pairwise_loss(self.loss_function, diff_social_neg)
                     + reg)
def _create_loss(self):
    """Build the BPR training loss and the regularized optimization loss.

    Side effects: sets ``self.p1``/``self.q1``/``self.p2``/``self.q2``,
    ``self.output``/``self.output_neg``, ``self.result``, ``self.loss``,
    and ``self.opt_loss``.
    """
    with tf.name_scope("loss"):
        # BPR loss for L(Theta): score positives and sampled negatives.
        self.p1, self.q1, self.output = self._create_inference(self.item_input_pos)
        self.p2, self.q2, self.output_neg = self._create_inference(self.item_input_neg)
        self.result = self.output - self.output_neg
        self.loss = learner.pairwise_loss(self.loss_function, self.result)

        # Squared-L2 helper; summation order matches the original graph.
        sq = lambda t: tf.reduce_sum(tf.square(t))
        embedding_reg = self.lambda_bilinear * (sq(self.p1) + sq(self.q2) + sq(self.q1))
        # NOTE(review): (W, b) is penalized under BOTH gamma_bilinear and
        # lambda_weight — presumably intentional (two weighted penalties),
        # but worth confirming against the paper/config.
        wb_reg = self.gamma_bilinear * self._regular([(self.W, self.b)])
        weight_reg = self.lambda_weight * (self._regular(self.P)
                                           + self._regular([(self.W, self.b)]))

        self.opt_loss = self.loss + embedding_reg + wb_reg + weight_reg
def _create_loss(self):
    """Build the training loss.

    Uses a pairwise (BPR-style) loss over positive/negative item scores
    when ``self.ispairwise`` is truthy, otherwise a pointwise loss
    against ``self.lables``; an L2 penalty over the touched embeddings
    is added in both branches.

    Side effects: sets ``self.output`` (and ``self.output_neg`` in the
    pairwise branch) and ``self.loss``.
    """
    with tf.name_scope("loss"):
        p1, q1, self.output = self._create_inference(self.item_input)
        # Idiomatic truth test (was `== True`).
        if self.ispairwise:
            _, q2, self.output_neg = self._create_inference(
                self.item_input_neg)
            result = self.output - self.output_neg
            self.loss = learner.pairwise_loss(self.loss_function, result) \
                + self.reg_mf * (tf.reduce_sum(tf.square(p1))
                                 + tf.reduce_sum(tf.square(q2))
                                 + tf.reduce_sum(tf.square(q1)))
        else:
            # NOTE(review): `self.lables` looks like a typo for `labels`,
            # but the attribute is defined elsewhere, so the name is kept.
            self.loss = learner.pointwise_loss(
                self.loss_function, self.lables, self.output) \
                + self.reg_mlp * (tf.reduce_sum(tf.square(p1))
                                  + tf.reduce_sum(tf.square(q1)))
def _create_loss(self):
    """Build the pairwise (BPR) loss from user/item embedding dot products.

    Side effects: sets ``self.output`` and ``self.loss``.
    """
    with tf.name_scope("loss"):
        # Per-example dot products: positive and negative item scores.
        self.output = tf.reduce_sum(
            tf.multiply(self.u_embeddings, self.pos_i_embeddings), axis=1)
        neg_scores = tf.reduce_sum(
            tf.multiply(self.u_embeddings, self.neg_j_embeddings), axis=1)

        # L2 penalty over all three embedding lookups involved.
        l2_penalty = self.reg * (
            tf.reduce_sum(tf.square(self.u_embeddings))
            + tf.reduce_sum(tf.square(self.pos_i_embeddings))
            + tf.reduce_sum(tf.square(self.neg_j_embeddings)))

        ranking_loss = learner.pairwise_loss(
            self.loss_function, self.output - neg_scores)
        self.loss = ranking_loss + l2_penalty
def _create_loss(self):
    """Build the training loss.

    Pairwise (BPR-style) over positive/negative scores when
    ``self.ispairwise`` is truthy, otherwise pointwise against
    ``self.lables``. User-side embeddings are penalized with
    ``lambda_bilinear`` and item-side with ``gamma_bilinear``.

    Side effects: sets ``self.output``, ``self.result`` (pairwise
    branch only), and ``self.loss``.
    """
    with tf.name_scope("loss"):
        p1, q1, self.output = self._create_inference(
            self.user_input, self.item_input, self.num_idx)
        # Idiomatic truth test (was `== True`).
        if self.ispairwise:
            _, q2, output_neg = self._create_inference(
                self.user_input_neg, self.item_input_neg, self.num_idx_neg)
            self.result = self.output - output_neg
            self.loss = learner.pairwise_loss(self.loss_function, self.result) \
                + self.lambda_bilinear * tf.reduce_sum(tf.square(p1)) \
                + self.gamma_bilinear * (tf.reduce_sum(tf.square(q2))
                                         + tf.reduce_sum(tf.square(q1)))
        else:
            # NOTE(review): `self.lables` looks like a typo for `labels`,
            # but the attribute is defined elsewhere, so the name is kept.
            self.loss = learner.pointwise_loss(
                self.loss_function, self.lables, self.output) \
                + self.lambda_bilinear * tf.reduce_sum(tf.square(p1)) \
                + self.gamma_bilinear * tf.reduce_sum(tf.square(q1))
def _create_loss(self):
    """Build the training loss over UI/IU/IL/LI embeddings.

    Pairwise (BPR-style) when ``self.ispairwise`` is truthy, otherwise
    pointwise against ``self.lables``; both branches add an L2 penalty
    over the embeddings touched by inference.

    Side effects: sets ``self.output``, ``self.result`` (pairwise
    branch only), and ``self.loss``.
    """
    with tf.name_scope("loss"):
        # loss for L(Theta)
        UI_u, IU_i, IL_i, LI_l, self.output = self._create_inference(
            self.item_input)
        # Idiomatic truth test (was `== True`).
        if self.ispairwise:
            _, IU_j, IL_j, _, output_neg = self._create_inference(
                self.item_input_neg)
            self.result = self.output - output_neg
            # BUGFIX: the original summed tf.reduce_sum(tf.square(LI_l))
            # TWICE (copy-paste duplicate), double-regularizing LI_l; the
            # pointwise branch counts it once, and so does this one now.
            self.loss = learner.pairwise_loss(self.loss_function, self.result) \
                + self.reg_mf * (tf.reduce_sum(tf.square(UI_u))
                                 + tf.reduce_sum(tf.square(IU_i))
                                 + tf.reduce_sum(tf.square(IL_i))
                                 + tf.reduce_sum(tf.square(LI_l))
                                 + tf.reduce_sum(tf.square(IU_j))
                                 + tf.reduce_sum(tf.square(IL_j)))
        else:
            # NOTE(review): `self.lables` looks like a typo for `labels`,
            # but the attribute is defined elsewhere, so the name is kept.
            self.loss = learner.pointwise_loss(
                self.loss_function, self.lables, self.output) \
                + self.reg_mf * (tf.reduce_sum(tf.square(UI_u))
                                 + tf.reduce_sum(tf.square(IU_i))
                                 + tf.reduce_sum(tf.square(IL_i))
                                 + tf.reduce_sum(tf.square(LI_l)))