def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1, q1, b1, self.output = self._create_inference( self.item_input_pos) _, q2, b2, output_social = self._create_inference( self.item_input_social) _, q3, b3, output_neg = self._create_inference(self.item_input_neg) result1 = tf.divide(self.output - output_social, self.suk) result2 = output_social - output_neg self.loss = learner.pairwise_loss(self.loss_function,result1)+learner.pairwise_loss(self.loss_function,result2)+ self.reg_mf * ( tf.reduce_sum(tf.square(p1)) \ + tf.reduce_sum(tf.square(q2)) + tf.reduce_sum(tf.square(q1))+tf.reduce_sum(tf.square(q3))+tf.reduce_sum(tf.square(b1))+tf.reduce_sum(tf.square(b2))+tf.reduce_sum(tf.square(b3)))
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1, q1, b1, self.output = self._create_inference( self.item_input_pos) _, q2, b2, output_social = self._create_inference( self.item_input_social) _, q3, b3, output_neg = self._create_inference(self.item_input_neg) result1 = tf.divide(self.output - output_social, self.suk) result2 = output_social - output_neg self.loss = learner.pairwise_loss(self.loss_function, result1) + \ learner.pairwise_loss(self.loss_function, result2) + \ self.reg_mf * l2_loss(p1, q2, q1, q3, b1, b2, b3)
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) # loss for L(Theta) UI_u, IU_i, IL_i, LI_l, predict_vector = self._create_inference( self.item_input) if self.ispairwise.lower() == "true": self.output = tf.reduce_sum(predict_vector, 1) _, IU_j, IL_j, _, predict_vector_neg = self._create_inference( self.item_input_neg) output_neg = tf.reduce_sum(predict_vector_neg, 1) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function, self.result) + self.reg_mf * ( tf.reduce_sum(tf.square(UI_u)) + tf.reduce_sum(tf.square(IU_i)) + tf.reduce_sum(tf.square(IL_i)) + tf.reduce_sum(tf.square(LI_l)) + tf.reduce_sum(tf.square(LI_l)) + tf.reduce_sum(tf.square(IU_j)) + tf.reduce_sum(tf.square(IL_j))) + \ self.reg_w * (tf.reduce_sum(tf.square(self.W)) + tf.reduce_sum(tf.square(self.h))) else: prediction = tf.layers.dense(inputs=predict_vector, units=1, activation=tf.nn.sigmoid) self.output = tf.squeeze(prediction) self.loss = learner.pointwise_loss( self.loss_function, self.labels, self.output) + self.reg_mf * ( tf.reduce_sum(tf.square(UI_u)) + tf.reduce_sum( tf.square(IU_i)) + tf.reduce_sum(tf.square(IL_i)) + tf.reduce_sum(tf.square(LI_l)))
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1, q1, r1, predict_vector = self._create_inference( self.item_input) if self.ispairwise.lower() == "true": self.output = tf.reduce_sum(predict_vector, 1) _, q2, _, predict_vector_neg = self._create_inference( self.item_input_neg) output_neg = tf.reduce_sum(predict_vector_neg, 1) self.result = self.output - output_neg self.loss = learner.pairwise_loss( self.loss_function, self.result) + self.reg_mf * ( tf.reduce_sum(tf.square(p1)) + tf.reduce_sum( tf.square(r1)) + tf.reduce_sum(tf.square(q2)) + tf.reduce_sum(tf.square(q1))) else: prediction = tf.layers.dense(inputs=predict_vector, units=1, activation=tf.nn.sigmoid) self.output = tf.squeeze(prediction) self.loss = learner.pointwise_loss( self.loss_function, self.lables, self.output) + self.reg_mf * (tf.reduce_sum( tf.square(p1)) + tf.reduce_sum(tf.square(r1)) + tf.reduce_sum(tf.square(q1)))
def _create_loss(self): with tf.name_scope("loss"): self.output = tf.reduce_sum(tf.multiply(self.u_embeddings, self.pos_i_embeddings), axis=1) output_neg = tf.reduce_sum(tf.multiply(self.u_embeddings, self.neg_j_embeddings), axis=1) regularizer = self.reg * ( tf.reduce_sum(tf.square(self.u_embeddings)) \ + tf.reduce_sum(tf.square(self.pos_i_embeddings)) + tf.reduce_sum(tf.square(self.neg_j_embeddings))) self.loss = learner.pairwise_loss(self.loss_function,self.output - output_neg)+ regularizer
def _create_loss(self): with tf.name_scope("loss"): p1, q1, self.output = self._create_inference(self.item_input) if self.is_pairwise is True: _, q2, self.output_neg = self._create_inference(self.item_input_neg) result = self.output - self.output_neg self.loss = learner.pairwise_loss(self.loss_function, result) + self.reg_mlp * l2_loss(p1, q2, q1) else: self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \ self.reg_mlp * l2_loss(p1, q1)
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1,q1,r1,self.output = self._create_inference(self.item_input) if self.ispairwise.lower() =="true": _, q2,_,output_neg = self._create_inference(self.item_input_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function,self.result) + self.reg_mf * ( tf.reduce_sum(tf.square(p1)) \ +tf.reduce_sum(tf.square(r1)) + tf.reduce_sum(tf.square(q2)) + tf.reduce_sum(tf.square(q1))) else : self.loss = learner.pointwise_loss(self.loss_function,self.lables,self.output) + self.reg_mf * (tf.reduce_sum(tf.square(p1)) \ +tf.reduce_sum(tf.square(r1))+ tf.reduce_sum(tf.square(q1)))
def _create_loss(self): with tf.name_scope("loss"): # BPR loss for L(Theta) self.p1, self.q1, self.output = self._create_inference( self.item_input_pos) self.p2, self.q2, self.output_neg = self._create_inference( self.item_input_neg) self.result = self.output - self.output_neg self.loss = learner.pairwise_loss(self.loss_function, self.result) self.opt_loss = self.loss + self.lambda_bilinear * l2_loss(self.p1, self.q2, self.q1) + \ self.gamma_bilinear * self._regular([(self.W, self.b)]) + \ self.lambda_weight * (self._regular(self.P) + self._regular([(self.W, self.b)]))
def _create_loss(self): with tf.name_scope("loss"): p1, q1, self.output = self._create_inference(self.user_input, self.item_input, self.num_idx) if self.is_pairwise is True: _, q2, output_neg = self._create_inference(self.user_input_neg, self.item_input_neg, self.num_idx_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function, self.result) + \ self.lambda_bilinear * l2_loss(p1) + \ self.gamma_bilinear * l2_loss(q2, q1) else: self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \ self.lambda_bilinear * l2_loss(p1) + \ self.gamma_bilinear * l2_loss(q1)
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1, r1, q1, b1, self.output = self._create_inference( self.item_input) if self.is_pairwise is True: _, _, q2, b2, output_neg = self._create_inference( self.item_input_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function, self.result) + \ self.reg_mf * l2_loss(p1, r1, q2, q1, b1, b2, self.global_embedding) else: self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \ self.reg_mf * l2_loss(p1, r1, q1, b1, self.global_embedding)
def _create_loss(self): with tf.name_scope("loss"): UI_u, IU_i, IL_i, LI_l, self.output = self._create_inference( self.item_input) if self.is_pairwise is True: _, IU_j, IL_j, _, output_neg = self._create_inference( self.item_input_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function, self.result) + \ self.reg_mf * l2_loss(UI_u, IU_i, IL_i, LI_l, IU_j, IL_j) + \ self.reg_w * l2_loss(self.W, self.h) else: self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \ self.reg_mf * l2_loss(UI_u, IU_i, IL_i, LI_l)
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) UI_u, IU_i, IL_i, LI_l, self.output = self._create_inference( self.item_input) if self.ispairwise.lower() == "true": _, IU_j, IL_j, _, output_neg = self._create_inference( self.item_input_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function,self.result) + self.reg_mf * ( tf.reduce_sum(tf.square(UI_u)) \ + tf.reduce_sum(tf.square(IU_i)) + tf.reduce_sum(tf.square(IL_i)) + tf.reduce_sum(tf.square(LI_l))+ \ tf.reduce_sum(tf.square(LI_l))+tf.reduce_sum(tf.square(IU_j))+tf.reduce_sum(tf.square(IL_j))) else: self.loss = learner.pointwise_loss(self.loss_function,self.lables,self.output) + self.reg_mf * (tf.reduce_sum(tf.square(UI_u)) \ +tf.reduce_sum(tf.square(IU_i))+ tf.reduce_sum(tf.square(IL_i))+tf.reduce_sum(tf.square(LI_l)))
def _create_loss(self): with tf.name_scope("loss"): p1, q1, self.output = self._create_inference( self.user_input, self.item_input, self.num_idx) if self.ispairwise.lower() == "true": _, q2, output_neg = self._create_inference( self.user_input_neg, self.item_input_neg, self.num_idx_neg) self.result = self.output - output_neg self.loss = learner.pairwise_loss(self.loss_function,self.result) + self.lambda_bilinear * ( tf.reduce_sum(tf.square(p1))) \ +self.gamma_bilinear*(tf.reduce_sum(tf.square(q2)) + tf.reduce_sum(tf.square(q1))) else: self.loss = learner.pointwise_loss(self.loss_function, \ self.lables,tf.sigmoid(self.output)) + self.lambda_bilinear *\ (tf.reduce_sum(tf.square(p1)))+self.gamma_bilinear *(tf.reduce_sum(tf.square(q1)))
def _create_loss(self): with tf.name_scope("loss"): # loss for L(Theta) p1, q1, bu, bi1, y1, predict_vector = self._create_inference( self.item_input) if self.ispairwise.lower() == "true": self.output = tf.reduce_sum(predict_vector, 1) _, q2, _, bi2, _, predict_vector_neg = self._create_inference( self.item_input_neg) self.output_neg = tf.reduce_sum(predict_vector_neg, 1) result = self.output - self.output_neg self.loss = learner.pairwise_loss(self.loss_function, result) + self.lambdaP * (tf.reduce_sum(tf.square(p1)))\ +self.lambdaQ*(tf.reduce_sum(tf.square(q2))+tf.reduce_sum(tf.square(q1)))+\ self.lambdaB*((tf.reduce_sum(tf.square(bu)))+tf.reduce_sum(tf.square(bi1))+ tf.reduce_sum(tf.square(bi2)))+self.lambdaY*(tf.reduce_sum(tf.square(y1),[0,1,2])) '''else:
#!/usr/local/bin/python