def _create_inference(self):
    with tf.name_scope('inference'):
        self.coeff = tf.pow(
            tf.cast(self.u_neighbors_num, tf.float32), -self.alpha)
        # Calculate u's preference score to i
        self.ui_scores = tf.einsum(
            'ab,ab->a', self.i_embed,
            tf.einsum('a,ab->ab', self.coeff,
                      self.u_neighbors_embed)) + self.i_bias
        if self.is_pairwise == 'True':
            self.uj_scores = tf.einsum(
                'ab,ab->a', self.j_embed,
                tf.einsum('a,ab->ab', self.coeff,
                          self.u_neighbors_embed)) + self.j_bias

        # Calculate loss
        self.loss = (self.reg *
                     (tf.nn.l2_loss(self.P) + tf.nn.l2_loss(self.Q))
                     ) / self.batch_size + self.reg_bias * tf.nn.l2_loss(self.b)
        if self.is_pairwise == 'True':
            self.loss += get_loss(self.loss_func,
                                  self.ui_scores - self.uj_scores)
        else:
            self.loss += get_loss(self.loss_func, self.y,
                                  logits=self.ui_scores)

        # Optimize
        self.train = self.optimizer.minimize(self.loss)
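# All of the snippets in this file call a shared helper `get_loss` that is
# defined elsewhere in the repository. The sketch below is only a guess at
# how such a dispatcher could look, based on the call sites above
# (pairwise: score difference only; pointwise: labels plus logits;
# distance-based: difference plus margin). The loss-name strings and the
# use of reduce_sum are assumptions, not the repository's actual code.
import tensorflow as tf


def get_loss(loss_func, y, logits=None, margin=None):
    """Hypothetical loss dispatcher.

    Pairwise case: `y` holds score (or distance) differences and `logits`
    is None. Pointwise case: `y` holds labels and `logits` holds the
    predicted scores.
    """
    if loss_func == 'bpr':            # -log sigmoid(x_ui - x_uj)
        return tf.reduce_sum(tf.nn.softplus(-y))
    elif loss_func == 'hinge':        # max(0, (d_ui - d_uj) + margin)
        return tf.reduce_sum(tf.maximum(y + margin, 0.0))
    elif loss_func == 'cross_entropy':
        return tf.reduce_sum(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))
    elif loss_func == 'square':
        return tf.reduce_sum(tf.square(y - logits))
    raise ValueError('Unknown loss function: %s' % loss_func)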
def _create_inference(self):
    with tf.name_scope('inference'):
        # Loss
        self.loss = get_loss(self.loss_func,
                             self.ui_scores - self.uk_scores) + \
            get_loss(self.loss_func,
                     (self.uk_scores - self.uj_scores) / (self.s + 1.0)) + \
            self.reg * (tf.nn.l2_loss(self.u_embed) +
                        tf.nn.l2_loss(self.i_embed) +
                        tf.nn.l2_loss(self.i_s_embed) +
                        tf.nn.l2_loss(self.i_neg_embed) +
                        tf.nn.l2_loss(self.i_bias) +
                        tf.nn.l2_loss(self.i_s_bias) +
                        tf.nn.l2_loss(self.i_neg_bias))

        # Optimize
        self.train = self.optimizer.minimize(self.loss)
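# Reading of the objective above (inferred from the variable names; it may
# not match the repository's documentation): if `uk_scores` are the scores
# of items exposed through the user's social connections and `self.s`
# counts how many connections are associated with that item, the two
# get_loss terms encourage
#     x_ui > x_uk    and    (x_uk - x_uj) / (s + 1) > 0,
# i.e. observed items rank above socially exposed items, which in turn rank
# above the sampled negatives, with the social margin damped by (s + 1).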
def _create_inference(self):
    with tf.name_scope('inference'):
        # Get neighborhood-based representations
        # Item-level GAT
        self.u_nbr_embed_i = self._build_gat(self.user_nbrs_i, self.u_idx,
                                             self.u_embed_i,
                                             self.data.item_nums, self.Q,
                                             self.max_i)
        # User-level GAT
        self.i_nbr_embed = self._build_gat(self.item_nbrs, self.i_idx,
                                           self.i_embed, self.data.user_nums,
                                           self.P, self.max_i)
        self.j_nbr_embed = self._build_gat(self.item_nbrs, self.j_idx,
                                           self.j_embed, self.data.user_nums,
                                           self.P, self.max_i)
        # Friend-level GATs
        self.u_nbr_embed_s = self._build_gat(self.user_nbrs_s, self.u_idx_s,
                                             self.u_embed_s,
                                             self.data.user_nums, self.P,
                                             self.max_s)
        self.v_nbr_embed = self._build_gat(self.user_nbrs_s, self.v_idx,
                                           self.v_embed, self.data.user_nums,
                                           self.P, self.max_s)
        self.w_nbr_embed = self._build_gat(self.user_nbrs_s, self.w_idx,
                                           self.w_embed, self.data.user_nums,
                                           self.P, self.max_s)

        # Get relation vectors
        self.ui_vec = self._build_mlp(self.u_nbr_embed_i, self.i_nbr_embed)
        self.uj_vec = self._build_mlp(self.u_nbr_embed_i, self.j_nbr_embed)
        self.uv_vec = self._build_mlp(self.u_nbr_embed_s, self.v_nbr_embed)
        self.uw_vec = self._build_mlp(self.u_nbr_embed_s, self.w_nbr_embed)

        # Get distance scores
        self.ui_dist = tf.reduce_sum(
            tf.square(self.u_embed_i + self.ui_vec - self.i_embed), 1)
        self.uj_dist = tf.reduce_sum(
            tf.square(self.u_embed_i + self.uj_vec - self.j_embed), 1)
        self.uv_dist = tf.reduce_sum(
            tf.square(self.u_embed_s + self.uv_vec - self.v_embed), 1)
        self.uw_dist = tf.reduce_sum(
            tf.square(self.u_embed_s + self.uw_vec - self.w_embed), 1)

        # Loss
        loss_i = get_loss(self.loss_func,
                          self.ui_dist - self.uj_dist,
                          margin=self.margin)  # Loss in item domain
        loss_s = get_loss(self.loss_func,
                          self.uv_dist - self.uw_dist,
                          margin=self.margin)  # Loss in social domain
        self.loss = loss_i + self.gamma * loss_s
        self.loss += self._get_regularizations()

        # Optimize
        self.train = self.optimizer.minimize(self.loss)
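# `_build_gat` is defined elsewhere in the repository. As a rough sketch of
# what a single-head graph-attention aggregation over padded neighbour lists
# could look like (function name, signature, and masking scheme are
# assumptions, not the actual implementation):
def gat_aggregate(nbr_ids, target_embed, embed_table, a, pad_id):
    """nbr_ids: [batch, max_nbrs] int ids padded with `pad_id`;
    target_embed: [batch, d]; embed_table: [num_entities, d];
    a: [2 * d] shared attention vector."""
    nbr_embed = tf.gather(embed_table, nbr_ids)            # [batch, max_nbrs, d]
    target_tiled = tf.tile(tf.expand_dims(target_embed, 1),
                           [1, tf.shape(nbr_ids)[1], 1])   # [batch, max_nbrs, d]
    # Unnormalised attention logits e_uv = LeakyReLU(a^T [h_u ; h_v])
    logits = tf.nn.leaky_relu(
        tf.einsum('abc,c->ab', tf.concat([target_tiled, nbr_embed], 2), a))
    # Mask padded neighbours, then softmax over the neighbour (last) axis
    mask = tf.cast(tf.not_equal(nbr_ids, pad_id), tf.float32)
    logits += (1.0 - mask) * -1e9
    alpha = tf.nn.softmax(logits)                          # [batch, max_nbrs]
    return tf.einsum('ab,abc->ac', alpha, nbr_embed)       # [batch, d]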
def _create_inference(self):
    with tf.name_scope('inference'):
        # Neighborhood aggregation
        all_u_nbr_embed = tf.sparse_tensor_dense_matmul(
            self.ui_sp_mat, self.Q)  # [user_nums, embed_size]
        self.all_i_nbr_embed = tf.sparse_tensor_dense_matmul(
            self.iu_sp_mat, self.P)
        self.u_nbr_embed = tf.gather(
            all_u_nbr_embed,
            self.u_idx)  # u's neighborhood-based representation
        self.i_nbr_embed = tf.gather(self.all_i_nbr_embed, self.i_idx)
        self.j_nbr_embed = tf.gather(self.all_i_nbr_embed, self.j_idx)

        # Generate relation vectors
        self.ui_r = tf.einsum('ab,ab->ab', self.u_nbr_embed,
                              self.i_nbr_embed)
        self.uj_r = tf.einsum('ab,ab->ab', self.u_nbr_embed,
                              self.j_nbr_embed)

        # Calculate distance scores
        self.ui_dist = tf.reduce_sum(
            tf.square(self.u_embed + self.ui_r - self.i_embed), 1)
        self.uj_dist = tf.reduce_sum(
            tf.square(self.u_embed + self.uj_r - self.j_embed), 1)

        # Loss
        self.loss = get_loss(self.loss_func,
                             self.ui_dist - self.uj_dist,
                             margin=self.margin)
        self.loss += self._get_regularizations()

        # Optimize
        self.train = self.optimizer.minimize(self.loss)

        # Unit clipping
        self._unit_clipping()
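# `_unit_clipping` is defined elsewhere; for translation/metric based
# recommenders it typically projects the embedding rows back into the unit
# ball after each update. A possible sketch (the op name and the choice of
# which tables to clip are assumptions):
def _unit_clipping(self):
    with tf.name_scope('unit_clipping'):
        # clip_by_norm with axes=[1] rescales each row to L2 norm <= 1
        self.clip_op = tf.group(
            tf.assign(self.P, tf.clip_by_norm(self.P, 1.0, axes=[1])),
            tf.assign(self.Q, tf.clip_by_norm(self.Q, 1.0, axes=[1])))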
def _create_inference(self):
    with tf.name_scope('inference'):
        y_gmf_tr = self._get_y_gmf()
        y_mlp_tr = self._get_y_mlp()

        # Fuse GMF and MLP
        self.logits = self._get_logits(y_gmf_tr, y_mlp_tr)
        self.loss = get_loss(self.loss_func, self.y, logits=self.logits) + \
            self.reg1 * (tf.nn.l2_loss(self.u_embed_gmf) +
                         tf.nn.l2_loss(self.i_embed_gmf)) + \
            self.reg2 * (tf.nn.l2_loss(self.u_embed_mlp) +
                         tf.nn.l2_loss(self.i_embed_mlp))
        self.train = self.optimizer.minimize(self.loss)
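# `_get_logits` is defined elsewhere. In the usual NeuMF formulation the GMF
# and MLP representation vectors are concatenated and projected to a scalar
# by a prediction layer; a hypothetical sketch (the weight `self.h` and its
# shape are assumptions):
def _get_logits(self, y_gmf, y_mlp):
    with tf.name_scope('fusion'):
        # self.h: [gmf_size + last_mlp_size, 1] prediction weights
        fused = tf.concat([y_gmf, y_mlp], axis=1)             # [batch, gmf+mlp]
        return tf.squeeze(tf.matmul(fused, self.h), axis=1)   # [batch]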
def _create_inference(self):
    with tf.name_scope('inference'):
        y_tr = self._get_y()

        # Calculate logits
        self.logits = self._get_logits(y_tr)
        self.loss = get_loss(
            self.loss_func, self.y, logits=self.logits) + self.reg * (
                tf.nn.l2_loss(self.u_embed) + tf.nn.l2_loss(self.i_embed))
        self.train = self.optimizer.minimize(self.loss)
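# `_get_y` and `_get_logits` are defined elsewhere; for a plain MLP-style
# model they presumably stack fully connected layers over the concatenated
# user/item embeddings and project the last hidden layer to a scalar. A
# hypothetical sketch (layer sizes are assumptions):
def _get_y(self):
    with tf.name_scope('mlp_tower'):
        h = tf.concat([self.u_embed, self.i_embed], axis=1)  # [batch, 2d]
        for units in [64, 32, 16]:
            h = tf.layers.dense(h, units, activation=tf.nn.relu)
        return h


def _get_logits(self, y_tr):
    # One logit per example
    return tf.squeeze(tf.layers.dense(y_tr, 1, activation=None), axis=1)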
def _create_inference(self):
    with tf.name_scope('inference'):
        # Calculate preference scores
        self.ui_scores = tf.einsum('ab,ab->a', self.u_embed, self.i_embed)
        self.uj_scores = tf.einsum('ab,ab->a', self.u_embed, self.j_embed)

        # Optimize
        self.loss = get_loss(self.loss_func,
                             self.ui_scores - self.uj_scores) + \
            self.reg * (tf.nn.l2_loss(self.u_embed) +
                        tf.nn.l2_loss(self.i_embed) +
                        tf.nn.l2_loss(self.j_embed))
        self.train = self.optimizer.minimize(self.loss)
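# Note on the scoring used throughout these snippets: tf.einsum('ab,ab->a',
# u, i) is simply a batched dot product, so tf.reduce_sum(u * i, axis=1)
# produces the same scores. A tiny self-contained check (TensorFlow 1.x
# session API, toy values):
import tensorflow as tf

u = tf.constant([[1.0, 2.0], [3.0, 4.0]])
v = tf.constant([[5.0, 6.0], [7.0, 8.0]])
with tf.Session() as sess:
    print(sess.run(tf.einsum('ab,ab->a', u, v)))     # [17. 53.]
    print(sess.run(tf.reduce_sum(u * v, axis=1)))    # [17. 53.]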
def _create_inference(self):
    with tf.name_scope('inference'):
        # Calculate relation vectors
        self.ui_vec = self._lram(self.u_embed, self.i_embed)
        self.uj_vec = self._lram(self.u_embed, self.j_embed)

        # Calculate distance scores
        self.ui_dist = tf.reduce_sum(
            tf.square(self.u_embed + self.ui_vec - self.i_embed), 1)
        self.uj_dist = tf.reduce_sum(
            tf.square(self.u_embed + self.uj_vec - self.j_embed), 1)

        # Optimize
        self.loss = get_loss(self.loss_func,
                             self.ui_dist - self.uj_dist,
                             margin=self.margin) + \
            self.reg * (tf.nn.l2_loss(self.u_embed) +
                        tf.nn.l2_loss(self.i_embed) +
                        tf.nn.l2_loss(self.j_embed))
        self.train = self.optimizer.minimize(self.loss)
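# `_lram` is defined elsewhere; judging from its use it derives a latent
# relation vector from a user/item pair, in the spirit of an attentive
# memory module. A hypothetical sketch (the memory tensors `self.key_M`
# [d, m] and `self.memory_M` [m, d] are assumptions):
def _lram(self, u_embed, i_embed):
    with tf.name_scope('lram'):
        joint = u_embed * i_embed                           # [batch, d] joint key
        att = tf.nn.softmax(tf.matmul(joint, self.key_M))   # [batch, m]
        return tf.matmul(att, self.memory_M)                # [batch, d] relation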
def _create_inference(self):
    with tf.name_scope('inference'):
        self._get_friend_vec()
        self._get_u_frien()

        # u's final representation
        self.u_vec = self.u_embed + self.u_frien

        # Calculate preference scores
        self.ui_scores = tf.einsum('b,ab->a', self.u_vec,
                                   self.i_embed) + self.i_b_embed
        self.uj_scores = tf.einsum('b,ab->a', self.u_vec,
                                   self.j_embed) + self.j_b_embed

        # Loss
        l2_loss1 = tf.nn.l2_loss(self.u_vec) + tf.nn.l2_loss(self.i_embed) + \
            tf.nn.l2_loss(self.j_embed) + tf.nn.l2_loss(self.i_b_embed) + \
            tf.nn.l2_loss(self.j_b_embed)
        l2_loss2 = tf.nn.l2_loss(self.W3) + tf.nn.l2_loss(self.b) + \
            tf.nn.l2_loss(self.h)
        self.loss = get_loss(self.loss_func,
                             self.ui_scores - self.uj_scores) + \
            self.reg1 * l2_loss1 + self.reg2 * l2_loss2

        # Optimize
        self.train = self.optimizer.minimize(self.loss)
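# `_get_friend_vec` and `_get_u_frien` are defined elsewhere; the regularised
# parameters (W3, b, h) suggest an attention layer that pools the friends'
# vectors into `self.u_frien`. A purely speculative sketch, assuming a
# per-user friend matrix `self.friend_vec` of shape [num_friends, d] and an
# attention size `a` (activation choice is also a guess):
def _get_u_frien(self):
    with tf.name_scope('friend_attention'):
        # self.W3: [d, a]; self.b: [a]; self.h: [a]
        hidden = tf.nn.relu(tf.matmul(self.friend_vec, self.W3) + self.b)
        att = tf.nn.softmax(tf.einsum('fa,a->f', hidden, self.h))   # [num_friends]
        self.u_frien = tf.einsum('f,fd->d', att, self.friend_vec)   # [d]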
def _create_inference(self):
    with tf.name_scope('inference'):
        # Calculate prediction scores
        if self.is_real_valued:
            # Consider real values
            squared_sum_embed = tf.square(
                tf.reduce_sum(
                    tf.einsum('ab,abc->abc', self.x_value, self.vif_embed),
                    1))
            sum_squared_embed = tf.reduce_sum(
                tf.einsum('ab,abc->abc', tf.square(self.x_value),
                          tf.square(self.vif_embed)), 1)
        else:
            squared_sum_embed = tf.square(tf.reduce_sum(self.vif_embed, 1))
            sum_squared_embed = tf.reduce_sum(tf.square(self.vif_embed), 1)
        y_2nd = tf.reduce_sum(squared_sum_embed - sum_squared_embed, 1)
        self.y_pre = self.w0 + tf.reduce_sum(self.wi_embed, 1) + 0.5 * y_2nd

        # Optimize
        self.loss = get_loss(
            self.loss_func, self.y, logits=self.y_pre) + self.reg * (
                tf.nn.l2_loss(self.wi) + tf.nn.l2_loss(self.vif))
        self.train = self.optimizer.minimize(self.loss)
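# The `y_2nd` term above uses the standard factorization-machine identity
#     sum_{i<j} <v_i, v_j> x_i x_j
#         = 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2 ],
# which turns the quadratic pairwise sum into a linear-time computation.
# A small numeric check of the identity with toy values (TensorFlow 1.x):
import tensorflow as tf

x = tf.constant([[1.0, 2.0, 0.5]])                           # [batch=1, fields=3]
v = tf.constant([[[0.1, 0.2], [0.3, -0.1], [0.05, 0.4]]])    # [1, 3, factors=2]

vx = tf.einsum('ab,abc->abc', x, v)                          # v_i * x_i
efficient = 0.5 * tf.reduce_sum(
    tf.square(tf.reduce_sum(vx, 1)) - tf.reduce_sum(tf.square(vx), 1), 1)

# Naive pairwise sum over i < j
naive = 0.0
for i in range(3):
    for j in range(i + 1, 3):
        naive += tf.reduce_sum(vx[:, i, :] * vx[:, j, :], 1)

with tf.Session() as sess:
    print(sess.run([efficient, naive]))                      # the two values match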