Example #1
    def create_bpr_loss(self, users, pos_items, neg_items):
        pos_scores = inner_product(users, pos_items)
        neg_scores = inner_product(users, neg_items)

        regularizer = l2_loss(self.u_g_embeddings_pre,
                              self.pos_i_g_embeddings_pre,
                              self.neg_i_g_embeddings_pre)

        mf_loss = tf.reduce_sum(log_loss(pos_scores - neg_scores))

        emb_loss = self.reg * regularizer

        return mf_loss, emb_loss
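
Example #1 (and most of the TensorFlow snippets below) relies on small helpers, inner_product, l2_loss, and log_loss, imported from the surrounding project's utilities rather than defined here. A minimal sketch consistent with how they are called above, assuming row-wise (batch, dim) embedding tensors and the tf.nn.l2_loss convention:

import tensorflow as tf


def inner_product(a, b):
    # row-wise dot product of two (batch, dim) tensors -> (batch,)
    return tf.reduce_sum(tf.multiply(a, b), axis=-1)


def l2_loss(*tensors):
    # sum of (halved) squared L2 norms, one tf.nn.l2_loss term per tensor
    return tf.add_n([tf.nn.l2_loss(t) for t in tensors])


def log_loss(x):
    # numerically stable -log(sigmoid(x)), i.e. softplus(-x); summing it
    # over (pos_scores - neg_scores) gives the BPR objective used above
    return tf.nn.softplus(-x)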
Example #2
    def create_bpr_loss(self):
        batch_u_embeddings = tf.nn.embedding_lookup(self.ua_embeddings,
                                                    self.users)
        batch_pos_i_embeddings = tf.nn.embedding_lookup(
            self.ia_embeddings, self.pos_items)
        batch_neg_i_embeddings = tf.nn.embedding_lookup(
            self.ia_embeddings, self.neg_items)
        batch_u_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.users)
        batch_pos_i_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.pos_items)
        batch_neg_i_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.neg_items)
        regularizer = l2_loss(batch_u_embeddings_pre,
                              batch_pos_i_embeddings_pre,
                              batch_neg_i_embeddings_pre)
        emb_loss = self.reg * regularizer

        pos_scores = inner_product(batch_u_embeddings, batch_pos_i_embeddings)
        neg_scores = inner_product(batch_u_embeddings, batch_neg_i_embeddings)
        bpr_loss = tf.reduce_sum(log_loss(pos_scores - neg_scores))
        # self.score_sigmoid = tf.sigmoid(pos_scores)

        self.grad_score = 1 - tf.sigmoid(pos_scores - neg_scores)
        self.grad_user_embed = self.grad_score * tf.sqrt(
            tf.reduce_sum(tf.square(batch_u_embeddings), axis=1))
        self.grad_item_embed = self.grad_score * tf.sqrt(
            tf.reduce_sum(tf.square(batch_pos_i_embeddings), axis=1))

        return bpr_loss, emb_loss
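
The grad_* tensors in Example #2 are diagnostics rather than training terms: for the per-pair BPR loss l(d) = -log(sigmoid(d)) with d = pos_scores - neg_scores, the derivative is dl/dd = -(1 - sigmoid(d)), so 1 - tf.sigmoid(pos_scores - neg_scores) is the per-pair gradient magnitude; multiplying it by an embedding's norm, as the code does, gives a per-example measure of update strength. A quick numeric check of the derivative identity:

import numpy as np


def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


d, eps = 0.3, 1e-6
# central difference of -log(sigmoid(d)) vs. the closed form -(1 - sigmoid(d))
numeric = (-np.log(sigmoid(d + eps)) + np.log(sigmoid(d - eps))) / (2 * eps)
assert np.isclose(numeric, -(1 - sigmoid(d)), atol=1e-5)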
Example #3
    def build_graph(self):
        self._create_variable()
        hidden_ori = self._encoding(self.users_ph, self.sp_mat_ph)  # (b,d)

        # decoding
        de_item_embs = tf.nn.embedding_lookup(self.de_embeddings,
                                              self.items_ph)  # (l,d)
        de_bias = tf.gather(self.de_bias, self.items_ph)  # (l,)
        hidden = tf.nn.embedding_lookup(hidden_ori, self.remap_idx_ph)
        ratings = inner_product(hidden, de_item_embs) + de_bias

        # reg loss
        item_ids, _ = tf.unique(self.items_ph)
        reg_loss = l2_loss(
            tf.nn.embedding_lookup(self.en_embeddings, item_ids),
            self.en_offset,
            tf.nn.embedding_lookup(self.user_embeddings, self.users_ph),
            tf.nn.embedding_lookup(self.de_embeddings, item_ids),
            tf.gather(self.de_bias, item_ids))

        if self.loss_func == "square":
            model_loss = tf.squared_difference(ratings, self.labels_ph)
        elif self.loss_func == "sigmoid_cross_entropy":
            model_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=ratings, labels=self.labels_ph)
        else:
            raise ValueError("%s is an invalid loss function." %
                             self.loss_func)

        final_loss = tf.reduce_sum(model_loss) + self.reg * reg_loss

        self.train_opt = tf.train.AdamOptimizer(self.lr).minimize(
            final_loss, name="train_opt")

        # for evaluation
        self.batch_ratings = tf.matmul(
            hidden_ori, self.de_embeddings, transpose_b=True) + self.de_bias
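
Example #3 encodes each unique user in the batch once (hidden_ori) and uses remap_idx_ph to gather one hidden row per (user, item) training pair before the inner_product. The feed itself is not shown; a hypothetical illustration of the indexing with made-up data:

import numpy as np

users = np.array([4, 4, 9])  # users of the (user, item) training pairs
uniq = np.array([4, 9])      # unique batch users, i.e. the rows of hidden_ori
remap = np.array([0, 0, 1])  # pair i -> row index of its user in uniq
assert np.array_equal(uniq[remap], users)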
Example #4
train_x_gauss, _ = mnist_gauss.run(train_x)
train_x_gauss_krao, _ = mnist_gauss_krao.run(train_x)
train_x_gauss_krao_vr, _ = mnist_gauss_krao_vr.run(train_x)
train_x_sp0, _ = mnist_sp0.run(train_x)
train_x_sp0_krao, _ = mnist_sp0_krao.run(train_x)
train_x_sp0_krao_vr, _ = mnist_sp0_krao_vr.run(train_x)
train_x_sp1, _ = mnist_sp1.run(train_x)
train_x_sp1_krao, _ = mnist_sp1_krao.run(train_x)
train_x_sp1_krao_vr, _ = mnist_sp1_krao_vr.run(train_x)

print("inner")


inner_gauss = inner_product(train_x_gauss)
inner_gauss_krao = inner_product(train_x_gauss_krao)
inner_gauss_krao_vr = inner_product(train_x_gauss_krao_vr)
inner_sp0 = inner_product(train_x_sp0)
inner_sp0_krao = inner_product(train_x_sp0_krao)
inner_sp0_krao_vr = inner_product(train_x_sp0_krao_vr)
inner_sp1 = inner_product(train_x_sp1)
inner_sp1_krao = inner_product(train_x_sp1_krao)
inner_sp1_krao_vr = inner_product(train_x_sp1_krao_vr)


inners = [
    inner_gauss, inner_gauss_krao, inner_gauss_krao_vr, inner_sp0,
    inner_sp0_krao, inner_sp0_krao_vr, inner_sp1, inner_sp1_krao,
    inner_sp1_krao_vr
]
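
The notebook excerpt cuts off here, and the single-argument inner_product it benchmarks is not defined in it. Since the results are compared across sketching schemes, it is presumably the matrix of pairwise inner products (the Gram matrix) of the rows; a minimal sketch under that assumption:

import numpy as np


def inner_product(x):
    # hypothetical single-argument variant: pairwise inner products
    # (Gram matrix) of the rows of x
    return x @ x.T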
Example #5
    def build_graph(self):
        """
        *********************************************************
        Create Placeholder for Input Data & Dropout.
        """
        # placeholder definition
        self.users_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.pos_items_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.anchor = tf.placeholder(tf.float32,
                                     shape=(self.localmodel, self.localmodel))

        # dynamically create one sparse adjacency placeholder per local
        # model: self.model_adj0, self.model_adj1, ...
        for i in range(self.localmodel):
            exec("self.model_adj%d = tf.sparse_placeholder("
                 "tf.float32, name='model_adj%d')" % (i, i))

        self.node_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        """
        *********************************************************
        Create Model Parameters (i.e., Initialize Weights).
        """
        # initialization of model parameters
        self.weights = self._init_weights()
        self._init_constant()
        """
        *********************************************************
        Compute Graph-based Representations of all users & items via Message-Passing Mechanism of Graph Neural Networks.
        Different Convolutional Layers:
            1. ngcf: defined in 'Neural Graph Collaborative Filtering', SIGIR2019;
            2. gcn:  defined in 'Semi-Supervised Classification with Graph Convolutional Networks', ICLR2017;
            3. gcmc: defined in 'Graph Convolutional Matrix Completion', KDD2018;
        """
        ego_embeddings = tf.concat(
            [self.weights['user_embedding'], self.weights['item_embedding']],
            axis=0)
        self.all_embeddings = []
        for n_cluster in range(self.localmodel):
            self.all_embeddings_temp = self._create_lightgcn_embed(
                self.norm_adj[n_cluster], ego_embeddings)
            self.all_embeddings.append(self.all_embeddings_temp)
        self.all_embeddings = tf.stack(self.all_embeddings, 1)
        self.latent = tf.reduce_mean(self.all_embeddings,
                                     axis=0,
                                     keepdims=False)

        self.all_embeddings = tf.reduce_mean(self.all_embeddings,
                                             axis=1,
                                             keepdims=False)
        #         self.all_embeddings = tf.reduce_max(self.all_embeddings, reduction_indices=[1])
        self.ua_embeddings, self.ia_embeddings = tf.split(
            tf.reshape(self.all_embeddings,
                       (self.n_users + self.n_items, self.emb_dim)),
            [self.n_users, self.n_items], 0)

        local_embeddings = tf.concat([
            self.weights['local_user_embedding'],
            self.weights['local_item_embedding']
        ], axis=0)
        self.local_embeddings_all = []
        for n_cluster in range(self.localmodel):
            self.local_embeddings_temp = self._create_lightgcn_embed(
                self.norm_adj[n_cluster], local_embeddings)
            self.local_embeddings_all.append(self.local_embeddings_temp)
        self.local_embeddings_all = tf.stack(self.local_embeddings_all, 0)
        self.local_embeddings_all = tf.matmul(
            self.anchor,
            tf.reshape(self.local_embeddings_all, [self.localmodel, -1]))
        self.local_embeddings_all = tf.reduce_mean(self.local_embeddings_all,
                                                   axis=0,
                                                   keepdims=False)
        self.local_u, self.local_i = tf.split(
            tf.reshape(self.local_embeddings_all,
                       (self.n_users + self.n_items, self.emb_dim)),
            [self.n_users, self.n_items], 0)
        """
        *********************************************************
        Inference for the testing phase.
        """
        # for prediction
        self.item_embeddings_final = tf.Variable(
            tf.zeros([self.n_items, self.emb_dim]),
            dtype=tf.float32,
            name="item_embeddings_final",
            trainable=False)
        self.user_embeddings_final = tf.Variable(
            tf.zeros([self.n_users, self.emb_dim]),
            dtype=tf.float32,
            name="user_embeddings_final",
            trainable=False)

        self.assign_opt = [
            tf.assign(self.user_embeddings_final, self.ua_embeddings),
            tf.assign(self.item_embeddings_final, self.ia_embeddings)
        ]

        u_embed = tf.nn.embedding_lookup(self.user_embeddings_final,
                                         self.users_ph)
        self.batch_ratings = tf.matmul(u_embed,
                                       self.item_embeddings_final,
                                       transpose_a=False,
                                       transpose_b=True)
        self.u_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.user_idx)
        self.i_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.item_idx)
        """
        *********************************************************
        Generate Predictions & Optimize via fast loss.
        """
        term1 = tf.matmul(self.ua_embeddings,
                          self.ua_embeddings,
                          transpose_a=True)
        term2 = tf.matmul(self.ia_embeddings,
                          self.ia_embeddings,
                          transpose_a=True)
        loss1 = tf.reduce_sum(tf.multiply(term1, term2))

        user_embed = tf.nn.embedding_lookup(self.ua_embeddings, self.user_idx)
        item_embed = tf.nn.embedding_lookup(self.ia_embeddings, self.item_idx)
        pos_ratings = inner_product(user_embed, item_embed)

        loss1 += tf.reduce_sum((self.r_alpha - 1) * tf.square(pos_ratings) -
                               2.0 * self.r_alpha * pos_ratings)
        # reg
        reg_loss = l2_loss(self.u_g_embeddings_pre, self.i_g_embeddings_pre)

        self.loss = loss1 + self.fast_reg * reg_loss

        self.opt = tf.train.AdagradOptimizer(learning_rate=self.lr).minimize(
            self.loss)

        self.local_u_pre = tf.nn.embedding_lookup(
            self.weights['local_user_embedding'], self.user_idx)
        self.local_i_pre = tf.nn.embedding_lookup(
            self.weights['local_item_embedding'], self.item_idx)
        local_term1 = tf.matmul(self.local_u, self.local_u, transpose_a=True)
        local_term2 = tf.matmul(self.local_i, self.local_i, transpose_a=True)
        loss2 = tf.reduce_sum(tf.multiply(local_term1, local_term2))

        local_user_embed = tf.nn.embedding_lookup(self.local_u, self.user_idx)
        local_item_embed = tf.nn.embedding_lookup(self.local_i, self.item_idx)
        local_pos_ratings = inner_product(local_user_embed, local_item_embed)

        loss2 += tf.reduce_sum((self.r_alpha - 1) *
                               tf.square(local_pos_ratings) -
                               2.0 * self.r_alpha * local_pos_ratings)
        # reg
        reg_loss = l2_loss(self.local_u_pre, self.local_i_pre)

        self.local_loss = loss2 + self.fast_reg * reg_loss

        self.local_opt = tf.train.AdagradOptimizer(
            learning_rate=self.lr).minimize(self.local_loss)
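
The "fast loss" in Example #5 (and again in Example #7 below) never materializes the full score matrix: it uses the identity sum over all (u, i) of (u . i)^2 = sum((U^T U) * (I^T I)), with * elementwise, so term1 and term2 are d x d Gram matrices and loss1 covers every user-item pair in O(d^2) memory. A small numpy check of the identity:

import numpy as np

rng = np.random.default_rng(0)
U = rng.normal(size=(5, 3))  # 5 "users", dimension 3
I = rng.normal(size=(7, 3))  # 7 "items", dimension 3
lhs = np.sum((U @ I.T) ** 2)         # all-pair squared scores
rhs = np.sum((U.T @ U) * (I.T @ I))  # Gram-matrix shortcut
assert np.isclose(lhs, rhs)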
Example #6
File: SASRec.py  Project: wubinzzu/TNNLS
    def build_graph(self):
        self._create_variable()
        reuse = None
        with tf.variable_scope("SASRec", reuse=reuse):
            self.seq = tf.nn.embedding_lookup(self.item_embeddings,
                                              self.item_seq_ph)
            item_emb_table = self.item_embeddings

            # Positional Encoding
            position = tf.tile(
                tf.expand_dims(tf.range(tf.shape(self.item_seq_ph)[1]), 0),
                [tf.shape(self.item_seq_ph)[0], 1])
            t = tf.nn.embedding_lookup(self.position_embeddings, position)
            # pos_emb_table = self.position_embeddings

            self.seq += t

            # Dropout
            self.seq = tf.layers.dropout(self.seq,
                                         rate=self.dropout_rate,
                                         training=tf.convert_to_tensor(
                                             self.is_training))

            mask = tf.expand_dims(
                tf.to_float(tf.not_equal(self.item_seq_ph, self.items_num)),
                -1)
            self.seq *= mask

            # Build blocks

            for i in range(self.num_blocks):
                with tf.variable_scope("num_blocks_%d" % i):
                    # Self-attention
                    self.seq = multihead_attention(
                        queries=normalize(self.seq),
                        keys=self.seq,
                        num_units=self.embedding_size,
                        num_heads=self.num_heads,
                        dropout_rate=self.dropout_rate,
                        is_training=self.is_training,
                        causality=True,
                        scope="self_attention")

                    # Feed forward
                    self.seq = feedforward(
                        normalize(self.seq),
                        num_units=[self.embedding_size, self.embedding_size],
                        dropout_rate=self.dropout_rate,
                        is_training=self.is_training)
                    self.seq *= mask

            self.seq = normalize(self.seq)  # (b, l, d)
            # self.seq = normalize(self.seq) + tf.expand_dims(self.user_emb, axis=1)   # (b, l, d)

            last_emb = self.seq[:, -1, :]  # (b, d), embedding of the last item in each session

        pos = tf.reshape(
            self.item_pos_ph,
            [tf.shape(self.item_seq_ph)[0] * self.max_len])  # (b*l,)
        neg = tf.reshape(
            self.item_neg_ph,
            [tf.shape(self.item_seq_ph)[0] * self.max_len])  # (b*l,)
        pos_emb = tf.nn.embedding_lookup(item_emb_table, pos)  # (b*l, d)
        neg_emb = tf.nn.embedding_lookup(item_emb_table, neg)  # (b*l, d)
        seq_emb = tf.reshape(self.seq, [
            tf.shape(self.item_seq_ph)[0] * self.max_len, self.embedding_size
        ])  # (b*l, d)

        # prediction layer
        self.pos_logits = inner_product(pos_emb, seq_emb)  # (b*l,)
        self.neg_logits = inner_product(neg_emb, seq_emb)  # (b*l,)

        # ignore padding items (self.items_num)
        is_target = tf.reshape(tf.to_float(tf.not_equal(pos, self.items_num)),
                               [tf.shape(self.item_seq_ph)[0] * self.max_len])

        pos_loss = -tf.log(tf.sigmoid(self.pos_logits) + 1e-24) * is_target
        neg_loss = -tf.log(1 - tf.sigmoid(self.neg_logits) + 1e-24) * is_target
        self.loss = tf.reduce_sum(pos_loss +
                                  neg_loss) / tf.reduce_sum(is_target)

        # add any regularization losses registered in the graph collection
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if reg_losses:
            self.loss = self.loss + tf.add_n(reg_losses)

        self.train_opt = tf.train.AdamOptimizer(learning_rate=self.lr,
                                                beta2=0.98).minimize(self.loss)

        # for prediction/test
        items_embeddings = item_emb_table[:-1]  # remove the padding item
        self.all_logits = tf.matmul(last_emb,
                                    items_embeddings,
                                    transpose_b=True)
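
The flattening in the prediction layer depends on row-major reshapes keeping (session, position) pairs aligned between seq_emb and pos/neg, so inner_product yields one logit per position. A minimal shape check with hypothetical sizes:

import numpy as np

b, l, d = 2, 3, 4  # hypothetical batch, sequence length, embedding size
seq = np.arange(b * l * d).reshape(b, l, d)
seq_emb = seq.reshape(b * l, d)
# position t of session s lands at row s * l + t after flattening
assert np.array_equal(seq_emb[1 * l + 2], seq[1, 2])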
Example #7
    def build_graph(self):
        """
        *********************************************************
        Create Placeholder for Input Data & Dropout.
        """
        # placeholder definition
        self.users_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.pos_items_ph = tf.placeholder(tf.int32, shape=(None, ))
        self.neg_items_ph = tf.placeholder(tf.int32, shape=(None, ))

        # dropout: node dropout (applied to the ego-networks);
        #          node dropout has a higher computational cost, so use
        #          'node_dropout_flag' to indicate whether to apply it;
        #          message dropout (applied to the convolution operations).

        self.node_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout_ph = tf.placeholder(tf.float32, shape=[None])
        """
        *********************************************************
        Create Model Parameters (i.e., Initialize Weights).
        """
        # initialization of model parameters
        self.weights = self._init_weights()
        self._init_constant()
        """
        *********************************************************
        Compute Graph-based Representations of all users & items via Message-Passing Mechanism of Graph Neural Networks.
        Different Convolutional Layers:
            1. ngcf: defined in 'Neural Graph Collaborative Filtering', SIGIR2019;
            2. gcn:  defined in 'Semi-Supervised Classification with Graph Convolutional Networks', ICLR2017;
            3. gcmc: defined in 'Graph Convolutional Matrix Completion', KDD2018;
        """
        if self.alg_type in ['lightgcn']:
            self.ego_embeddings = tf.concat([
                self.weights['user_embedding'], self.weights['item_embedding']
            ],
                                            axis=0)

            self.all_embeddings = self._create_lightgcn_embed(
                self.ego_embeddings)

            self.ua_embeddings, self.ia_embeddings = tf.split(
                self.all_embeddings, [self.n_users, self.n_items], 0)

        elif self.alg_type in ['ngcf']:
            self.ua_embeddings, self.ia_embeddings = self._create_ngcf_embed()

        elif self.alg_type in ['gcn']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcn_embed()

        elif self.alg_type in ['gcmc']:
            self.ua_embeddings, self.ia_embeddings = self._create_gcmc_embed()
        """
        *********************************************************
        Inference for the testing phase.
        """
        # for prediction
        self.item_embeddings_final = tf.Variable(
            tf.zeros([self.n_items, self.emb_dim]),
            dtype=tf.float32,
            name="item_embeddings_final",
            trainable=False)
        self.user_embeddings_final = tf.Variable(
            tf.zeros([self.n_users, self.emb_dim]),
            dtype=tf.float32,
            name="user_embeddings_final",
            trainable=False)

        self.assign_opt = [
            tf.assign(self.user_embeddings_final, self.ua_embeddings),
            tf.assign(self.item_embeddings_final, self.ia_embeddings)
        ]
        self.emb = tf.concat(
            [self.user_embeddings_final, self.item_embeddings_final], 0)
        u_embed = tf.nn.embedding_lookup(self.user_embeddings_final,
                                         self.users_ph)
        self.batch_ratings = tf.matmul(u_embed,
                                       self.item_embeddings_final,
                                       transpose_a=False,
                                       transpose_b=True)

        self.u_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['user_embedding'], self.user_idx)
        self.i_g_embeddings_pre = tf.nn.embedding_lookup(
            self.weights['item_embedding'], self.item_idx)
        """
        *********************************************************
        Generate Predictions & Optimize via fast loss.
        """
        # rating
        term1 = tf.matmul(self.ua_embeddings,
                          self.ua_embeddings,
                          transpose_a=True)
        term2 = tf.matmul(self.ia_embeddings,
                          self.ia_embeddings,
                          transpose_a=True)
        loss1 = tf.reduce_sum(tf.multiply(term1, term2))

        user_embed = tf.nn.embedding_lookup(self.ua_embeddings, self.user_idx)
        item_embed = tf.nn.embedding_lookup(self.ia_embeddings, self.item_idx)
        pos_ratings = inner_product(user_embed, item_embed)

        loss1 += tf.reduce_sum((self.r_alpha - 1) * tf.square(pos_ratings) -
                               2.0 * self.r_alpha * pos_ratings)
        # reg
        reg_loss = l2_loss(self.u_g_embeddings_pre, self.i_g_embeddings_pre)

        self.loss = loss1 + self.fast_reg * reg_loss

        # self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss)
        self.opt = tf.train.AdagradOptimizer(learning_rate=self.lr).minimize(
            self.loss)
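
As in Example #5, the pointwise term added to loss1 folds an alpha-weighted squared error on the observed pairs into the all-pair sum: up to a constant alpha per observed pair, alpha * (r - 1)**2 - r**2 == (alpha - 1) * r**2 - 2 * alpha * r, which is exactly the expression inside the reduce_sum. A one-line check of the algebra:

r, alpha = 0.7, 10.0
assert abs((alpha * (r - 1) ** 2 - r ** 2)
           - ((alpha - 1) * r ** 2 - 2 * alpha * r + alpha)) < 1e-12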
Example #8
File: perceptron.py  Project: tengkz/tiny
import random

from util import inner_product

# Generate data: points (a, a + b) labeled by the sign of the Gaussian noise b
_a0 = range(100)
_b0 = [random.gauss(0, 1) for _ in range(100)]

x0 = [(_a0[i], _a0[i] + _b0[i]) for i in range(100)]
y0 = [1 if b > 0 else -1 for b in _b0]

# Initialization
w0 = [1.0, 1.0]
alpha = 1.0

# Perceptron algorithm: sweep the data, nudging the weights toward each
# misclassified point, until a full pass makes no update.
loop = 0
while True:
    cnt = 0
    for i, x in enumerate(x0):
        t = inner_product(x, w0)
        if t * y0[i] < 0:  # misclassified: move w0 toward y0[i] * x
            w0[0] += alpha * x[0] * y0[i]
            w0[1] += alpha * x[1] * y0[i]
            cnt += 1
    if cnt == 0:
        break
    loop += 1
    print(loop, cnt, w0)
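
The loop above terminates only if the data is linearly separable, which holds here by construction: y0[i] is the sign of _b0[i] = x0[i][1] - x0[i][0], so the weight vector [-1.0, 1.0] separates the two classes (ignoring the measure-zero event _b0[i] == 0). A quick check:

assert all((x[1] - x[0]) * y > 0 for x, y in zip(x0, y0))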