Example #1
    def _setup_ak(self, post, nn, n2):
        # This is the equivalent of CalculateAk in Fabber
        #
        # Some of this could probably be better done using linalg
        # operations but bear in mind this is one parameter only

        self.sigmaK = self.log_tf(tf.matrix_diag_part(post.cov)[:, self.idx], name="sigmak") # [W]
        self.wK = self.log_tf(post.mean[:, self.idx], name="wk") # [W]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]

        # Sum over vertices of parameter variance multiplied by number of 
        # nearest neighbours for each vertex
        trace_term = self.log_tf(tf.reduce_sum(self.sigmaK * self.num_nn), name="trace") # [1]

        # Sums of nearest-neighbour and next-nearest-neighbour mean values for each vertex
        self.sum_means_nn = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.nn, tf.reshape(self.wK, (-1, 1))), (-1,)), name="wksum") # [W]
        self.sum_means_n2 = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.n2, tf.reshape(self.wK, (-1, 1))), (-1,)), name="contrib8") # [W]
        
        # Vertex parameter mean multiplied by number of nearest neighbours
        wknn = self.log_tf(self.wK * self.num_nn, name="wknn") # [W]

        swk = self.log_tf(wknn - self.sum_means_nn, name="swk") # [W]

        term2 = self.log_tf(tf.reduce_sum(swk * self.wK), name="term2") # [1]

        gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
        hk = tf.multiply(tf.to_float(self.nvertices), 0.5) + 1.0
        self.ak = self.log_tf(tf.identity(gk * hk, name="ak"))
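A minimal NumPy/SciPy sketch (illustrative only, not part of the original project) of what the sparse-dense products above compute: with a binary nearest-neighbour matrix, tf.sparse_tensor_dense_matmul(self.nn, wK) reduces to a per-vertex sum of the neighbours' mean values, and tf.sparse_reduce_sum gives the per-vertex neighbour counts.

# Illustrative check of the neighbour sums used above (toy data)
import numpy as np
import scipy.sparse as sp

nn = sp.csr_matrix(np.array([[0, 1, 1],
                             [1, 0, 0],
                             [1, 0, 0]], dtype=np.float32))  # binary nearest-neighbour matrix
wk = np.array([0.5, -1.0, 2.0], dtype=np.float32)            # per-vertex parameter means

sum_means_nn = (nn @ wk.reshape(-1, 1)).ravel()   # plays the role of sparse_tensor_dense_matmul(nn, wK)
num_nn = np.asarray(nn.sum(axis=1)).ravel()       # plays the role of sparse_reduce_sum(nn, axis=1)

assert np.allclose(sum_means_nn, [1.0, 0.5, 0.5])
assert np.allclose(num_nn, [2.0, 1.0, 1.0])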
Example #2
    def _inference(self, x, dropout):
        with tf.name_scope('gconv1'):
            N, M = x.get_shape()  # N: number of samples, M: number of features
            M = int(M)
            # Transform to Chebyshev basis
            xt0 = tf.transpose(x)  # M x N
            xt = tf.expand_dims(xt0, 0)  # 1 x M x N
            def concat(xt, x):
                x = tf.expand_dims(x, 0)  # 1 x M x N
                return tf.concat([xt, x], axis=0)  # K x M x N
            if self.K > 1:
                xt1 = tf.sparse_tensor_dense_matmul(self.L, xt0)
                xt = concat(xt, xt1)
            for k in range(2, self.K):
                xt2 = 2 * tf.sparse_tensor_dense_matmul(self.L, xt1) - xt0  # M x N
                xt = concat(xt, xt2)
                xt0, xt1 = xt1, xt2
            xt = tf.transpose(xt)  # N x M x K
            xt = tf.reshape(xt, [-1,self.K])  # NM x K
            # Filter
            W = self._weight_variable([self.K, self.F])
            y = tf.matmul(xt, W)  # NM x F
            y = tf.reshape(y, [-1, M, self.F])  # N x M x F
            # Bias and non-linearity
#            b = self._bias_variable([1, 1, self.F])
            b = self._bias_variable([1, M, self.F])
            y += b  # N x M x F
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*M, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*M])
            y = tf.matmul(y, W) + b
        return y
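The loop above builds the Chebyshev basis through the recurrence T_0(L)x = x, T_1(L)x = Lx, T_k(L)x = 2L·T_{k-1}(L)x - T_{k-2}(L)x. A small NumPy sketch of the same recurrence (illustrative only; the dense L below stands in for the sparse, rescaled Laplacian):

import numpy as np

M, N, K = 4, 3, 3
L = np.random.randn(M, M).astype(np.float32)   # stand-in for the rescaled graph Laplacian
x = np.random.randn(M, N).astype(np.float32)

xt = [x, L @ x]                                # T_0(L)x and T_1(L)x
for k in range(2, K):
    xt.append(2 * L @ xt[-1] - xt[-2])         # T_k = 2 L T_{k-1} - T_{k-2}

# T_2(L) = 2 L^2 - I, so the recurrence should match the explicit polynomial
assert np.allclose(xt[2], (2 * L @ L - np.eye(M, dtype=np.float32)) @ x, atol=1e-4)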
Example #3
 def _call(self, inputs):
     # vecs: input feature of the current layer.
     # adj_partition_list: the row partitions of the full graph adj
     #       (only used in full-batch evaluation on the val/test sets)
     vecs, adj_norm, len_feat, adj_partition_list, _ = inputs
     vecs = tf.nn.dropout(vecs, 1 - self.dropout)
     vecs_hop = [tf.identity(vecs) for o in range(self.order + 1)]
     for o in range(self.order):
         for a in range(o + 1):
             ans1 = tf.sparse_tensor_dense_matmul(adj_norm, vecs_hop[o + 1])
             ans_partition = [
                 tf.sparse_tensor_dense_matmul(adj, vecs_hop[o + 1])
                 for adj in adj_partition_list
             ]
             ans2 = tf.concat(ans_partition, 0)
             vecs_hop[o + 1] = tf.cond(self.is_train,
                                       lambda: tf.identity(ans1),
                                       lambda: tf.identity(ans2))
     vecs_hop = [self._F_nonlinear(v, o) for o, v in enumerate(vecs_hop)]
     if self.aggr == 'mean':
         ret = vecs_hop[0]
         for o in range(len(vecs_hop) - 1):
             ret += vecs_hop[o + 1]
     elif self.aggr == 'concat':
         ret = tf.concat(vecs_hop, axis=1)
     else:
         raise NotImplementedError
     return ret
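A brief note on the tf.cond above: during training the aggregation uses the (sampled) adj_norm, while at evaluation time the full-graph adjacency is applied one row partition at a time and the results are concatenated. Concatenating the per-partition products is equivalent to multiplying by the full matrix, as this illustrative NumPy sketch shows:

import numpy as np

adj = np.random.rand(6, 6).astype(np.float32)     # stand-in for the full adjacency
x = np.random.rand(6, 8).astype(np.float32)

row_partitions = [adj[:3], adj[3:]]               # row blocks, like adj_partition_list
stitched = np.concatenate([p @ x for p in row_partitions], axis=0)

assert np.allclose(stitched, adj @ x, atol=1e-5)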
Example #4
File: cox.py Project: SerTelnov/DLF
    def __init__(self, lr, batch_size, dimension, util_train, util_test, campaign, reg_lambda, nn=False):
        # hyperparameters
        self.lr = lr
        self.batch_size = batch_size
        self.util_train = util_train
        self.util_test = util_test
        self.reg_lambda = reg_lambda

        self.train_data_amt = util_train.get_data_amt()
        self.test_data_amt = util_test.get_data_amt()

        # output dir
        model_name = "{}_{}_{}".format(self.lr, self.reg_lambda, self.batch_size)
        if nn:
            self.output_dir = "output/coxnn/{}/{}/".format(campaign, model_name)
        else:
            self.output_dir = "output/cox/{}/{}/".format(campaign, model_name)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        # reset graph
        ops.reset_default_graph()

        # placeholders, sorted value
        tfc.disable_eager_execution()
        self.X = tfc.sparse_placeholder(tf.float64)
        self.z = tfc.placeholder(tf.float64)
        self.b = tfc.placeholder(tf.float64)
        self.y = tfc.placeholder(tf.float64)

        # computation graph, linear estimator or neural network
        if nn:
            hidden_size = 20
            self.w1 = tf.Variable(initial_value=tfc.truncated_normal(shape=[dimension, hidden_size], dtype=tf.float64),
                                  name='w1')
            self.w2 = tf.Variable(initial_value=tfc.truncated_normal(shape=[hidden_size, 1], dtype=tf.float64),
                                  name='w2')
            self.hidden_values = tf.nn.relu(tfc.sparse_tensor_dense_matmul(self.X, self.w1))
            self.index = tf.matmul(self.hidden_values, self.w2)
            self.reg = tf.nn.l2_loss(self.w1[1:, ]) + tf.nn.l2_loss(self.w2[1:, ])
        else:
            self.w = tf.Variable(initial_value=tfc.truncated_normal(shape=[dimension, 1], dtype=tf.float64), name='w')
            self.index = tfc.sparse_tensor_dense_matmul(self.X, self.w)
            self.reg = tf.reduce_sum(tf.abs(self.w[1:, ]))

        self.multiple_times = tf.exp(self.index)
        self.loss = -tf.reduce_sum((self.index - tfc.log(tf.cumsum(self.multiple_times, reverse=True))) * self.y) + \
                    self.reg
        self.optimizer = tfc.train.GradientDescentOptimizer(self.lr)
        self.train_step = self.optimizer.minimize(self.loss)

        # for test h0
        self.base = self.z * self.y + self.b * (1 - self.y)
        self.candidate = (1 / tf.cumsum(tf.exp(self.index), reverse=True)) * self.y

        # session initialization
        config = tfc.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tfc.Session(config=config)
        tfc.global_variables_initializer().run(session=self.sess)
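The loss defined above is the negative Cox partial log-likelihood (plus the regularizer): because the placeholders are fed with samples sorted by time (see the "placeholders, sorted value" comment), tf.cumsum(..., reverse=True) over exp(index) yields the risk-set sum for each observation. An illustrative NumPy version of the same quantity (toy values, not from the project):

import numpy as np

index = np.array([0.2, -0.1, 0.4])   # linear predictor, samples sorted by time
y = np.array([1.0, 0.0, 1.0])        # event indicator (1 = observed, 0 = censored)

risk_set = np.cumsum(np.exp(index)[::-1])[::-1]          # as in tf.cumsum(..., reverse=True)
neg_partial_ll = -np.sum((index - np.log(risk_set)) * y)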
Example #5
    def _get_bi_pooling_predictions(self, feats):
        # Linear terms: batch_size * 1
        term0 = tf.sparse_tensor_dense_matmul(feats,
                                              self.weights['var_linear'])

        # Interaction terms w.r.t. first sum then square: batch_size * emb_size.
        #   e.g., sum_{k from 1 to K}{(v1k+v2k)**2}
        sum_emb = tf.sparse_tensor_dense_matmul(feats,
                                                self.weights['var_factor'])
        term1 = tf.square(sum_emb)

        # Interaction terms w.r.t. first square then sum: batch_size * emb_size.
        #   e.g., sum_{k from 1 to K}{v1k**2 + v2k**2}
        square_emb = tf.sparse_tensor_dense_matmul(
            tf.square(feats), tf.square(self.weights['var_factor']))
        term2 = square_emb

        # "neural factorization machine", Equation 3, the result of bi-interaction pooling: batch_size * emb_size
        term3 = 0.5 * (term1 - term2)

        # "neural factorization machine", Equation 7, the result of MLP: batch_size * 1
        z = [term3]
        for i in range(self.n_layers):
            temp = tf.nn.relu(
                tf.matmul(z[i], self.weights['W_%d' % i]) +
                self.weights['b_%d' % i])
            temp = tf.nn.dropout(temp, 1 - self.mess_dropout[i])
            z.append(temp)

        preds = term0 + tf.matmul(z[-1], self.weights['h'])

        return preds
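term3 above is the standard bi-interaction (FM) pooling identity: 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) equals the sum of element-wise products over all feature pairs. A quick NumPy check (illustrative only):

import numpy as np

V = np.random.randn(5, 8)            # embeddings of 5 active features, emb_size 8
lhs = 0.5 * (V.sum(axis=0) ** 2 - (V ** 2).sum(axis=0))
rhs = sum(V[i] * V[j] for i in range(5) for j in range(i + 1, 5))

assert np.allclose(lhs, rhs)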
Example #6
    def _inference(self, x, dropout):
        with tf.name_scope('gconv1'):
            N, M = x.get_shape()  # N: number of samples, M: number of features
            M = int(M)
            # Filter
            W = self._weight_variable([self.K, self.F])
            def filter(xt, k):
                xt = tf.transpose(xt)  # N x M
                xt = tf.reshape(xt, [-1, 1])  # NM x 1
                w = tf.slice(W, [k,0], [1,-1])  # 1 x F
                y = tf.matmul(xt, w)  # NM x F
                return tf.reshape(y, [-1, M, self.F])  # N x M x F
            xt0 = tf.transpose(x)  # M x N
            y = filter(xt0, 0)
            if self.K > 1:
                xt1 = tf.sparse_tensor_dense_matmul(self.L, xt0)
                y += filter(xt1, 1)
            for k in range(2, self.K):
                xt2 = 2 * tf.sparse_tensor_dense_matmul(self.L, xt1) - xt0  # M x N
                y += filter(xt2, k)
                xt0, xt1 = xt1, xt2
            # Bias and non-linearity
#            b = self._bias_variable([1, 1, self.F])
            b = self._bias_variable([1, M, self.F])
            y += b  # N x M x F
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*M, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*M])
            y = tf.matmul(y, W) + b
        return y
Example #7
 def chebyshev5(self, x, L, Fout, K):
     N, M, Fin = x.get_shape()
     N, M, Fin = int(N), int(M), int(Fin)
     # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
     L = scipy.sparse.csr_matrix(L)
     L = graph.rescale_L(L, lmax=2)
     L = L.tocoo()
     indices = np.column_stack((L.row, L.col))
     L = tf.SparseTensor(indices, L.data, L.shape)
     L = tf.sparse_reorder(L)
     # Transform to Chebyshev basis
     x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
     x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
     x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
     print("Test")
     def concat(x, x_):
         x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
         return tf.concat([x, x_], axis=0)  # K x M x Fin*N
     if K > 1:
         x1 = tf.sparse_tensor_dense_matmul(L, x0)
         x = concat(x, x1)
         print(" K = 1")
     for k in range(2, K):
         x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
         x = concat(x, x2)
         x0, x1 = x1, x2
     x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
     x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
     x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
     # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
     W = self._weight_variable([Fin*K, Fout], regularization=False)
     x = tf.matmul(x, W)  # N*M x Fout
     return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Example #8
    def chebyshev(self, x, L, Fout, K, normalized=False, algo='LB'):
        '''Whether the Laplacian is normalized, and whether algo is 'LB' or 'gL'
        (graph Laplacian), affects the value of "lmax" (the maximum eigenvalue).'''
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)  
        lmax=graph.lmax(L, normalized, algo)   # 202000912
#        L = graph.rescale_L(L, lmax=2)            
        L = graph.rescale_L(L, lmax)        # 202000912
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(1, K-1):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
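In both variants above, rescale_L maps the Laplacian's spectrum into [-1, 1] (roughly L_scaled = 2L/lmax - I), the interval on which the Chebyshev recursion is well behaved; the second variant estimates lmax instead of assuming lmax = 2. graph.rescale_L and graph.lmax are project helpers; the sketch below is an illustrative SciPy equivalent, not the project's code:

import numpy as np
import scipy.sparse as sp

def rescale_laplacian(L, lmax):
    """Map the spectrum of L from [0, lmax] to [-1, 1]."""
    L = sp.csr_matrix(L)
    I = sp.identity(L.shape[0], format='csr', dtype=L.dtype)
    return (2.0 / lmax) * L - I

L = sp.csr_matrix(np.array([[ 1., -1.,  0.],
                            [-1.,  2., -1.],
                            [ 0., -1.,  1.]]))       # path-graph Laplacian, eigenvalues 0, 1, 3
L_scaled = rescale_laplacian(L, lmax=3.0)
print(np.linalg.eigvalsh(L_scaled.toarray()))        # -> approximately [-1, -1/3, 1]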
Example #9
 def _call(self, inputs):
     x = inputs
     x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero)
     x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
     x = tf.sparse_tensor_dense_matmul(self.adj, x)
     outputs = self.act(x)
     return outputs
Example #10
def dot1(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        y = tf.sparse_tensor_to_dense(y)
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.sparse_tensor_dense_matmul(x, y)
    return res
Example #11
    def build(self):
        """Wrapper for _build()."""
        with tf.variable_scope(self.name):
            self._build()

        # FM
        self.fm_embedding = tf.get_variable('fm_embedding',
                                            [self.input_dim, FLAGS.hidden1])
        squared_feature_emb = tf.square(self.fm_embedding)

        if self.sparse_inputs:
            summed_feature_emb = tf.sparse_tensor_dense_matmul(
                self.inputs, self.fm_embedding)
            squared_feature_emb_sum = tf.sparse_tensor_dense_matmul(
                self.inputs, squared_feature_emb)
        else:
            summed_feature_emb = tf.matmul(self.inputs, self.fm_embedding)
            squared_feature_emb_sum = tf.matmul(self.inputs,
                                                squared_feature_emb)

        summed_feature_emb_square = tf.square(summed_feature_emb)

        self.NFM = 0.5 * tf.subtract(
            summed_feature_emb_square, squared_feature_emb_sum, name='nfm')
        self.NFM = tf.nn.dropout(self.NFM, 1 - self.fm_dropout)

        # Build sequential layer model
        self.activations.append(self.inputs)
        for layer in self.layers:
            hidden = layer(self.activations[-1])
            if isinstance(hidden, tuple):
                tf.logging.info('{} shape = {}'.format(layer.name,
                                                       hidden[0].get_shape()))
            else:
                tf.logging.info('{} shape = {}'.format(layer.name,
                                                       hidden.get_shape()))
            self.activations.append(hidden)
        self.GCN = self.activations[-1]

        self.outputs = self.project_layer(
            tf.concat([self.GCN, self.NFM], axis=1))

        # Store model variables for easy access
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                      scope=self.name)
        self.vars = variables
        for k in self.vars:
            tf.logging.info((k.name, k.get_shape()))

        # Build metrics
        self._loss()
        self._accuracy()
        self._predict()

        self.opt_op = self.optimizer.minimize(self.loss)
Example #12
 def __encoder(self, A, H, layer):
     # H = tf.matmul(H, self.W[layer])
     if layer == 0:
         H = layers.sparse_dropout(
             H, 1 - self.dropout, self.placeholders['num_features_nonzero'])
         H = tf.sparse_tensor_dense_matmul(H, self.W[layer])
     else:
         H = tf.nn.dropout(H, 1 - self.dropout)
         H = tf.matmul(H, self.W[layer])
     self.C[layer] = self.graph_attention_layer(A, H, self.v[layer],
                                                layer)  # attention value
     return tf.sparse_tensor_dense_matmul(self.C[layer], H)
Example #13
 def _call(self, inputs):
     outputs = []
     # There are num_types relation types; compute each one separately and append the result
     for k in range(self.num_types):
         x = dropout_sparse(inputs, 1 - self.dropout,
                            self.nonzero_feat[self.edge_type[1]])  # dropout to reduce overfitting
         x = tf.sparse_tensor_dense_matmul(x, self.vars['weights_%d' % k])
         x = tf.sparse_tensor_dense_matmul(self.adj_mats[self.edge_type][k],
                                           x)  # the sparse matrix product yields a dense matrix
         outputs.append(self.act(x))
     outputs = tf.add_n(outputs)
     outputs = tf.nn.l2_normalize(outputs, dim=1)  # L2-normalize each row
     return outputs  # outputs are the node features
Example #14
    def _create_gcmc_embed(self):
        A_fold_hat = self._split_A_hat(self.norm_adj)

        embeddings = tf.concat(
            [self.weights['user_embedding'], self.weights['item_embedding']],
            axis=0)

        all_embeddings = []

        for k in range(0, self.n_layers):
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(
                    tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))
            embeddings = tf.concat(temp_embed, 0)
            # convolutional layer.
            embeddings = tf.nn.leaky_relu(
                tf.matmul(embeddings, self.weights['W_gc_%d' % k]) +
                self.weights['b_gc_%d' % k])
            # dense layer.
            mlp_embeddings = tf.matmul(
                embeddings,
                self.weights['W_mlp_%d' % k]) + self.weights['b_mlp_%d' % k]
            # mlp_embeddings = tf.nn.dropout(mlp_embeddings, 1 - self.mess_dropout[k])

            all_embeddings += [mlp_embeddings]
        all_embeddings = tf.concat(all_embeddings, 1)

        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings,
                                                  [self.n_users, self.n_items],
                                                  0)
        return u_g_embeddings, i_g_embeddings
Example #15
    def _create_lightgcn_embed(self):
        if self.node_dropout_flag:
            A_fold_hat = self._split_A_hat_node_dropout(self.norm_adj)
        else:
            A_fold_hat = self._split_A_hat(self.norm_adj)

        ego_embeddings = tf.concat(
            [self.weights['user_embedding'], self.weights['item_embedding']],
            axis=0)
        all_embeddings = [ego_embeddings]

        for k in range(0, self.n_layers):

            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(
                    tf.sparse_tensor_dense_matmul(A_fold_hat[f],
                                                  ego_embeddings))

            side_embeddings = tf.concat(temp_embed, 0)
            ego_embeddings = side_embeddings
            all_embeddings += [ego_embeddings]
        all_embeddings = tf.stack(all_embeddings, 1)
        all_embeddings = tf.reduce_mean(all_embeddings, axis=1, keepdims=False)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings,
                                                  [self.n_users, self.n_items],
                                                  0)
        return u_g_embeddings, i_g_embeddings
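LightGCN, as implemented above, propagates the embeddings with no feature transforms or non-linearities and averages the per-layer results. An illustrative NumPy sketch of that combination step (toy shapes, not from the project):

import numpy as np

A_hat = np.random.rand(4, 4).astype(np.float32)   # stand-in for the normalized adjacency
E = np.random.rand(4, 8).astype(np.float32)       # concatenated user/item embeddings
n_layers = 3

layers = [E]
for _ in range(n_layers):
    layers.append(A_hat @ layers[-1])             # pure propagation, as in the fold loop above

final = np.stack(layers, axis=1).mean(axis=1)     # mirrors tf.stack(..., 1) + tf.reduce_mean(..., axis=1)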
Example #16
 def call(self, inputs):
     inputs = self.batch_normlization(inputs)
     mapped_inputs = tf.matmul(inputs, self.kernel)
     attention_inputs1 = tf.matmul(inputs, self.kernel1)
     attention_inputs2 = tf.matmul(inputs, self.kernel2)
     con_sa_1 = tf.reduce_sum(tf.multiply(attention_inputs1, inputs),
                              1,
                              keepdims=True)
     con_sa_2 = tf.reduce_sum(tf.multiply(attention_inputs2, inputs),
                              1,
                              keepdims=True)
     con_sa_1 = tf.keras.activations.tanh(con_sa_1)
     con_sa_2 = tf.keras.activations.tanh(con_sa_2)
     if self.dropout_rate > 0.0:
         con_sa_1 = tf.nn.dropout(con_sa_1, self.dropout_rate)
         con_sa_2 = tf.nn.dropout(con_sa_2, self.dropout_rate)
     con_sa_1 = tf.cast(self.adjs[0], dtype=tf.float32) * con_sa_1
     con_sa_2 = tf.cast(self.adjs[0], dtype=tf.float32) * tf.transpose(
         con_sa_2, [1, 0])
     weights = tf.sparse_add(con_sa_1, con_sa_2)
     weights = tf.SparseTensor(indices=weights.indices,
                               values=tf.nn.leaky_relu(weights.values),
                               dense_shape=weights.dense_shape)
     attention_adj = tf.sparse_softmax(weights)
     attention_adj = tf.sparse_reshape(
         attention_adj, shape=[self.nodes_num, self.nodes_num])
     value = tf.sparse_tensor_dense_matmul(attention_adj, mapped_inputs)
     return self.activation(value)
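A note on how the attention logits are assembled above: con_sa_1 is a column of per-node scores and the transposed con_sa_2 is a row of per-node scores, so masking both with the adjacency and adding them gives, for every edge (i, j), the additive logit s1[i] + s2[j]. An illustrative dense NumPy version of that broadcast:

import numpy as np

adj = np.array([[0., 1.],
                [1., 1.]], dtype=np.float32)       # toy adjacency mask
s1 = np.array([[0.3], [0.7]], dtype=np.float32)    # tanh(X a1), shape (N, 1)
s2 = np.array([[0.1], [0.9]], dtype=np.float32)    # tanh(X a2), shape (N, 1)

logits = adj * s1 + adj * s2.T                     # logits[i, j] = s1[i] + s2[j] wherever adj[i, j] != 0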
Example #17
 def _call(self, inputs):
     x = inputs
     x = tf.nn.dropout(x, 1 - self.dropout)
     x = tf.matmul(x, self.vars['weights'])
     x = tf.sparse_tensor_dense_matmul(self.adj, x)
     outputs = self.act(x)
     return outputs
Example #18
    def _create_gcn_embed(self):
        A = self.A_in
        # Generate a set of adjacency sub-matrix.
        A_fold_hat = self._split_A_hat(A)

        embeddings = tf.concat(
            [self.weights['user_embed'], self.weights['entity_embed']], axis=0)
        all_embeddings = [embeddings]

        for k in range(0, self.n_layers):
            # A_hat_drop = tf.nn.dropout(A_hat, 1 - self.node_dropout[k], [self.n_users + self.n_items, 1])
            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(
                    tf.sparse_tensor_dense_matmul(A_fold_hat[f], embeddings))

            embeddings = tf.concat(temp_embed, 0)
            embeddings = tf.nn.leaky_relu(
                tf.matmul(embeddings, self.weights['W_gc_%d' % k]) +
                self.weights['b_gc_%d' % k])
            embeddings = tf.nn.dropout(embeddings, 1 - self.mess_dropout[k])

            # normalize the distribution of embeddings.
            norm_embeddings = tf.math.l2_normalize(embeddings, axis=1)

            all_embeddings += [norm_embeddings]

        all_embeddings = tf.concat(all_embeddings, 1)

        ua_embeddings, ea_embeddings = tf.split(
            all_embeddings, [self.n_users, self.n_entities], 0)
        return ua_embeddings, ea_embeddings
Example #19
def sparse_linear(input_, input_size, output_size, scope, init_bias=0.0):
    with tf.variable_scope(scope):
        W = tf.get_variable(
            "Matrix", [input_size, output_size], tf.float32,
            tf.random_normal_initializer(stddev=1.0 / math.sqrt(output_size)))
        b = tf.get_variable("bias", [output_size],
                            initializer=tf.constant_initializer(init_bias))
    return tf.sparse_tensor_dense_matmul(input_, W) + b
Example #20
 def add_diag_layer(self, inlayer, init=ones):
     inlayer = tf.nn.dropout(inlayer, 1 - self.dropout)
     w0 = init([1, self.dim])
     tosum = tf.sparse_tensor_dense_matmul(self.M, tf.multiply(inlayer, w0))
     if self.act_func is None:
         return tosum
     else:
         return self.act_func(tosum)
Example #21
 def add_full_layer(self, inlayer, init=glorot):
     inlayer = tf.nn.dropout(inlayer, 1 - self.dropout)
     w0 = init([self.dim, self.dim])
     tosum = tf.sparse_tensor_dense_matmul(self.M, tf.matmul(inlayer, w0))
     if self.act_func is None:
         return tosum
     else:
         return self.act_func(tosum)
Example #22
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    print(x)
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res
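A minimal usage sketch for the sparse branch (assumes TF 1.x graph mode, where tf.sparse_tensor_dense_matmul is available; in TF 2.x the same op is exposed as tf.sparse.sparse_dense_matmul; the indices and values below are made up):

x = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
y = tf.constant([[1.0, 0.0],
                 [0.0, 1.0],
                 [2.0, 3.0]])
out = dot(x, y, sparse=True)   # (2, 3) sparse times (3, 2) dense -> (2, 2) dense tensor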
Example #23
 def _post_setup(self):
     self.sum_rho = _tf.reduce_sum([
         _tf.sparse_tensor_dense_matmul(self.b_maps[t], self.rho[t])
         for t in self.atom_types
     ],
                                   axis=0,
                                   name="SumRho")
     _tf.summary.histogram("SumRho", self.sum_rho)
     with _tf.variable_scope(self.name + "_EmbeddingFunc",
                             reuse=_tf.AUTO_REUSE):
         self.F_out = self.F(self.sum_rho)
     self.output = _tf.add(_tf.reduce_sum([
         _tf.sparse_tensor_dense_matmul(self.b_maps[t], self.pairPot[t])
         for t in self.atom_types
     ],
                                          axis=0,
                                          name="SumPairPot") + self.F_out,
                           self.offset,
                           name="AtomicEnergy")
Example #24
    def build(self):
        """Wrapper for _build()."""
        with tf.variable_scope(self.name):
            self._build()

        # FM
        self.fm_embedding = tf.get_variable('fm_embedding',
                                            [self.input_dim, FLAGS.fm_dims])
        squared_feature_emb = tf.square(self.fm_embedding)

        if self.sparse_inputs:
            summed_feature_emb = tf.sparse_tensor_dense_matmul(
                self.inputs, self.fm_embedding)
            squared_feature_emb_sum = tf.sparse_tensor_dense_matmul(
                self.inputs, squared_feature_emb)
        else:
            summed_feature_emb = tf.matmul(self.inputs, self.fm_embedding)
            squared_feature_emb_sum = tf.matmul(self.inputs,
                                                squared_feature_emb)

        summed_feature_emb_square = tf.square(summed_feature_emb)

        self.NFM = 0.5 * tf.subtract(
            summed_feature_emb_square, squared_feature_emb_sum, name='nfm')
        self.NFM = tf.nn.dropout(self.NFM, 1 - self.fm_dropout)

        self.outputs = self.project_layer(
            tf.concat([self.GAT, self.NFM], axis=1))

        # Store model variables for easy access
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                      scope=self.name)
        self.vars = variables
        for k in self.vars:
            tf.logging.info((k.name, k.get_shape()))

        # Build metrics
        self._loss()
        self._accuracy()
        self._predict()

        self.opt_op = self.optimizer.minimize(self.loss)
Example #25
 def _call(self, inputs):
     outputs = []
     for k in range(self.num_types):
         x = tf.nn.dropout(inputs, 1 - self.dropout)
         x = tf.matmul(x, self.vars['weights_%d' % k])
         x = tf.sparse_tensor_dense_matmul(self.adj_mats[self.edge_type][k],
                                           x)
         outputs.append(self.act(x))
     outputs = tf.add_n(outputs)  # element-wise sum of the tensors in the list
     outputs = tf.nn.l2_normalize(outputs, dim=1)  # dim=1: L2-normalize each row
     return outputs
Example #26
    def create(self):
        rotation = tf.eye(self.dim)
        #rotation = tf.SparseTensor(indices=[[i, i] for i in range(self.dim)], values=[1.0 for _ in range(self.dim)], dense_shape=[self.dim, self.dim])

        for plane in self.planes:
            old_rotation = rotation
            new_rotation = self.rotation_matrix(plane)
            #rotation = tf.matmul(old_rotation, tf.sparse_add(tf.zeros([self.dim, self.dim]), new_rotation))
            rotation = tf.sparse_tensor_dense_matmul(new_rotation,
                                                     old_rotation)

        return rotation
Example #27
    def _create_bi_interaction_embed(self):
        A = self.A_in
        # Generate a set of adjacency sub-matrix.
        A_fold_hat = self._split_A_hat(A)

        ego_embeddings = tf.concat(
            [self.weights['user_embed'], self.weights['entity_embed']], axis=0)
        all_embeddings = [ego_embeddings]

        for k in range(0, self.n_layers):
            # A_hat_drop = tf.nn.dropout(A_hat, 1 - self.node_dropout[k], [self.n_users + self.n_items, 1])
            temp_embed = []
            for f in range(self.n_fold):
                #                 set_trace()  # GTL
                temp_embed.append(
                    tf.sparse_tensor_dense_matmul(A_fold_hat[f],
                                                  ego_embeddings))

            # sum messages of neighbors.
            side_embeddings = tf.concat(temp_embed, 0)

            add_embeddings = ego_embeddings + side_embeddings

            # transformed sum messages of neighbors.
            sum_embeddings = tf.nn.leaky_relu(
                tf.matmul(add_embeddings, self.weights['W_gc_%d' % k]) +
                self.weights['b_gc_%d' % k])

            # bi messages of neighbors.
            bi_embeddings = tf.multiply(ego_embeddings, side_embeddings)
            # transformed bi messages of neighbors.
            bi_embeddings = tf.nn.leaky_relu(
                tf.matmul(bi_embeddings, self.weights['W_bi_%d' % k]) +
                self.weights['b_bi_%d' % k])

            ego_embeddings = bi_embeddings + sum_embeddings
            # message dropout.
            ego_embeddings = tf.nn.dropout(ego_embeddings,
                                           1 - self.mess_dropout[k])

            # normalize the distribution of embeddings.
            norm_embeddings = tf.math.l2_normalize(ego_embeddings, axis=1)

            all_embeddings += [norm_embeddings]

        all_embeddings = tf.concat(all_embeddings, 1)

        ua_embeddings, ea_embeddings = tf.split(
            all_embeddings, [self.n_users, self.n_entities], 0)
        return ua_embeddings, ea_embeddings
Example #28
 def add_sparse_att_layer(self, inlayer, dual_layer):
     dual_transform = tf.reshape(
         tf.layers.conv1d(tf.expand_dims(dual_layer, 0), 1, 1), (-1, 1))
     logits = tf.reshape(
         tf.nn.embedding_lookup(dual_transform, self.r_mat.values), [-1])
     lrelu = tf.SparseTensor(indices=self.r_mat.indices,
                             values=tf.nn.leaky_relu(logits),
                             dense_shape=self.r_mat.dense_shape)
     coefs = tf.sparse_softmax(lrelu)
     vals = tf.sparse_tensor_dense_matmul(coefs, inlayer)
     if self.act_func is None:
         return vals
     else:
         return self.act_func(vals)
Example #29
    def test(self):
        print('Test begin')
        self.pred_mp = tf.exp(tf.sparse_tensor_dense_matmul(self.X, self.w))
        self.MSE = tf.reduce_mean(tf.square(self.z - self.pred_mp))

        x, b, z, y = self.util_test.get_all_data_origin()
        feed_dict = {}

        feed_dict[self.X] = tf.SparseTensorValue(
            x, [1] * len(x), [self.test_data_amt, dimension])
        feed_dict[self.z] = z
        feed_dict[self.y] = y
        feed_dict[self.b] = b

        # calculate MSE
        mse = self.sess.run(self.MSE, feed_dict)
        print("MSE: {}".format(mse))

        ks = self.pred_mp / self.theta
        ps = tf.pow(self.z, (ks - 1.)) * tf.exp(-self.z / self.theta) / tf.pow(
            self.theta, ks) / tf.exp(tf.lgamma(ks))
        cs = tf.igamma(ks, self.b / self.theta) / tf.exp(tf.lgamma(ks))
        # calculate AUC and LogLoss
        win_rate = self.sess.run(cs, feed_dict)
        auc = roc_auc_score(y, win_rate)
        print("AUC: {}".format(auc))
        logloss = log_loss(y, win_rate)
        print("Log Loss: {}".format(logloss))

        # calculate ANLP
        logp = -tf.log(tf.clip_by_value(ps, 1e-8, 1.0))
        logp_arr = self.sess.run(logp, feed_dict)
        logp_arr[np.isnan(logp_arr)] = 1e-20  # replace NaN entries caused by overflow with a tiny value
        logp_arr[logp_arr == 0] = 1e-20

        anlp = np.mean(logp_arr)
        print("ANLP: {}".format(anlp))

        # save result and params
        fin = open(self.output_dir + 'result.txt', 'w')
        fin.writelines([
            "MSE: {0}   AUC: {1}    Log Loss: {2}   ANLP: {3}\n".format(
                mse, auc, logloss, anlp)
        ])
        fin.close()

        np.save(self.output_dir + 'w', self.sess.run(self.w))
        np.save(self.output_dir + 'k', self.sess.run(ks, feed_dict))
        np.save(self.output_dir + 'theta', self.sess.run(self.theta))
Example #30
    def _create_ngcf_embed(self):
        if self.node_dropout_flag:
            A_fold_hat = self._split_A_hat_node_dropout(self.norm_adj)
        else:
            A_fold_hat = self._split_A_hat(self.norm_adj)

        ego_embeddings = tf.concat(
            [self.weights['user_embedding'], self.weights['item_embedding']],
            axis=0)

        all_embeddings = [ego_embeddings]

        for k in range(0, self.n_layers):

            temp_embed = []
            for f in range(self.n_fold):
                temp_embed.append(
                    tf.sparse_tensor_dense_matmul(A_fold_hat[f],
                                                  ego_embeddings))

            side_embeddings = tf.concat(temp_embed, 0)
            sum_embeddings = tf.nn.leaky_relu(
                tf.matmul(side_embeddings, self.weights['W_gc_%d' % k]) +
                self.weights['b_gc_%d' % k])

            # bi messages of neighbors.
            bi_embeddings = tf.multiply(ego_embeddings, side_embeddings)
            # transformed bi messages of neighbors.
            bi_embeddings = tf.nn.leaky_relu(
                tf.matmul(bi_embeddings, self.weights['W_bi_%d' % k]) +
                self.weights['b_bi_%d' % k])
            # non-linear activation.
            ego_embeddings = sum_embeddings + bi_embeddings

            # message dropout.
            # ego_embeddings = tf.nn.dropout(ego_embeddings, 1 - self.mess_dropout[k])

            # normalize the distribution of embeddings.
            norm_embeddings = tf.nn.l2_normalize(ego_embeddings, axis=1)

            all_embeddings += [norm_embeddings]

        all_embeddings = tf.concat(all_embeddings, 1)
        u_g_embeddings, i_g_embeddings = tf.split(all_embeddings,
                                                  [self.n_users, self.n_items],
                                                  0)
        return u_g_embeddings, i_g_embeddings