Example #1
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolution(
            input_dim=self.input_dim,
            output_dim=self.hidden1_dim,
            adj=self.adj,
            # features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z (node embeddings)
        self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                           output_dim=self.hidden2_dim,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        # Z_mean for AE. No noise added (because not a VAE)
        self.z_mean = self.embeddings

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
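Taken together, the two GraphConvolution layers and the InnerProductDecoder above amount to two propagation steps followed by a Z Z^T reconstruction. A minimal NumPy sketch of that computation follows; the function names and toy shapes are illustrative, not taken from the repository:

import numpy as np

def gcn_layer(adj_norm, x, w, act=lambda v: v):
    # One propagation step, act(A_hat @ X @ W), as in GraphConvolution above.
    return act(adj_norm @ x @ w)

def inner_product_decode(z, act=lambda v: v):
    # InnerProductDecoder: adjacency logits reconstructed as Z @ Z^T.
    return act(z @ z.T)

rng = np.random.default_rng(0)
adj_norm = np.eye(4)                     # stand-in for the normalized adjacency
x = rng.standard_normal((4, 8))          # node features (input_dim = 8)
w1 = rng.standard_normal((8, 16))        # hidden1_dim = 16
w2 = rng.standard_normal((16, 2))        # hidden2_dim = 2
h = gcn_layer(adj_norm, x, w1, act=lambda v: np.maximum(v, 0.0))  # ReLU, as in hidden1
z = gcn_layer(adj_norm, h, w2)                                    # linear, as in embeddings
print(inner_product_decode(z).shape)     # (4, 4) reconstructed adjacency logits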
Example #2
    def _build(self, hidden1, hidden2, hidden3):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.hidden2 = GraphConvolution(input_dim=hidden1,
                                        output_dim=hidden2,
                                        adj=self.adj,
                                        act=lambda x: x,
                                        dropout=self.dropout,
                                        logging=self.logging)(self.hidden1)

        self.embeddings = GraphConvolution(input_dim=hidden2,
                                           output_dim=hidden3,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden2)

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=hidden3,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
Example #3
    def __init__(self,
                 input_feat_dim,
                 hidden_dim1,
                 hidden_dim2,
                 dropout,
                 pretrained_weights=None,
                 hidden_dims_predictor=[256],
                 drop_out_predictor=0.3,
                 output_dim=1,
                 freezed=False):
        super(GCNPredictor, self).__init__()
        self.gc1 = GraphConvolution(input_feat_dim,
                                    hidden_dim1,
                                    dropout,
                                    act=F.relu)
        self.gc2 = GraphConvolution(hidden_dim1,
                                    hidden_dim2,
                                    dropout,
                                    act=F.relu)
        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

        self.predictor = Predictor(input_dim=hidden_dim2,
                                   output_dim=output_dim,
                                   h_dims=hidden_dims_predictor,
                                   drop_out=drop_out_predictor)
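A hedged sketch of how a predictor like this might be driven end to end. It assumes, as in the common gae-pytorch layout, that GraphConvolution.forward takes (x, adj) and that Predictor maps node embeddings to outputs; it is not the project's actual forward method:

def gcn_predictor_forward_sketch(model, x, adj):
    # Encoder: two graph convolutions produce node embeddings.
    h = model.gc1(x, adj)          # assumed signature: GraphConvolution(x, adj)
    z = model.gc2(h, adj)
    # Decoder and prediction head share the same embeddings.
    adj_recon = model.dc(z)        # inner-product reconstruction of the adjacency
    y_pred = model.predictor(z)    # per-node prediction of size output_dim
    return y_pred, adj_recon, z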
Example #4
    def __init__(self, input_dim, latent_dim=128, h_dims=[512], drop_out=0.3):

        super(GAEBase, self).__init__()

        self.latent_dim = latent_dim

        modules = []
        hidden_dims = deepcopy(h_dims)

        hidden_dims.insert(0, input_dim)

        # Build Encoder
        for i in range(1, len(hidden_dims)):
            i_dim = hidden_dims[i - 1]
            o_dim = hidden_dims[i]

            modules.append(
                nn.Sequential(
                    GraphConvolution(i_dim, o_dim, drop_out, act=lambda x: x),
                    #nn.BatchNorm1d(o_dim),
                    #nn.ReLU()
                    #nn.Dropout(drop_out)
                ))
            #in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        self.bottleneck = GraphConvolution(hidden_dims[-1],
                                           latent_dim,
                                           drop_out,
                                           act=lambda x: x)

        # Build Decoder
        self.decoder = InnerProductDecoder(drop_out, act=lambda x: x)
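A sketch of how GAEBase's pieces could be chained in a forward pass. It assumes each GraphConvolution takes (x, adj); note that the nn.Sequential wrappers above forward only a single argument, so the layers are walked manually here. This is illustrative, not the repository's forward method:

def gae_base_forward_sketch(model, x, adj):
    h = x
    for block in model.encoder:
        h = block[0](h, adj)        # each block wraps a single GraphConvolution
    z = model.bottleneck(h, adj)    # latent node embeddings of size latent_dim
    adj_recon = model.decoder(z)    # inner-product reconstruction
    return adj_recon, z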
Example #5
    def _build(self):
        # First GCN layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolution(input_dim=self.input_dim,
                                        output_dim=self.hidden1_dim,
                                        adj=self.adj,
                                        # features_nonzero=self.features_nonzero,
                                        act=tf.nn.relu,
                                        dropout=self.dropout,
                                        logging=self.logging)(self.inputs)

        # Second GCN layer: (A, H) --> Z (model embeddings)
        self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                           output_dim=self.hidden2_dim,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        # Z_mean for the AE; no noise is added (because this is not a VAE)
        self.z_mean = self.embeddings

        # Inner-product decoder: Z (embeddings) --> A (reconstructed adjacency matrix)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
Example #6
    def _build(self):

        self.GCN1 = GraphConvolution(input_dim=self.author_feature_inputs,
                                     output_dim=FLAGS.hidden1,
                                     adj=self.AuthorAdj,
                                     dropout=self.dropout,
                                     logging=self.logging)(self.inputs)

        # N * hidden2 (60)
        self.AuthorEnbedding = GraphConvolution(input_dim=FLAGS.hidden1,
                                                output_dim=FLAGS.hidden2,
                                                adj=self.AuthorAdj,
                                                dropout=self.dropout,
                                                act=lambda x: x,
                                                logging=self.logging)(
                                                    self.GCN1)

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.AuthorEnbedding)

        # self.document_feature_inputs = tf.matmul(D2A, )

        pass
Example #7
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.num_hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.z_mean = GraphConvolution(input_dim=self.num_hidden1,
                                       output_dim=self.num_hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        self.z_log_std = GraphConvolution(input_dim=self.num_hidden1,
                                          output_dim=self.num_hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.num_hidden2]) * tf.exp(self.z_log_std)

        self.reconstructions = InnerProductDecoder(input_dim=self.num_hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
Example #8
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
     super(GCNModelAE, self).__init__()
     self.gc1 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=F.relu)
     self.gc2 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=lambda x: x)
     self.dc = InnerProductDecoder(dropout, act=lambda x: x)
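This and the following VAE variants follow the gae-pytorch layout. A hedged usage sketch for the AE model above, again assuming GraphConvolution.forward(x, adj):

def gcn_ae_forward_sketch(model, x, adj):
    hidden = model.gc1(x, adj)      # (N, hidden_dim1), ReLU applied inside the layer
    z = model.gc2(hidden, adj)      # (N, hidden_dim2) node embeddings
    return model.dc(z), z           # reconstructed adjacency logits and embeddings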
Example #9
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
     super(GCNModelAE, self).__init__()
     #self.gc1 = ChebConv(input_feat_dim, hidden_dim1, 3) #GCNConv(input_feat_dim, hidden_dim1, improved=False) #GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
     #self.gc2 = ChebConv(hidden_dim1, hidden_dim2, 3) #GCNConv(hidden_dim1, hidden_dim2, improved=False) #GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
     #self.gc3 = ChebConv(hidden_dim1, hidden_dim2, 3) #GCNConv(hidden_dim1, hidden_dim2, improved=False) #GraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
     self.gc1 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=F.elu)
     self.gc2 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=F.elu)
     #self.dc = InnerProductDecoder(dropout, act=lambda x: x)
     self.dc = InnerProductDecoder(dropout, act=F.relu)
Example #10
File: model.py Project: masoudmlk/MIVGAE
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
     super(GCNModelVAE, self).__init__()
     self.gc1 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=F.relu,
                                 iteration=4,
                                 first_layer=False)
     self.gc2 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=lambda x: x)
     self.gc3 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=lambda x: x)
     self.dc = InnerProductDecoder(dropout, act=lambda x: x)
Example #11
File: model.py Project: sreycodes/YLR
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        fl = GraphConvolutionSparse(input_dim=self.input_dim,
                                    output_dim=self.hidden1_dim,
                                    adj=self.adj,
                                    features_nonzero=self.features_nonzero,
                                    act=tf.nn.relu,
                                    dropout=self.dropout,
                                    dtype=self.dtype,
                                    logging=self.logging)
        # self.ir = fl.ir
        self.initial = fl.initial
        self.fw = fl.vars['weights']
        self.hidden1 = fl(self.inputs)
        self.dx = fl.dx
        self.wdx = fl.wdx
        self.awdx = fl.awdx

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       dtype=self.dtype,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=tf.nn.sigmoid,
                                          dropout=self.dropout,
                                          dtype=self.dtype,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim],
            dtype=self.dtype) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   flatten=self.flatten_output,
                                                   logging=self.logging)(self.z)
Example #12
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout,
              alpha):
     super(GATcoarseVAE, self).__init__()
     self.gc0 = GraphConvolution(input_feat_dim,
                                 input_feat_dim,
                                 dropout,
                                 act=lambda x: x)
     self.gat1 = GraphAttentionLayer(input_feat_dim,
                                     hidden_dim2,
                                     dropout=dropout,
                                     alpha=alpha,
                                     concat=True)
     self.gc2 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=lambda x: x)
     self.gc3 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=lambda x: x)
     self.dc = InnerProductDecoder(dropout, act=lambda x: x)
Example #13
    def _build(self):
        # First GCN layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolution(input_dim=self.input_dim,
                                        output_dim=self.hidden1_dim,
                                        adj=self.adj,
                                        # features_nonzero=self.features_nonzero,
                                        act=tf.nn.relu,
                                        dropout=self.dropout,
                                        dtype=self.dtype,
                                        logging=self.logging)(self.inputs)

        # Second GCN layer: (A, H) --> Z (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       dtype=self.dtype,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          dtype=self.dtype,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim],
            dtype=self.dtype) * tf.exp(self.z_log_std)

        # Inner-product decoder: Z (embeddings) --> A (reconstructed adjacency matrix)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   flatten=self.flatten_output,
                                                   logging=self.logging)(self.z)
Example #14
File: model.py Project: afcarl/graphite
    def encoder(self, inputs):

        hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=0.,
            logging=self.logging)(inputs)

        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(hidden1)

        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(hidden1)
Example #15
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.hidden1_dim,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim]) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
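The sampling line above is the usual VGAE reparameterization, z = z_mean + eps * exp(z_log_std). In the reference GAE/VGAE setup it is trained with a weighted reconstruction cross-entropy plus a KL penalty on (z_mean, z_log_std). A sketch of that objective in the same TF 1.x style, where labels_sub, norm, and pos_weight are assumed placeholders supplied by the training script:

import tensorflow as tf  # TF 1.x, matching tf.random_normal above

def vgae_loss_sketch(preds_sub, labels_sub, z_mean, z_log_std,
                     num_nodes, norm, pos_weight):
    # Reconstruction term: weighted cross-entropy on the adjacency logits.
    recon = norm * tf.reduce_mean(
        tf.nn.weighted_cross_entropy_with_logits(targets=labels_sub,
                                                 logits=preds_sub,
                                                 pos_weight=pos_weight))
    # KL divergence between q(z | X, A) and the standard normal prior.
    kl = (0.5 / num_nodes) * tf.reduce_mean(
        tf.reduce_sum(1 + 2 * z_log_std - tf.square(z_mean)
                      - tf.square(tf.exp(z_log_std)), 1))
    return recon - kl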
Example #16
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,  # input size
                                              output_dim=FLAGS.hidden1,  # output size
                                              adj=self.adj,  # Adjacency matrix
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)  # sparse input

        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,  # input size
                                           output_dim=FLAGS.hidden2,  # output size
                                           adj=self.adj,  # Adjacency matrix
                                           act=lambda x: x,  # no activation function
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)  # tensor input

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
Example #17
    def _build(self):
        self.hidden1, self.w1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.embeddings, self.w2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                                    output_dim=FLAGS.hidden2,
                                                    adj=self.adj,
                                                    act=lambda x: x,
                                                    dropout=self.dropout,
                                                    logging=self.logging)(
                                                        self.hidden1)
        #        self.embeddings1 = DeepConvolution(input_dim=FLAGS.hidden2,
        #                                           output_dim=FLAGS.hidden3,
        #
        #                                           act=lambda x: x,
        #                                           dropout=0.0001,
        #                                           logging=self.logging)(self.embeddings)
        #        self.embeddings2 = DeepConvolution(input_dim=FLAGS.hidden3,
        #                                           output_dim=FLAGS.hidden4,
        #
        #                                           act=lambda x: x,
        #                                           dropout=self.dropout,
        #                                           logging=self.logging)(self.embeddings1)

        self.z_mean = self.embeddings
        #        self.z_mean = self.embeddings1
        #        self.z_mean = self.embeddings2

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
Example #18
def multihead_attention_gcn(queries,
                            keys,
                            num_units=None,
                            num_heads=8,
                            dropout_rate=0,
                            is_training=True,
                            causality=False,
                            scope="multihead_attention",
                            reuse=None):
    '''Applies multihead attention.
    
    Args:
      queries: A 3d tensor with shape of [N, T_q, C_q].
      keys: A 3d tensor with shape of [N, T_k, C_k].
      num_units: A scalar. Attention size.
      dropout_rate: A floating point number.
      is_training: Boolean. Controller of mechanism for dropout.
      causality: Boolean. If true, units that reference the future are masked. 
      num_heads: An int. Number of heads.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
        
    Returns:
      A 3d tensor with shape of (N, T_q, C)  
    '''
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # Set the fall back option for num_units
        if num_units is None:
            num_units = queries.get_shape().as_list()[-1]

        # Linear projections
        Q = tf.contrib.layers.fully_connected(queries,
                                              num_units)  # (N, T_q, C)
        K = tf.contrib.layers.fully_connected(queries,
                                              num_units)  # (N, T_k, C)
        V = tf.contrib.layers.fully_connected(keys, num_units)  # (N, T_k, C)

        Q1 = tf.reshape(Q, (Q.get_shape().as_list()[0],
                            Q.get_shape().as_list()[1], num_units))
        K1 = tf.reshape(K, (Q.get_shape().as_list()[0],
                            Q.get_shape().as_list()[1], num_units))
        V1 = tf.reshape(V, (Q.get_shape().as_list()[0],
                            Q.get_shape().as_list()[1], num_units))

        # Split and concat
        Q_ = tf.concat(tf.split(Q1, num_heads, axis=2),
                       axis=0)  # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K1, num_heads, axis=2),
                       axis=0)  # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V1, num_heads, axis=2),
                       axis=0)  # (h*N, T_k, C/h)

        # Multiplication
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # (h*N, T_q, T_k)

        # Scale
        outputs = outputs / (K_.get_shape().as_list()[-1]**0.5)

        # Key Masking
        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1)))  # (N, T_k)
        key_masks = tf.tile(key_masks, [num_heads, 1])  # (h*N, T_k)
        key_masks = tf.tile(tf.expand_dims(key_masks, 1),
                            [1, tf.shape(queries)[1], 1])  # (h*N, T_q, T_k)
        #
        paddings = tf.ones_like(outputs) * (-2**32 + 1)
        outputs = tf.where(tf.equal(key_masks, 0), paddings,
                           outputs)  # (h*N, T_q, T_k)

        # Causality = Future blinding
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :])  # (T_q, T_k)
            tril = tf.linalg.LinearOperatorLowerTriangular(
                diag_vals).to_dense()  # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0),
                            [tf.shape(outputs)[0], 1, 1])  # (h*N, T_q, T_k)
            #
            paddings = tf.ones_like(masks) * (-2**32 + 1)
            outputs = tf.where(tf.equal(masks, 0), paddings,
                               outputs)  # (h*N, T_q, T_k)

        # Activation
        outputs = tf.nn.softmax(outputs)  # (h*N, T_q, T_k)
        # Query Masking
        query_masks = tf.sign(tf.abs(tf.reduce_sum(queries,
                                                   axis=-1)))  # (N, T_q)
        query_masks = tf.tile(query_masks, [num_heads, 1])  # (h*N, T_q)
        query_masks = tf.tile(tf.expand_dims(query_masks, -1),
                              [1, 1, tf.shape(keys)[1]])  # (h*N, T_q, T_k)
        outputs *= query_masks  # broadcasting. (N, T_q, C)
        matt = outputs
        # Dropouts
        outputs = tf.contrib.layers.dropout(
            outputs,
            keep_prob=dropout_rate,
            is_training=tf.convert_to_tensor(is_training))

        ki = ['1', '2', '3', '4', '5', '6', '7', '8']
        for k in range(0, num_heads):
            adj = matt[k, :, :]
            gcnin = V_[k, :, :]
            scopex1 = ki[k]
            scopex2 = ki[k] + 'x'

            print(scopex2)
            a_t = (adj)
            idg = tf.where(tf.not_equal(a_t, 0))
            sparse = tf.SparseTensor(idg, tf.gather_nd(a_t, idg),
                                     a_t.get_shape())

            gcnout1 = GraphConvolution(input_dim=num_units / num_heads,
                                       output_dim=num_units / num_heads,
                                       adj=sparse,
                                       act=tf.nn.relu,
                                       dropout=1 - dropout_rate,
                                       logging=False,
                                       scope=scopex1)(gcnin)

            gcnout2 = GraphConvolution(input_dim=num_units / num_heads,
                                       output_dim=num_units / num_heads,
                                       adj=sparse,
                                       act=tf.nn.relu,
                                       dropout=1 - dropout_rate,
                                       logging=False,
                                       scope=scopex2)(gcnout1)

            if k == 0:
                gcnout = tf.expand_dims(gcnout2, axis=0)

            else:
                temp_gcn = tf.expand_dims(gcnout2, axis=0)
                gcnout = tf.concat([gcnout, temp_gcn], axis=0)

        # Restore shape
        outputs = tf.concat(tf.split(gcnout, num_heads, axis=0),
                            axis=2)  # (N, T_q, C)

        # Residual connection
        outputs += queries

        # Normalize
        outputs = normalize(outputs)  # (N, T_q, C)

    return outputs, matt
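A hedged usage sketch for multihead_attention_gcn. The placeholder names and sizes are illustrative only; static shapes are needed because the head-splitting reshapes read get_shape().as_list() from the query tensor, and num_units must be divisible by num_heads. The GraphConvolution and normalize helpers used inside the function must already be in scope:

import tensorflow as tf  # TF 1.x, as implied by tf.contrib and tf.variable_scope

queries = tf.placeholder(tf.float32, shape=[2, 10, 64], name="queries")  # (N, T_q, C_q)
keys = tf.placeholder(tf.float32, shape=[2, 10, 64], name="keys")        # (N, T_k, C_k)

outputs, attention = multihead_attention_gcn(queries,
                                             keys,
                                             num_units=64,
                                             num_heads=8,
                                             dropout_rate=0.9,  # passed to keep_prob above
                                             is_training=True)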
Example #19
    def _build(self):

        # Two reconstructions: one from G1 to G1, the other from G2 to G2. In addition,
        # a nonlinear relationship between the G1 and G2 embeddings is learned, using a
        # one-layer MLP for prediction.

        # For G1, autoencoder
        self.hidden1_G1 = GraphConvolutionSparse(
            input_dim=self.input_dim1,
            output_dim=FLAGS.hidden1,
            adj=self.adj1,
            features_nonzero=self.features_nonzero1,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs1)

        self.gat_hid_G1 = SpGAT.inference(inputs=self.inputs1_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer1_G1 = tf.concat([self.hidden1_G1, self.gat_hid_G1], axis=-1)

        # self.layer1_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden2_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G1 = tf.concat([self.hidden2_G1, self.gat_hid_G1], axis=-1)

        # self.layer2_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden3_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G1 = tf.concat([self.hidden3_G1, self.gat_hid_G1],
                                       axis=-1)

        # self.embeddings_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        ### GAT layers
        # GAT_input1 = tf.sparse_reshape(self.inputs1,shape=[1, self.nodes1, self.input_dim1])
        # GAT_input1 = dense_to_sparse(GAT_input1)

        self.cancat_G1 = self.embeddings_G1

        # self.cancat_G1 = tf.concat([self.embeddings_G1,self.gat_hid_G1],axis=-1)
        self.cancat_G1 = tf.layers.dense(tf.nn.relu(self.cancat_G1),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)

        # self.z_mean = self.embeddings_G1

        self.reconstructions1 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G1)

        ##############################################################################

        # For G2, autoencoder, the same network structure as G1.
        self.hidden1_G2 = GraphConvolutionSparse(
            input_dim=self.input_dim2,
            output_dim=FLAGS.hidden1,
            adj=self.adj2,
            features_nonzero=self.features_nonzero2,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs2)

        self.gat_hid_G2 = SpGAT.inference(inputs=self.inputs2_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)
        #
        self.layer1_G2 = tf.concat([self.hidden1_G2, self.gat_hid_G2], axis=-1)
        # self.layer1_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden2_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G2 = tf.concat([self.hidden2_G2, self.gat_hid_G2], axis=-1)
        # self.layer2_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden3_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G2 = tf.concat([self.hidden3_G2, self.gat_hid_G2],
                                       axis=-1)
        # self.embeddings_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.cancat_G2 = self.embeddings_G2

        # self.cancat_G2 = tf.concat([self.embeddings_G2, self.gat_hid_G2], axis=-1)
        self.cancat_G2 = tf.layers.dense(tf.nn.relu(self.cancat_G2),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)
        # self.z_mean = self.embeddings_G2

        self.reconstructions2 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G2)

        #### non-linear mapping from G1 embeddings to G2 embeddings

        self.dense1 = tf.layers.dense(self.embeddings_G1,
                                      FLAGS.hidden2,
                                      activation=tf.nn.relu)
        self.latentmap = tf.layers.dense(self.dense1, FLAGS.hidden2)

        # classification layers
        # self.match_embedding_G1 = tf.gather(self.cancat_G1, self.GID1)
        self.match_embedding_G1 = tf.gather(self.latentmap, self.GID1)
        self.match_embedding_G2 = tf.gather(self.cancat_G2, self.GID2)

        # self.match_embedding_G1 = tf.gather(tf.sparse_tensor_to_dense(self.inputs1), self.GID1)
        # self.match_embedding_G2 = tf.gather(tf.sparse_tensor_to_dense(self.inputs2), self.GID2)

        self.match_embeddings = tf.concat(
            [self.match_embedding_G1, self.match_embedding_G2], axis=1)

        # self.match_embeddings = tf.concat([tf.sparse_tensor_to_dense(self.inputs1), tf.sparse_tensor_to_dense(self.inputs2)], axis=1)
        # self.match_embeddings = tf.reshape(self.match_embeddings, [-1, FLAGS.hidden2])

        self.match_embeddings = tf.reshape(self.match_embeddings,
                                           [-1, FLAGS.hidden2 * 2])

        self.fcn1 = tf.layers.dense(self.match_embeddings,
                                    128,
                                    activation=tf.nn.relu)
        # self.fcn2 = tf.layers.dense(self.fcn1, 32, activation=tf.nn.relu)
        self.out = tf.layers.dense(self.fcn1, 2)
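self.out produces two-way logits for the G1/G2 node-matching decision. A sketch of the objective that would typically sit on top; the label placeholder is an assumption, and logits stands for the tensor built as self.out above:

import tensorflow as tf  # TF 1.x

def matching_loss_sketch(logits, labels):
    # labels: int32 vector, 1 if the paired G1/G2 nodes match, 0 otherwise.
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                       logits=logits))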