Example #1
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=self.hidden1_dim,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z (node embeddings)
        self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                           output_dim=self.hidden2_dim,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        # Z_mean for AE. No noise added (because not a VAE)
        self.z_mean = self.embeddings

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                      act=lambda x: x,
                                      logging=self.logging)(self.embeddings)
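
Every example on this page ends with the same InnerProductDecoder call, which reconstructs the adjacency matrix from the node embeddings as A_hat = act(Z Z^T). The class itself is defined elsewhere in each repository; the standalone NumPy sketch below only illustrates the computation it performs. With act set to the identity, as in the calls above, the output is raw logits, typically fed into a sigmoid cross-entropy loss against the true adjacency matrix.

import numpy as np

def inner_product_decode(z, act=lambda x: x):
    # Reconstruct adjacency logits from node embeddings: A_hat = act(Z @ Z.T).
    return act(z @ z.T)

# Toy usage: 4 nodes embedded in 2 dimensions.
z = np.random.randn(4, 2)
adj_logits = inner_product_decode(z)
print(adj_logits.shape)  # (4, 4)
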
Example #2
    def _build(self):

        self.GCN1 = GraphConvolution(input_dim=self.author_feature_inputs,
                                     output_dim=FLAGS.hidden1,
                                     adj=self.AuthorAdj,
                                     dropout=self.dropout,
                                     logging=self.logging)(self.inputs)

        # N * hidden2 (60)
        self.AuthorEnbedding = GraphConvolution(input_dim=FLAGS.hidden1,
                                                output_dim=FLAGS.hidden2,
                                                adj=self.AuthorAdj,
                                                dropout=self.dropout,
                                                act=lambda x: x,
                                                logging=self.logging)(
                                                    self.GCN1)

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.AuthorEnbedding)

        # self.document_feature_inputs = tf.matmul(D2A, )

        pass
Example #3
    def __init__(self,
                 input_feat_dim,
                 hidden_dim1,
                 hidden_dim2,
                 dropout,
                 pretrained_weights=None,
                 hidden_dims_predictor=[256],
                 drop_out_predictor=0.3,
                 output_dim=1,
                 freezed=False):
        super(GCNPredictor, self).__init__()
        self.gc1 = GraphConvolution(input_feat_dim,
                                    hidden_dim1,
                                    dropout,
                                    act=F.relu)
        self.gc2 = GraphConvolution(hidden_dim1,
                                    hidden_dim2,
                                    dropout,
                                    act=F.relu)
        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

        self.predictor = Predictor(input_dim=hidden_dim2,
                                   output_dim=output_dim,
                                   h_dims=hidden_dims_predictor,
                                   drop_out=drop_out_predictor)
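
The constructor above only wires the layers together; the forward pass is not shown, and the Predictor class, its h_dims argument, and the exact GraphConvolution signature are specific to that repository. As a rough, self-contained illustration of the general idea (node embeddings from a GCN encoder feeding a small MLP head), here is a toy PyTorch stand-in rather than the actual code:

import torch
import torch.nn as nn

class ToyPredictorHead(nn.Module):
    # Hypothetical stand-in for the Predictor used above: a small MLP on top of
    # node embeddings, ending in a single output per node.
    def __init__(self, input_dim, h_dims=(256,), output_dim=1, drop_out=0.3):
        super().__init__()
        layers, prev = [], input_dim
        for h in h_dims:
            layers += [nn.Linear(prev, h), nn.ReLU(), nn.Dropout(drop_out)]
            prev = h
        layers.append(nn.Linear(prev, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, z):
        return self.net(z)

# Toy usage: predictions from 16-dimensional embeddings of 10 nodes.
z = torch.randn(10, 16)
head = ToyPredictorHead(input_dim=16)
print(head(z).shape)  # torch.Size([10, 1])
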
Example #4
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.num_hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.z_mean = GraphConvolution(input_dim=self.num_hidden1,
                                       output_dim=self.num_hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        self.z_log_std = GraphConvolution(input_dim=self.num_hidden1,
                                          output_dim=self.num_hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.num_hidden2]) * tf.exp(self.z_log_std)

        self.reconstructions = InnerProductDecoder(input_dim=self.num_hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
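
The line computing self.z above is the VAE reparameterization step: a standard normal sample is scaled by exp(z_log_std) and shifted by z_mean, giving one latent sample per node. A NumPy sketch of the same arithmetic, with toy sizes chosen only for illustration:

import numpy as np

n_samples, num_hidden2 = 6, 4                   # arbitrary toy sizes
z_mean = np.zeros((n_samples, num_hidden2))
z_log_std = np.zeros((n_samples, num_hidden2))  # std = exp(0) = 1
eps = np.random.randn(n_samples, num_hidden2)   # plays the role of tf.random_normal
z = z_mean + eps * np.exp(z_log_std)            # same formula as above
print(z.shape)  # (6, 4)
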
Example #5
    def __init__(self, input_dim, latent_dim=128, h_dims=[512], drop_out=0.3):

        super(GAEBase, self).__init__()

        self.latent_dim = latent_dim

        modules = []
        hidden_dims = deepcopy(h_dims)

        hidden_dims.insert(0, input_dim)

        # Build Encoder
        for i in range(1, len(hidden_dims)):
            i_dim = hidden_dims[i - 1]
            o_dim = hidden_dims[i]

            modules.append(
                nn.Sequential(
                    GraphConvolution(i_dim, o_dim, drop_out, act=lambda x: x),
                    #nn.BatchNorm1d(o_dim),
                    #nn.ReLU()
                    #nn.Dropout(drop_out)
                ))
            #in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        self.bottleneck = GraphConvolution(hidden_dims[-1],
                                           latent_dim,
                                           drop_out,
                                           act=lambda x: x)

        # Build Decoder
        self.decoder = InnerProductDecoder(drop_out, act=lambda x: x)
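
The encoder loop above walks a list of layer widths: hidden_dims starts as a copy of h_dims with input_dim prepended, so each GraphConvolution maps hidden_dims[i - 1] to hidden_dims[i], and the bottleneck then maps the last hidden width to latent_dim. A quick sketch of the resulting shapes under the default arguments (the input dimension here is an arbitrary example value):

# With input_dim=1000 (arbitrary), h_dims=[512] and latent_dim=128 (the defaults
# above), the encoder stack is 1000 -> 512 and the bottleneck is 512 -> 128.
input_dim, h_dims, latent_dim = 1000, [512], 128
dims = [input_dim] + list(h_dims)
encoder_shapes = list(zip(dims[:-1], dims[1:]))  # [(1000, 512)]
bottleneck_shape = (dims[-1], latent_dim)        # (512, 128)
print(encoder_shapes, bottleneck_shape)
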
Example #6
    def _build(self):
        # First GCN layer: (A, X) --> H (hidden-layer feature representation)
        self.hidden1 = GraphConvolution(input_dim=self.input_dim,
                                              output_dim=self.hidden1_dim,
                                              adj=self.adj,
                                              # features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)

        # Second GCN layer: (A, H) --> Z (node embeddings)
        self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                           output_dim=self.hidden2_dim,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        # Z_mean for the AE; no noise is added (because this is not a VAE)
        self.z_mean = self.embeddings

        # Inner-product decoder: Z (embeddings) --> A (reconstructed adjacency matrix)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                      act=lambda x: x,
                                      logging=self.logging)(self.embeddings)
Example #7
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.num_hidden1,
            #--my code---------------
            #output_dim=hidden_1,
            #--my code---------------
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.embeddings = GraphConvolution(input_dim=self.num_hidden1,
                                           output_dim=self.num_hidden2,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=self.num_hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
Example #8
    def decoder(self, z):

        reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                              act=lambda x: x,
                                              dropout=0.,
                                              logging=self.logging)(z)

        reconstructions = tf.reshape(reconstructions, [-1])
        return reconstructions
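
The tf.reshape(..., [-1]) call flattens the N x N matrix of reconstruction logits into a single vector, which is the shape GAE-style implementations commonly use when comparing against a flattened adjacency matrix with a (weighted) sigmoid cross-entropy loss; the loss itself is outside this snippet. A NumPy sketch of the flattening:

import numpy as np

n = 5
z = np.random.randn(n, 8)
adj_logits = z @ z.T                  # N x N reconstruction logits
flat_logits = adj_logits.reshape(-1)  # length N*N vector, matching a flattened
                                      # adjacency label vector
print(flat_logits.shape)  # (25,)
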
Example #9
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
     super(GCNModelVAE, self).__init__()
     self.gc1 = GraphConvolution(input_feat_dim,
                                 hidden_dim1,
                                 dropout,
                                 act=F.relu)
     self.gc2 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=F.relu)
     self.gc3 = GraphConvolution(hidden_dim1,
                                 hidden_dim2,
                                 dropout,
                                 act=F.relu)
     self.dc = InnerProductDecoder(dropout, act=lambda x: x)
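
Here gc2 and gc3 act as the mean and log-std heads of a variational graph autoencoder; the sampling step itself is not part of this snippet. Below is a self-contained PyTorch sketch of that step, using plain linear layers in place of the repository's GraphConvolution so that it runs on its own; it illustrates the technique rather than reproducing the original model.

import torch
import torch.nn as nn

class ToyVariationalHead(nn.Module):
    # Stand-in for the gc2/gc3 pair above: two heads producing z_mean and
    # z_log_std from a shared hidden representation, plus the sampling step
    # z = z_mean + eps * exp(z_log_std) used throughout this page.
    def __init__(self, hidden_dim, latent_dim):
        super().__init__()
        self.mean_head = nn.Linear(hidden_dim, latent_dim)
        self.log_std_head = nn.Linear(hidden_dim, latent_dim)

    def forward(self, h):
        z_mean = self.mean_head(h)
        z_log_std = self.log_std_head(h)
        eps = torch.randn_like(z_mean)
        return z_mean + eps * torch.exp(z_log_std), z_mean, z_log_std

# Toy usage: 10 nodes with 32-dimensional hidden features, 16-dimensional latents.
h = torch.randn(10, 32)
z, z_mean, z_log_std = ToyVariationalHead(32, 16)(h)
print(z.shape)  # torch.Size([10, 16])
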
Example #10
    def make_decoder(self):

        self.l0 = Dense(input_dim=self.input_dim,
                        output_dim=FLAGS.hidden3,
                        act=tf.nn.elu,
                        dropout=0.,
                        bias=True,
                        logging=self.logging)

        self.l1 = Dense(input_dim=FLAGS.hidden2,
                        output_dim=FLAGS.hidden3,
                        act=tf.nn.elu,
                        dropout=0.,
                        bias=True,
                        logging=self.logging)

        self.l2 = Dense(input_dim=FLAGS.hidden3,
                        output_dim=FLAGS.hidden2,
                        act=lambda x: x,
                        dropout=self.dropout,
                        bias=True,
                        logging=self.logging)

        self.l3 = Dense(input_dim=2 * FLAGS.hidden2,
                        output_dim=FLAGS.hidden3,
                        act=tf.nn.elu,
                        dropout=self.dropout,
                        bias=True,
                        logging=self.logging)

        self.l3p5 = Dense(input_dim=FLAGS.hidden3,
                          output_dim=FLAGS.hidden3,
                          act=tf.nn.elu,
                          dropout=self.dropout,
                          bias=True,
                          logging=self.logging)

        self.l4 = Dense(input_dim=FLAGS.hidden3,
                        output_dim=1,
                        act=lambda x: x,
                        dropout=self.dropout,
                        bias=True,
                        logging=self.logging)

        self.l5 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                      act=lambda x: x,
                                      logging=self.logging)
Example #11
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        fl = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=self.hidden1_dim,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,                
                                              dtype=self.dtype,
                                              logging=self.logging)
        # self.ir = fl.ir
        self.initial = fl.initial
        self.fw = fl.vars['weights']
        self.hidden1 = fl(self.inputs)
        self.dx = fl.dx
        self.wdx = fl.wdx
        self.awdx = fl.awdx

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       dtype=self.dtype,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=tf.nn.sigmoid,
                                          dropout=self.dropout,
                                          dtype=self.dtype,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal([self.n_samples, self.hidden2_dim], dtype=self.dtype) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                      act=lambda x: x,
                                      flatten=self.flatten_output,
                                      logging=self.logging)(self.z)
Example #12
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,  # input size
                                              output_dim=FLAGS.hidden1,  # output size
                                              adj=self.adj,  # Adjacency matrix
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)  # sparse input

        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,  # input size
                                           output_dim=FLAGS.hidden2,  # output size
                                           adj=self.adj,  # Adjacency matrix
                                           act=lambda x: x,  # no activation function
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)  # tensor input

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
Example #13
    def _build(self):
        self.hidden1, self.w1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.embeddings, self.w2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                                    output_dim=FLAGS.hidden2,
                                                    adj=self.adj,
                                                    act=lambda x: x,
                                                    dropout=self.dropout,
                                                    logging=self.logging)(
                                                        self.hidden1)
        #        self.embeddings1 = DeepConvolution(input_dim=FLAGS.hidden2,
        #                                           output_dim=FLAGS.hidden3,
        #
        #                                           act=lambda x: x,
        #                                           dropout=0.0001,
        #                                           logging=self.logging)(self.embeddings)
        #        self.embeddings2 = DeepConvolution(input_dim=FLAGS.hidden3,
        #                                           output_dim=FLAGS.hidden4,
        #
        #                                           act=lambda x: x,
        #                                           dropout=self.dropout,
        #                                           logging=self.logging)(self.embeddings1)

        self.z_mean = self.embeddings
        #        self.z_mean = self.embeddings1
        #        self.z_mean = self.embeddings2

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
Example #14
    def _build(self):
        # First GCN layer: (A, X) --> H (hidden-layer feature representation)
        self.hidden1 = GraphConvolution(input_dim=self.input_dim,
                                              output_dim=self.hidden1_dim,
                                              adj=self.adj,
                                              # features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              dtype=self.dtype,
                                              logging=self.logging)(self.inputs)

        # Second GCN layer: (A, H) --> Z (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       dtype=self.dtype,
                                       logging=self.logging)(self.hidden1)

        # Also a second GCN layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          dtype=self.dtype,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal([self.n_samples, self.hidden2_dim], dtype=self.dtype) * tf.exp(self.z_log_std)

        # Inner-product decoder: Z (embeddings) --> A (reconstructed adjacency matrix)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                      act=lambda x: x,
                                      flatten=self.flatten_output,
                                      logging=self.logging)(self.z)
Example #15
    def make_decoder(self):
        self.l0 = GraphiteSparse(input_dim=self.input_dim,
                                 output_dim=FLAGS.hidden3,
                                 act=tf.nn.relu,
                                 dropout=0.,
                                 logging=self.logging)

        self.l1 = Graphite(input_dim=FLAGS.hidden2,
                           output_dim=FLAGS.hidden3,
                           act=tf.nn.relu,
                           dropout=0.,
                           logging=self.logging)

        self.l2 = Graphite(input_dim=FLAGS.hidden3,
                           output_dim=FLAGS.hidden2,
                           act=lambda x: x,
                           dropout=self.dropout,
                           logging=self.logging)

        self.l3 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                      act=lambda x: x,
                                      logging=self.logging)

        self.l4 = Scale(input_dim=FLAGS.hidden2, logging=self.logging)
Example #16
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.hidden1_dim,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim]) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
Example #17
    def _build(self):

        # Two reconstructions: one from G1 to G1 and one from G2 to G2. In addition, a
        # nonlinear relationship between the G1 and G2 embeddings is learned, using a one-layer MLP for prediction.

        # For G1, autoencoder
        self.hidden1_G1 = GraphConvolutionSparse(
            input_dim=self.input_dim1,
            output_dim=FLAGS.hidden1,
            adj=self.adj1,
            features_nonzero=self.features_nonzero1,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs1)

        self.gat_hid_G1 = SpGAT.inference(inputs=self.inputs1_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer1_G1 = tf.concat([self.hidden1_G1, self.gat_hid_G1], axis=-1)

        # self.layer1_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden2_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G1 = tf.concat([self.hidden2_G1, self.gat_hid_G1], axis=-1)

        # self.layer2_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden3_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G1 = tf.concat([self.hidden3_G1, self.gat_hid_G1],
                                       axis=-1)

        # self.embeddings_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        ### GAT layers
        # GAT_input1 = tf.sparse_reshape(self.inputs1,shape=[1, self.nodes1, self.input_dim1])
        # GAT_input1 = dense_to_sparse(GAT_input1)

        self.cancat_G1 = self.embeddings_G1

        # self.cancat_G1 = tf.concat([self.embeddings_G1,self.gat_hid_G1],axis=-1)
        self.cancat_G1 = tf.layers.dense(tf.nn.relu(self.cancat_G1),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)

        # self.z_mean = self.embeddings_G1

        self.reconstructions1 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G1)

        ##############################################################################

        # For G2, autoencoder, the same network structure as G1.
        self.hidden1_G2 = GraphConvolutionSparse(
            input_dim=self.input_dim2,
            output_dim=FLAGS.hidden1,
            adj=self.adj2,
            features_nonzero=self.features_nonzero2,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs2)

        self.gat_hid_G2 = SpGAT.inference(inputs=self.inputs2_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)
        #
        self.layer1_G2 = tf.concat([self.hidden1_G2, self.gat_hid_G2], axis=-1)
        # self.layer1_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden2_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G2 = tf.concat([self.hidden2_G2, self.gat_hid_G2], axis=-1)
        # self.layer2_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden3_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G2 = tf.concat([self.hidden3_G2, self.gat_hid_G2],
                                       axis=-1)
        # self.embeddings_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.cancat_G2 = self.embeddings_G2

        # self.cancat_G2 = tf.concat([self.embeddings_G2, self.gat_hid_G2], axis=-1)
        self.cancat_G2 = tf.layers.dense(tf.nn.relu(self.cancat_G2),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)
        # self.z_mean = self.embeddings_G2

        self.reconstructions2 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G2)

        #### non-linear mapping from G1 embeddings to G2 embeddings

        self.dense1 = tf.layers.dense(self.embeddings_G1,
                                      FLAGS.hidden2,
                                      activation=tf.nn.relu)
        self.latentmap = tf.layers.dense(self.dense1, FLAGS.hidden2)

        # classification layers
        # self.match_embedding_G1 = tf.gather(self.cancat_G1, self.GID1)
        self.match_embedding_G1 = tf.gather(self.latentmap, self.GID1)
        self.match_embedding_G2 = tf.gather(self.cancat_G2, self.GID2)

        # self.match_embedding_G1 = tf.gather(tf.sparse_tensor_to_dense(self.inputs1), self.GID1)
        # self.match_embedding_G2 = tf.gather(tf.sparse_tensor_to_dense(self.inputs2), self.GID2)

        self.match_embeddings = tf.concat(
            [self.match_embedding_G1, self.match_embedding_G2], axis=1)

        # self.match_embeddings = tf.concat([tf.sparse_tensor_to_dense(self.inputs1), tf.sparse_tensor_to_dense(self.inputs2)], axis=1)
        # self.match_embeddings = tf.reshape(self.match_embeddings, [-1, FLAGS.hidden2])

        self.match_embeddings = tf.reshape(self.match_embeddings,
                                           [-1, FLAGS.hidden2 * 2])

        self.fcn1 = tf.layers.dense(self.match_embeddings,
                                    128,
                                    activation=tf.nn.relu)
        # self.fcn2 = tf.layers.dense(self.fcn1, 32, activation=tf.nn.relu)
        self.out = tf.layers.dense(self.fcn1, 2)
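
The final block gathers the embeddings of candidate node pairs from the two graphs with tf.gather on GID1 and GID2, concatenates them, and classifies each pair with a small dense network that outputs two logits. A self-contained PyTorch sketch of that pairing-and-classification idea, with all sizes and indices made up for illustration:

import torch
import torch.nn as nn

hidden2 = 16                             # plays the role of FLAGS.hidden2
emb_g1 = torch.randn(30, hidden2)        # embeddings of the 30 nodes of graph 1
emb_g2 = torch.randn(40, hidden2)        # embeddings of the 40 nodes of graph 2
gid1 = torch.tensor([0, 5, 7])           # candidate node indices in graph 1
gid2 = torch.tensor([3, 3, 21])          # candidate node indices in graph 2

pair_emb = torch.cat([emb_g1[gid1], emb_g2[gid2]], dim=1)  # gather + concatenate
classifier = nn.Sequential(nn.Linear(2 * hidden2, 128),
                           nn.ReLU(),
                           nn.Linear(128, 2))
logits = classifier(pair_emb)            # two logits per candidate pair
print(logits.shape)  # torch.Size([3, 2])
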