Example #1
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.num_hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.z_mean = GraphConvolution(input_dim=self.num_hidden1,
                                       output_dim=self.num_hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        self.z_log_std = GraphConvolution(input_dim=self.num_hidden1,
                                          output_dim=self.num_hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.num_hidden2]) * tf.exp(self.z_log_std)

        self.reconstructions = InnerProductDecoder(input_dim=self.num_hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
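
For reference, the InnerProductDecoder call that closes each example computes pairwise inner products of the node embeddings. Below is a minimal sketch of that decoding step (the standalone function name is illustrative, not the projects' actual class), assuming TensorFlow 1.x as in the examples:

    import tensorflow as tf

    def inner_product_decode(z, keep_prob=1.0):
        """Sketch of inner-product decoding: edge logits for every node pair.

        z: node embeddings of shape [n_nodes, hidden2]. With act=lambda x: x
        (identity) the decoder returns raw logits rather than probabilities.
        """
        z = tf.nn.dropout(z, keep_prob)              # TF1 dropout takes keep_prob
        logits = tf.matmul(z, z, transpose_b=True)   # [n_nodes, n_nodes] = Z Z^T
        return tf.reshape(logits, [-1])              # flattened reconstruction
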
Example #2
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.num_hidden1,
            #--my code---------------
            #output_dim=hidden_1,
            #--my code---------------
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.embeddings = GraphConvolution(input_dim=self.num_hidden1,
                                           output_dim=self.num_hidden2,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=self.num_hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
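
In this non-variational variant, self.z_mean is simply the deterministic embeddings. At evaluation time such embeddings are typically used to score candidate edges; a hedged sketch (NumPy, names illustrative), assuming emb is the evaluated [n_nodes, dim] embedding matrix:

    import numpy as np

    def edge_score(emb, u, v):
        """Probability-like score for a candidate edge (u, v)."""
        logit = np.dot(emb[u], emb[v])        # inner product of the two embeddings
        return 1.0 / (1.0 + np.exp(-logit))   # sigmoid maps the logit into (0, 1)
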
Example #3
File: model.py  Project: sreycodes/YLR
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=self.hidden1_dim,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z (node embeddings)
        self.embeddings = GraphConvolution(input_dim=self.hidden1_dim,
                                           output_dim=self.hidden2_dim,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)

        # Z_mean for AE. No noise added (because not a VAE)
        self.z_mean = self.embeddings

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
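
None of the examples define the reconstruction loss inside _build; it lives in the projects' training code. As a hedged sketch (not code from any of the projects listed), a typical choice for these flattened adjacency logits is a weighted sigmoid cross-entropy, whose labels, pos_weight and norm arguments are assumed to be supplied by the training script:

    import tensorflow as tf

    def reconstruction_loss(logits, labels, pos_weight, norm):
        """Weighted cross-entropy between edge logits and flattened adjacency
        labels; pos_weight up-weights the rare positive edges of a sparse graph."""
        return norm * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(
                targets=labels, logits=logits, pos_weight=pos_weight))
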
Example #4
File: model.py  Project: sreycodes/YLR
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        fl = GraphConvolutionSparse(input_dim=self.input_dim,
                                    output_dim=self.hidden1_dim,
                                    adj=self.adj,
                                    features_nonzero=self.features_nonzero,
                                    act=tf.nn.relu,
                                    dropout=self.dropout,
                                    dtype=self.dtype,
                                    logging=self.logging)
        # self.ir = fl.ir
        self.initial = fl.initial
        self.fw = fl.vars['weights']
        self.hidden1 = fl(self.inputs)
        self.dx = fl.dx
        self.wdx = fl.wdx
        self.awdx = fl.awdx

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       dtype=self.dtype,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=tf.nn.sigmoid,
                                          dropout=self.dropout,
                                          dtype=self.dtype,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim],
            dtype=self.dtype) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   flatten=self.flatten_output,
                                                   logging=self.logging)(self.z)
Example #5
    def _build(self):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,  # input size
                                              output_dim=FLAGS.hidden1,  # output size
                                              adj=self.adj,  # Adjacency matrix
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)  # sparse input

        self.embeddings = GraphConvolution(input_dim=FLAGS.hidden1,  # input size
                                           output_dim=FLAGS.hidden2,  # output size
                                           adj=self.adj,  # Adjacency matrix
                                           act=lambda x: x,  # no activation function
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)  # tensor input

        self.z_mean = self.embeddings

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
Example #6
    def _build(self):
        self.hidden1, self.w1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        self.embeddings, self.w2 = GraphConvolution(input_dim=FLAGS.hidden1,
                                                    output_dim=FLAGS.hidden2,
                                                    adj=self.adj,
                                                    act=lambda x: x,
                                                    dropout=self.dropout,
                                                    logging=self.logging)(
                                                        self.hidden1)
        #        self.embeddings1 = DeepConvolution(input_dim=FLAGS.hidden2,
        #                                           output_dim=FLAGS.hidden3,
        #
        #                                           act=lambda x: x,
        #                                           dropout=0.0001,
        #                                           logging=self.logging)(self.embeddings)
        #        self.embeddings2 = DeepConvolution(input_dim=FLAGS.hidden3,
        #                                           output_dim=FLAGS.hidden4,
        #
        #                                           act=lambda x: x,
        #                                           dropout=self.dropout,
        #                                           logging=self.logging)(self.embeddings1)

        self.z_mean = self.embeddings
        #        self.z_mean = self.embeddings1
        #        self.z_mean = self.embeddings2

        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.embeddings)
Example #7
    def _build(self):
        # First GCN Layer: (A, X) --> H (hidden layer features)
        self.hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=self.hidden1_dim,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs)

        # Second GCN Layer: (A, H) --> Z_mean (node embeddings)
        self.z_mean = GraphConvolution(input_dim=self.hidden1_dim,
                                       output_dim=self.hidden2_dim,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)

        # Also second GCN Layer: (A, H) --> Z_log_stddev (for VAE noise)
        self.z_log_std = GraphConvolution(input_dim=self.hidden1_dim,
                                          output_dim=self.hidden2_dim,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)

        # Sampling operation: z = z_mean + (random_noise_factor) * z_stddev
        self.z = self.z_mean + tf.random_normal(
            [self.n_samples, self.hidden2_dim]) * tf.exp(self.z_log_std)

        # Inner-Product Decoder: Z (embeddings) --> A (reconstructed adj.)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden2_dim,
                                                   act=lambda x: x,
                                                   logging=self.logging)(
                                                       self.z)
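
The sampling line above is the reparameterization trick, z = z_mean + eps * exp(z_log_std) with eps ~ N(0, I). The matching KL regularizer is not part of _build; a hedged sketch of the term commonly added to the loss for this z_mean / z_log_std parameterization (function name illustrative):

    import tensorflow as tf

    def kl_regularizer(z_mean, z_log_std, n_samples):
        """KL(q(Z) || N(0, I)) for Gaussian latents with log-std z_log_std."""
        return -(0.5 / n_samples) * tf.reduce_mean(
            tf.reduce_sum(1 + 2 * z_log_std
                          - tf.square(z_mean)
                          - tf.square(tf.exp(z_log_std)), axis=1))
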
Example #8
File: model.py  Project: afcarl/graphite
    def encoder(self, inputs):

        hidden1 = GraphConvolutionSparse(
            input_dim=self.input_dim,
            output_dim=FLAGS.hidden1,
            adj=self.adj,
            features_nonzero=self.features_nonzero,
            act=tf.nn.relu,
            dropout=0.,
            logging=self.logging)(inputs)

        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1,
                                       output_dim=FLAGS.hidden2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(hidden1)

        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1,
                                          output_dim=FLAGS.hidden2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(hidden1)
Example #9
    def _build(self):

        # Two reconstructions: one from G1 to G1, the other from G2 to G2. In addition,
        # a nonlinear relationship between the G1 and G2 embeddings is modeled,
        # using a one-layer MLP for prediction.

        # For G1, autoencoder
        self.hidden1_G1 = GraphConvolutionSparse(
            input_dim=self.input_dim1,
            output_dim=FLAGS.hidden1,
            adj=self.adj1,
            features_nonzero=self.features_nonzero1,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs1)

        self.gat_hid_G1 = SpGAT.inference(inputs=self.inputs1_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer1_G1 = tf.concat([self.hidden1_G1, self.gat_hid_G1], axis=-1)

        # self.layer1_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden2_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G1 = tf.concat([self.hidden2_G1, self.gat_hid_G1], axis=-1)

        # self.layer2_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        self.hidden3_G1 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj1,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G1)

        self.gat_hid_G1 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G1,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes1,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias1,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G1 = tf.concat([self.hidden3_G1, self.gat_hid_G1],
                                       axis=-1)

        # self.embeddings_G1 = tf.concat([self.gat_hid_G1, self.gat_hid_G1], axis=-1)

        ### GAT layers
        # GAT_input1 = tf.sparse_reshape(self.inputs1,shape=[1, self.nodes1, self.input_dim1])
        # GAT_input1 = dense_to_sparse(GAT_input1)

        self.cancat_G1 = self.embeddings_G1

        # self.cancat_G1 = tf.concat([self.embeddings_G1,self.gat_hid_G1],axis=-1)
        self.cancat_G1 = tf.layers.dense(tf.nn.relu(self.cancat_G1),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)

        # self.z_mean = self.embeddings_G1

        self.reconstructions1 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G1)

        ##############################################################################

        # For G2, autoencoder, the same network structure as G1.
        self.hidden1_G2 = GraphConvolutionSparse(
            input_dim=self.input_dim2,
            output_dim=FLAGS.hidden1,
            adj=self.adj2,
            features_nonzero=self.features_nonzero2,
            act=tf.nn.relu,
            dropout=self.dropout,
            logging=self.logging)(self.inputs2)

        self.gat_hid_G2 = SpGAT.inference(inputs=self.inputs2_gat,
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)
        #
        self.layer1_G2 = tf.concat([self.hidden1_G2, self.gat_hid_G2], axis=-1)
        # self.layer1_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden2_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=tf.nn.relu,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer1_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer1_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.layer2_G2 = tf.concat([self.hidden2_G2, self.gat_hid_G2], axis=-1)
        # self.layer2_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.hidden3_G2 = GraphConvolution(input_dim=FLAGS.hidden1 * 2,
                                           output_dim=FLAGS.hidden2,
                                           adj=self.adj2,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(
                                               self.layer2_G2)

        self.gat_hid_G2 = SpGAT.inference(inputs=tf.expand_dims(self.layer2_G2,
                                                                axis=0),
                                          nb_classes=FLAGS.hidden2,
                                          nb_nodes=self.nodes2,
                                          training=True,
                                          attn_drop=self.dropout,
                                          ffd_drop=self.dropout,
                                          bias_mat=self.bias2,
                                          hid_units=[FLAGS.hidden1],
                                          n_heads=self.nhead,
                                          activation=tf.nn.relu,
                                          residual=True)

        self.embeddings_G2 = tf.concat([self.hidden3_G2, self.gat_hid_G2],
                                       axis=-1)
        # self.embeddings_G2 = tf.concat([self.gat_hid_G2, self.gat_hid_G2], axis=-1)

        self.cancat_G2 = self.embeddings_G2

        # self.cancat_G2 = tf.concat([self.embeddings_G2, self.gat_hid_G2], axis=-1)
        self.cancat_G2 = tf.layers.dense(tf.nn.relu(self.cancat_G2),
                                         FLAGS.hidden2,
                                         activation=lambda x: x)
        # self.z_mean = self.embeddings_G2

        self.reconstructions2 = InnerProductDecoder(input_dim=FLAGS.hidden2,
                                                    act=lambda x: x,
                                                    logging=self.logging)(
                                                        self.cancat_G2)

        #### non-linear mapping from G1 embeddings to G2 embeddings

        self.dense1 = tf.layers.dense(self.embeddings_G1,
                                      FLAGS.hidden2,
                                      activation=tf.nn.relu)
        self.latentmap = tf.layers.dense(self.dense1, FLAGS.hidden2)

        # classification layers
        # self.match_embedding_G1 = tf.gather(self.cancat_G1, self.GID1)
        self.match_embedding_G1 = tf.gather(self.latentmap, self.GID1)
        self.match_embedding_G2 = tf.gather(self.cancat_G2, self.GID2)

        # self.match_embedding_G1 = tf.gather(tf.sparse_tensor_to_dense(self.inputs1), self.GID1)
        # self.match_embedding_G2 = tf.gather(tf.sparse_tensor_to_dense(self.inputs2), self.GID2)

        self.match_embeddings = tf.concat(
            [self.match_embedding_G1, self.match_embedding_G2], axis=1)

        # self.match_embeddings = tf.concat([tf.sparse_tensor_to_dense(self.inputs1), tf.sparse_tensor_to_dense(self.inputs2)], axis=1)
        # self.match_embeddings = tf.reshape(self.match_embeddings, [-1, FLAGS.hidden2])

        self.match_embeddings = tf.reshape(self.match_embeddings,
                                           [-1, FLAGS.hidden2 * 2])

        self.fcn1 = tf.layers.dense(self.match_embeddings,
                                    128,
                                    activation=tf.nn.relu)
        # self.fcn2 = tf.layers.dense(self.fcn1, 32, activation=tf.nn.relu)
        self.out = tf.layers.dense(self.fcn1, 2)
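
The two-unit self.out head produces logits for the G1/G2 pair-matching classification. A hedged sketch of the loss that would typically accompany it (the pair_labels tensor of integer class ids is an assumption, not shown in the example):

    import tensorflow as tf

    def matching_loss(logits, pair_labels):
        """Softmax cross-entropy for the two-class matched / not-matched head."""
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pair_labels,
                                                           logits=logits))
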