Code Example #1
File: sbvat.py  Project: voladorlu/graphgallery
    def build(self, hidden_layers=[16], activations=['relu'], dropout=0.5, 
              learning_rate=0.01, l2_norm=5e-4, p1=1., p2=1., 
              n_power_iterations=1, epsilon=0.03, xi=1e-6):
        
        with self.device:
            
            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, sparse=True, name='adj_matrix')
            index = Input(batch_shape=[None], dtype=tf.int32, name='index')

            # Two GCN layers: one hidden layer (hidden_layers[0]) and one output layer.
            self.GCN_layers = [GraphConvolution(hidden_layers[0],
                                                activation=activations[0],
                                                kernel_regularizer=regularizers.l2(l2_norm)),
                               GraphConvolution(self.n_classes)]
            self.dropout_layer = Dropout(dropout)
            
            logit = self.propagation(x, adj)
            output = tf.gather(logit, index)
            output = Softmax()(output)
            model = Model(inputs=[x, adj, index], outputs=output)
    
            self.model = model
            self.train_metric = SparseCategoricalAccuracy()
            self.test_metric = SparseCategoricalAccuracy()
            self.optimizer = Adam(lr=learning_rate)
            self.built = True
            
        self.p1 = p1  # Alpha: weight of the virtual adversarial (VAT) loss term
        self.p2 = p2  # Beta: weight of the entropy loss term
        self.xi = xi  # Small constant for the finite-difference approximation
        self.epsilon = epsilon  # Norm length of the (virtual) adversarial perturbation
        self.n_power_iterations = n_power_iterations  # Number of power iterations
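The snippet above only stores the VAT hyperparameters; how xi, epsilon and n_power_iterations enter the loss is not shown on this page. Below is a minimal sketch, not graphgallery's implementation, of the standard power-iteration approximation of the virtual adversarial perturbation; `forward` is a hypothetical differentiable function mapping node features to logits.

# A minimal sketch (not graphgallery's implementation) of the power-iteration
# approximation of the virtual adversarial perturbation; `forward` is a
# hypothetical differentiable function mapping node features to logits.
import tensorflow as tf

def virtual_adversarial_perturbation(forward, x, logit, xi=1e-6,
                                     epsilon=0.03, n_power_iterations=1):
    d = tf.random.normal(shape=tf.shape(x))          # random initial direction
    for _ in range(n_power_iterations):
        d = xi * tf.math.l2_normalize(d)             # small finite-difference step
        with tf.GradientTape() as tape:
            tape.watch(d)
            p = tf.nn.softmax(tf.stop_gradient(logit))
            q = tf.nn.softmax(forward(x + d))
            kl = tf.reduce_mean(
                tf.keras.losses.kullback_leibler_divergence(p, q))
        d = tape.gradient(kl, d)                     # power-iteration update
    return epsilon * tf.math.l2_normalize(d)         # scale to norm length epsilon

The returned perturbation is added to the inputs, and the divergence between the clean and perturbed predictions becomes the VAT term that p1 weights.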
Code Example #2
    def build(self, hidden_layers=[16], activations=['relu'], dropout=0.5,
              learning_rate=0.01, l2_norm=5e-4, p1=1.4, p2=0.7, epsilon=0.01):

        with self.device:

            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, sparse=True, name='adj_matrix')
            index = Input(batch_shape=[None], dtype=tf.int32, name='index')

            self.GCN_layers = [GraphConvolution(hidden_layers[0], activation=activations[0], 
                                                kernel_regularizer=regularizers.l2(l2_norm)),
                               GraphConvolution(self.n_classes)]
            self.dropout_layer = Dropout(rate=dropout)
            logit = self.propagation(x, adj)
            logit = tf.ensure_shape(logit, (self.n_nodes, self.n_classes))
            output = tf.gather(logit, index)
            output = Softmax()(output)
            model = Model(inputs=[x, adj, index], outputs=output)
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=Adam(lr=learning_rate),
                          metrics=['accuracy'])

            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
            self.adv_optimizer = Adam(lr=learning_rate/10)
            self.built = True
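This variant adds two unsupervised terms through model.add_loss: the virtual adversarial loss weighted by p1 and entropy_y_x(logit) weighted by p2. The project's entropy_y_x is not reproduced here; below is a minimal sketch, assuming it computes the mean entropy of the predicted class distribution.

# A minimal sketch of an entropy_y_x-style helper (an assumption, not the
# project's code): the mean entropy of the softmax predictions.
import tensorflow as tf

def entropy_y_x(logit):
    p = tf.nn.softmax(logit, axis=-1)
    log_p = tf.nn.log_softmax(logit, axis=-1)
    return -tf.reduce_mean(tf.reduce_sum(p * log_p, axis=-1))

Minimising this term pushes the model toward confident predictions on unlabelled nodes, which is why it is added alongside the VAT loss rather than relying on the supervised loss alone.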
Code Example #3
File: gcn.py  Project: voladorlu/graphgallery
    def build(self, hidden_layers=[16], activations=['relu'], dropout=0.5,
              learning_rate=0.01, l2_norm=5e-4, use_bias=False):

        with self.device:

            x = Input(batch_shape=[self.n_nodes, self.n_features], dtype=tf.float32, name='features')
            adj = Input(batch_shape=[self.n_nodes, self.n_nodes], dtype=tf.float32, sparse=True, name='adj_matrix')
            index = Input(batch_shape=[None], dtype=tf.int32, name='index')

            h = x
            for hid, activation in zip(hidden_layers, activations):
                h = GraphConvolution(hid, use_bias=use_bias,
                                     activation=activation,
                                     kernel_regularizer=regularizers.l2(l2_norm))([h, adj])

                h = Dropout(rate=dropout)(h)

            h = GraphConvolution(self.n_classes, use_bias=use_bias)([h, adj])
            h = tf.ensure_shape(h, [self.n_nodes, self.n_classes])
            h = tf.gather(h, index)
            output = Softmax()(h)

            model = Model(inputs=[x, adj, index], outputs=output)
            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=Adam(lr=learning_rate),
                          metrics=['accuracy'])

            self.model = model
            self.built = True
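All of these examples stack graphgallery's GraphConvolution layer, which is not reproduced on this page. As a rough illustration only, the sketch below shows a layer with the same [h, adj] call signature implementing the usual propagation rule H' = activation(A H W); the project's actual layer will differ in details such as initialisation, bias handling and sparse support.

# A rough, self-contained sketch of a GraphConvolution-style Keras layer
# computing H' = activation(adj @ H @ W); this is an illustration, not
# graphgallery's implementation.
import tensorflow as tf
from tensorflow.keras import activations as keras_activations
from tensorflow.keras.layers import Layer

class SimpleGraphConvolution(Layer):
    def __init__(self, units, activation=None, use_bias=False, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.activation = keras_activations.get(activation)
        self.use_bias = use_bias

    def build(self, input_shapes):
        n_features = input_shapes[0][-1]
        self.kernel = self.add_weight('kernel', shape=(n_features, self.units),
                                      initializer='glorot_uniform')
        if self.use_bias:
            self.bias = self.add_weight('bias', shape=(self.units,),
                                        initializer='zeros')
        super().build(input_shapes)

    def call(self, inputs):
        h, adj = inputs                   # node features and (normalised) adjacency
        h = h @ self.kernel               # X W
        if isinstance(adj, tf.SparseTensor):
            h = tf.sparse.sparse_dense_matmul(adj, h)   # A X W (sparse adjacency)
        else:
            h = adj @ h                                  # A X W (dense adjacency)
        if self.use_bias:
            h = h + self.bias
        return self.activation(h)

With such a layer, the loops in examples #3-#5 simply alternate graph convolutions and dropout before gathering the output rows selected by index.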
Code Example #4
    def build(self,
              hidden_layers=[32],
              activations=['relu'],
              dropout=0.5,
              learning_rate=0.01,
              l2_norm=1e-5):

        with self.device:

            x = Input(batch_shape=[None, self.n_features],
                      dtype=tf.float32,
                      name='features')
            adj = Input(batch_shape=[None, None],
                        dtype=tf.float32,
                        sparse=True,
                        name='adj_matrix')
            mask = Input(batch_shape=[None], dtype=tf.bool, name='mask')

            h = Dropout(rate=dropout)(x)

            for hid, activation in zip(hidden_layers, activations):
                h = GraphConvolution(
                    hid,
                    activation=activation,
                    kernel_regularizer=regularizers.l2(l2_norm))([h, adj])
                h = Dropout(rate=dropout)(h)

            h = GraphConvolution(self.n_classes)([h, adj])
            h = tf.boolean_mask(h, mask)
            output = Softmax()(h)

            model = Model(inputs=[x, adj, mask], outputs=output)

            model.compile(loss='sparse_categorical_crossentropy',
                          optimizer=Adam(lr=learning_rate),
                          metrics=['accuracy'],
                          experimental_run_tf_function=False)

            self.model = model
            self.built = True
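Unlike examples #1-#3, this build leaves the node count unspecified (batch_shape=[None, ...]) and selects the output rows with a boolean mask rather than integer indices. The two selection styles pick the same rows; a small check with hypothetical values:

# Selecting nodes by integer indices (examples #1-#3) versus a boolean mask
# (example #4): the two produce identical rows, only the input encoding differs.
import tensorflow as tf

h = tf.random.normal([5, 3])                  # logits for 5 nodes, 3 classes
index = tf.constant([0, 2, 4])                # integer node ids
mask = tf.constant([True, False, True, False, True])

tf.debugging.assert_near(tf.gather(h, index), tf.boolean_mask(h, mask))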
Code Example #5
File: gmnn.py  Project: voladorlu/graphgallery
            def build_GCN(x):
                h = Dropout(rate=dropout)(x)

                for hid, activation in zip(hidden_layers, activations):
                    h = GraphConvolution(
                        hid,
                        use_bias=use_bias,
                        activation=activation,
                        kernel_regularizer=regularizers.l2(l2_norm))([h, adj])
#                     h = Dropout(rate=dropout)(h)

                h = GraphConvolution(self.n_classes,
                                     use_bias=use_bias)([h, adj])
                h = tf.ensure_shape(h, [self.n_nodes, self.n_classes])
                h = tf.gather(h, index)
                output = Softmax()(h)

                model = Model(inputs=[x, adj, index], outputs=output)
                model.compile(loss='categorical_crossentropy',
                              optimizer=RMSprop(lr=learning_rate),
                              metrics=['accuracy'])
                return model
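The GMNN builder compiles with categorical_crossentropy and RMSprop instead of the sparse loss and Adam used above, presumably because it is trained on predicted label distributions (soft targets) rather than integer class ids. On one-hot targets the two losses agree; a small check with hypothetical values:

# categorical_crossentropy (example #5) accepts dense/soft label distributions,
# whereas sparse_categorical_crossentropy (examples #2-#4) expects integer ids;
# on one-hot targets the two coincide.
import tensorflow as tf

probs = tf.constant([[0.7, 0.2, 0.1]])
sparse = tf.keras.losses.sparse_categorical_crossentropy(tf.constant([0]), probs)
dense = tf.keras.losses.categorical_crossentropy(tf.constant([[1., 0., 0.]]), probs)
tf.debugging.assert_near(sparse, dense)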