Example #1
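A ChebyNet-style node classifier: the constructor wires `order + 1` sparse Chebyshev-polynomial adjacency inputs through stacked `ChebyConvolution` layers, gathers the rows selected by `node_index`, and compiles the resulting Keras model with sparse categorical cross-entropy on logits. (The snippet is a method excerpt; the surrounding class, presumably a Keras `Model` subclass, supplies `floatx()`/`intx()` and the layer imports.)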
    def __init__(self, in_channels, out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, order=2, use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(), name='node_attr')
        adj = [Input(batch_shape=[None, None],
                     dtype=floatx(), sparse=True,
                     name=f'adj_matrix_{i}') for i in range(order + 1)]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = ChebyConvolution(hidden, order=order, use_bias=use_bias,
                                 activation=activation,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ChebyConvolution(out_channels,
                             order=order, use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #2
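A `build` method for a GCN trained with virtual adversarial regularization (TensorFlow backend only): besides the usual cross-entropy objective, it adds `p1 * vat_loss + p2 * entropy_loss` via `model.add_loss`, and it keeps a persistent trainable perturbation `r_vadv` plus a slower `adv_optimizer`, presumably for an adversarial inner loop defined elsewhere in the class.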
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.5,
              weight_decay=5e-4,
              use_bias=False,
              lr=0.01,
              p1=1.4,
              p2=0.7):

        if self.backend == "torch":
            raise RuntimeError(
                f"Currently {self.name} only supports for tensorflow backend.")

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))
            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

            self.r_vadv = tf.Variable(TruncatedNormal(stddev=0.01)(
                shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
                name="r_vadv")
            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
            self.adv_optimizer = Adam(lr=lr / 10)
Example #3
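A close variant of Example #2: the same GCN with virtual adversarial training, but the perturbation magnitude is passed directly as `epsilon` into `virtual_adversarial_loss`, so no persistent `r_vadv` variable or separate adversarial optimizer is kept.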
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.,
              lr=0.01,
              weight_decay=5e-4,
              p1=1.4,
              p2=0.7,
              use_bias=False,
              epsilon=0.01):

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))

            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
Example #4
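A robust GCN that models hidden representations as Gaussians: `GaussionConvolution_F` produces a mean and variance (the spelling follows the library's own class names), optional `GaussionConvolution_D` layers propagate them, the output is drawn with a `Sample` layer, and a KL-divergence penalty weighted by `kl` regularizes the first layer's distribution. Note that `hiddens` must be non-empty, since the later layers consume `mean` and `var` from the first one.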
    def __init__(self, in_channels, out_channels,
                 hiddens=[64],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, kl=5e-4, gamma=1.,
                 use_bias=False):

        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx, name='node_attr')
        adj = [Input(batch_shape=[None, None], dtype=_floatx,
                     sparse=True, name='adj_matrix_1'),
               Input(batch_shape=[None, None], dtype=_floatx, sparse=True,
                     name='adj_matrix_2')]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        if hiddens:
            mean, var = GaussionConvolution_F(hiddens[0], gamma=gamma,
                                              use_bias=use_bias,
                                              activation=activations[0],
                                              kernel_regularizer=regularizers.l2(weight_decay))([h, *adj])
            if kl:
                KL_divergence = 0.5 * \
                    tf.reduce_mean(tf.math.square(mean) + var -
                                   tf.math.log(1e-8 + var) - 1, axis=1)
                KL_divergence = tf.reduce_sum(KL_divergence)

                # KL loss
                kl_loss = kl * KL_divergence

        # additional layers (usually unnecessary)
        for hidden, activation in zip(hiddens[1:], activations[1:]):

            mean, var = GaussionConvolution_D(
                hidden, gamma=gamma, use_bias=use_bias, activation=activation)([mean, var, *adj])
            mean = Dropout(rate=dropout)(mean)
            var = Dropout(rate=dropout)(var)

        mean, var = GaussionConvolution_D(
            out_channels, gamma=gamma, use_bias=use_bias)([mean, var, *adj])

        h = Sample()([mean, var])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])

        if hiddens and kl:
            self.add_loss(kl_loss)
Example #5
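A GCN over a dense adjacency input (`DenseConvolution`, and no `sparse=True` on the `adj` input); besides building and compiling the model, it stores the `eps1`/`eps2`/`lamb1`/`lamb2` hyperparameters on `self`, presumably for an adversarial-training routine defined elsewhere in the class.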
    def build(self,
              hiddens=[32],
              activations=['relu'],
              dropout=0.5,
              weight_decay=5e-4,
              lr=0.01,
              use_bias=False,
              eps1=0.3,
              eps2=1.2,
              lamb1=0.8,
              lamb2=0.8):

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='features')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        name='adj_matrix')
            index = Input(batch_shape=[None], dtype=self.intx, name='index')

            h = x
            for hid, activation in zip(hiddens, activations):
                h = DenseConvolution(
                    hid,
                    use_bias=use_bias,
                    activation=activation,
                    kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

                h = Dropout(rate=dropout)(h)

            h = DenseConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias)([h, adj])
            h = Gather()([h, index])

            model = TFKeras(inputs=[x, adj, index], outputs=h)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

            self.eps1 = eps1
            self.eps2 = eps2
            self.lamb1 = lamb1
            self.lamb2 = lamb2
            self.model = model
Example #6
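A decoupled transform-then-propagate model in the spirit of APPNP/DAGNN: plain `Dense` layers transform the node features first, and a single `PropConvolution` layer then propagates them over the graph for `K` steps with a sigmoid gating activation.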
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[64],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-3,
                 lr=0.01,
                 use_bias=False,
                 K=10):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = Dense(hidden,
                      use_bias=use_bias,
                      activation=activation,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(dropout)(h)

        h = Dense(out_channels,
                  use_bias=use_bias,
                  activation=activations[-1],
                  kernel_regularizer=regularizers.l2(weight_decay))(h)
        h = Dropout(dropout)(h)

        h = PropConvolution(
            K,
            use_bias=use_bias,
            activation='sigmoid',
            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #7
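A graph wavelet network (GWNN-style): convolution operates in a wavelet basis, so the model takes precomputed sparse `wavelet` and `inverse_wavelet` matrices of fixed shape `[num_nodes, num_nodes]` as extra inputs alongside the node attributes.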
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_nodes,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        _floatx = floatx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=_floatx,
                  name='node_attr')
        wavelet = Input(batch_shape=[num_nodes, num_nodes],
                        dtype=_floatx,
                        sparse=True,
                        name='wavelet_matrix')
        inverse_wavelet = Input(batch_shape=[num_nodes, num_nodes],
                                dtype=_floatx,
                                sparse=True,
                                name='inverse_wavelet_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = WaveletConvolution(
                hidden,
                activation=activation,
                use_bias=use_bias,
                kernel_regularizer=regularizers.l2(weight_decay))(
                    [h, wavelet, inverse_wavelet])
            h = Dropout(rate=dropout)(h)

        h = WaveletConvolution(
            out_channels, use_bias=use_bias)([h, wavelet, inverse_wavelet])
        h = Gather()([h, index])

        super().__init__(inputs=[x, wavelet, inverse_wavelet, index],
                         outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #8
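A GAT node classifier: the hidden `GraphAttention` layers concatenate `n_head` attention heads, while the output layer averages a single head. Note the heavier default dropout (0.6) and that `use_bias` defaults to True here, unlike the other examples.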
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            h = GraphAttention(
                hidden,
                attn_heads=n_head,
                reduction='concat',
                use_bias=use_bias,
                activation=activation,
                kernel_regularizer=regularizers.l2(weight_decay),
                attn_kernel_regularizer=regularizers.l2(weight_decay),
            )([h, adj])
            h = Dropout(rate=dropout)(h)

        h = GraphAttention(out_channels,
                           use_bias=use_bias,
                           attn_heads=1,
                           reduction='average')([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #9
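A nested helper that builds and compiles a plain GCN; `hiddens`, `activations`, `adj`, `index`, and the other free names are closed over from the enclosing scope. Unlike the previous examples, it uses `CategoricalCrossentropy` (one-hot labels) and an `RMSprop` optimizer.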
            def build_GCN(x):
                h = x
                for hidden, activation in zip(hiddens, activations):
                    h = GraphConvolution(
                        hidden,
                        use_bias=use_bias,
                        activation=activation,
                        kernel_regularizer=regularizers.l2(weight_decay))(
                            [h, adj])
                    h = Dropout(rate=dropout)(h)

                h = GraphConvolution(self.graph.num_node_classes,
                                     use_bias=use_bias)([h, adj])
                h = Gather()([h, index])

                model = TFKeras(inputs=[x, adj, index], outputs=h)
                model.compile(loss=CategoricalCrossentropy(from_logits=True),
                              optimizer=RMSprop(lr=lr),
                              metrics=['accuracy'])
                return model