Example #1
    def __init__(self, in_features, out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, bias=False,
                 experimental_run_tf_function=True):

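        # Inputs: a dense node-attribute matrix and a sparse adjacency matrix.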
        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
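        # Hidden GraphConvolution layers with L2 weight decay, each followed by dropout.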
        for hid, act in zip(hids, acts):
            h = GraphConvolution(hid, use_bias=bias,
                                 activation=act,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

            h = Dropout(rate=dropout)(h)

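        # Output layer produces per-node logits; softmax is folded into the loss (from_logits=True).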
        h = GraphConvolution(out_features, use_bias=bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'],
                     experimental_run_tf_function=experimental_run_tf_function)
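A minimal usage sketch for the model above, assuming the `__init__` belongs to a `tf.keras.Model` subclass (called `GCN` below for illustration), that `attr_matrix` is a dense float tensor of node attributes, and that `adj_matrix` is a `tf.sparse.SparseTensor` adjacency; all of these names are hypothetical:

    # Hypothetical usage; GCN, attr_matrix, adj_matrix and num_classes are illustrative names.
    model = GCN(in_features=attr_matrix.shape[1], out_features=num_classes)
    logits = model([attr_matrix, adj_matrix], training=False)  # logits for every node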
Example #2
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.5,
              weight_decay=5e-4,
              use_bias=False,
              lr=0.01,
              p1=1.4,
              p2=0.7):

        if self.backend == "torch":
            raise RuntimeError(
                f"Currently {self.name} only supports the TensorFlow backend.")

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

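            # Hidden GCN layers plus an output layer sized to the number of node classes.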
            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))
            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

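            # Compute logits for every node, then gather the rows for the requested node indices.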
            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

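            # Learnable virtual adversarial perturbation over the node-attribute matrix.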
            self.r_vadv = tf.Variable(TruncatedNormal(stddev=0.01)(
                shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
                name="r_vadv")
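            # Conditional-entropy and virtual adversarial losses (weighted by p2 and p1) added as regularizers.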
            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
            self.adv_optimizer = Adam(lr=lr / 10)
Example #3
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.,
              lr=0.01,
              weight_decay=5e-4,
              p1=1.4,
              p2=0.7,
              use_bias=False,
              epsilon=0.01):

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))

            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

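            # Entropy and virtual adversarial losses added on top of the classification loss; epsilon sets the size of the adversarial perturbation.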
            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
Example #4
    def builder(self,
                hids=[16],
                acts=['relu'],
                dropout=0.5,
                weight_decay=5e-4,
                lr=0.01,
                use_bias=False,
                p1=1.4,
                p2=0.7,
                use_tfn=True):

        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx,
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx,
                    sparse=True,
                    name='adj_matrix')

        GCN_layers = []
        for hid, act in zip(hids, acts):
            GCN_layers.append(
                GraphConvolution(
                    hid,
                    activation=act,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))

        GCN_layers.append(
            GraphConvolution(self.graph.num_node_classes,
                             use_bias=use_bias))
        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)

        h = self.forward(x, adj)

        model = TFKeras(inputs=[x, adj], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

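        # Learnable virtual adversarial perturbation over all node attributes.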
        self.r_vadv = tf.Variable(TruncatedNormal(stddev=0.01)(shape=[self.graph.num_nodes,
                                                                      self.graph.num_node_attrs]), name="r_vadv")
        entropy_loss = entropy_y_x(h)
        vat_loss = self.virtual_adversarial_loss(x, adj, h)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)

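        # Separate optimizer with a smaller learning rate for the adversarial (perturbation) updates.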
        self.adv_optimizer = Adam(lr=lr / 10.)

        if use_tfn:
            model.use_tfn()
        return model
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')

        h = x
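        # Hidden layers are plain Dense transforms; only the output layer propagates over the graph.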
        for hidden, activation in zip(hiddens, activations):
            h = Dense(hidden,
                      use_bias=use_bias,
                      activation=activation,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(rate=dropout)(h)

        h = GraphConvolution(out_channels, use_bias=use_bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #6
        def build_GCN(x):
            h = x
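            # hids, acts, dropout, weight_decay, use_bias and lr are captured from the enclosing scope.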
            for hid, act in zip(hids, acts):
                h = GraphConvolution(
                    hid,
                    use_bias=use_bias,
                    activation=act,
                    kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
                h = Dropout(rate=dropout)(h)

            h = GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias)([h, adj])

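            # This variant trains on one-hot labels (CategoricalCrossentropy) with the RMSprop optimizer.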
            model = TFKeras(inputs=[x, adj], outputs=h)
            model.compile(loss=CategoricalCrossentropy(from_logits=True),
                          optimizer=RMSprop(lr=lr),
                          metrics=['accuracy'])
            return model