Example #1
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.5,
              weight_decay=5e-4,
              use_bias=False,
              lr=0.01,
              p1=1.4,
              p2=0.7):

        if self.backend == "torch":
            raise RuntimeError(
                f"Currently {self.name} only supports for tensorflow backend.")

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))
            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

            self.r_vadv = tf.Variable(TruncatedNormal(stddev=0.01)(
                shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
                name="r_vadv")
            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
            self.adv_optimizer = Adam(lr=lr / 10)
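
The build methods above and below are class methods lifted out of their modules, so no imports are shown. A minimal sketch of the TensorFlow-side imports these snippets rely on follows; the project-specific names (GraphConvolution, GCNConv, Gather, TFKeras, entropy_y_x) are assumed to come from the enclosing codebase, and the placeholder import paths in the comments are not the project's real ones.

import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras import regularizers
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import TruncatedNormal

# Project-level symbols used by the snippets (placeholder paths):
# from <project>.layers import GraphConvolution, GCNConv, Gather
# from <project>.models import TFKeras
# from <project>.losses import entropy_y_x
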
Example #2
    def build(self,
              hiddens=[16],
              activations=['relu'],
              dropout=0.,
              lr=0.01,
              weight_decay=5e-4,
              p1=1.4,
              p2=0.7,
              use_bias=False,
              epsilon=0.01):

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.num_node_attrs],
                      dtype=self.floatx,
                      name='node_attr')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx,
                          name='node_index')

            GCN_layers = []
            for hidden, activation in zip(hiddens, activations):
                GCN_layers.append(
                    GraphConvolution(
                        hidden,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

            GCN_layers.append(
                GraphConvolution(self.graph.num_node_classes,
                                 use_bias=use_bias))

            self.GCN_layers = GCN_layers
            self.dropout = Dropout(rate=dropout)

            logit = self.forward(x, adj)
            output = Gather()([logit, index])

            model = TFKeras(inputs=[x, adj, index], outputs=output)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])

            entropy_loss = entropy_y_x(logit)
            vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
            model.add_loss(p1 * vat_loss + p2 * entropy_loss)

            self.model = model
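
For reference, the two terms registered through model.add_loss in the examples above follow the standard virtual adversarial training objective (a sketch of the usual VAT formulation, not taken from the source): the supervised cross-entropy is augmented with a local-smoothness term and the conditional entropy of the predictions,

\[
\mathcal{L} \;=\; \mathcal{L}_{\mathrm{sup}}
  \;+\; p_1 \, D_{\mathrm{KL}}\big(p(y \mid x, A) \,\big\|\, p(y \mid x + r_{\mathrm{vadv}}, A)\big)
  \;+\; p_2 \, \mathcal{H}\big(p(y \mid x, A)\big),
\]

where r_vadv is the virtual adversarial perturbation: kept as an explicit variable r_vadv in Examples #1 and #3, and recomputed under the norm bound epsilon in Examples #2 and #4.
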
Example #3
    def builder(self,
                hids=[16],
                acts=['relu'],
                dropout=0.5,
                weight_decay=5e-4,
                lr=0.01,
                use_bias=False,
                p1=1.4,
                p2=0.7,
                use_tfn=True):

        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx,
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx,
                    sparse=True,
                    name='adj_matrix')

        GCN_layers = []
        for hid, act in zip(hids, acts):
            GCN_layers.append(
                GraphConvolution(
                    hid,
                    activation=act,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))

        GCN_layers.append(
            GraphConvolution(self.graph.num_node_classes,
                             use_bias=use_bias))
        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)

        h = self.forward(x, adj)

        model = TFKeras(inputs=[x, adj], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        self.r_vadv = tf.Variable(TruncatedNormal(stddev=0.01)(
            shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
            name="r_vadv")
        entropy_loss = entropy_y_x(h)
        vat_loss = self.virtual_adversarial_loss(x, adj, h)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)

        self.adv_optimizer = Adam(lr=lr / 10.)

        if use_tfn:
            model.use_tfn()
        return model
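
A hypothetical call site for the builder above, assuming it is invoked from another method of the same wrapper class and that TFKeras behaves like a standard keras.Model (the argument values and the two-layer configuration are illustrative only):

        # e.g. inside the wrapper class that defines builder()
        model = self.builder(hids=[32, 16], acts=['relu', 'relu'], use_tfn=False)
        model.summary()  # plain Keras model summary
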
Example #4
    def model_step(self,
                   hids=[16],
                   acts=['relu'],
                   dropout=0.,
                   lr=0.01,
                   weight_decay=5e-4,
                   bias=False,
                   p1=1.4,
                   p2=0.7,
                   epsilon=0.01):

        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx,
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx,
                    sparse=True,
                    name='adj_matrix')

        GCN_layers = []
        for hid, act in zip(hids, acts):
            GCN_layers.append(
                GCNConv(hid,
                        activation=act,
                        bias=bias,
                        kernel_regularizer=regularizers.l2(weight_decay)))

        GCN_layers.append(GCNConv(self.graph.num_node_classes, bias=bias))

        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)

        h = self.forward(x, adj)

        model = TFKeras(inputs=[x, adj], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        entropy_loss = entropy_y_x(h)
        vat_loss = self.virtual_adversarial_loss(x, adj, h, epsilon)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)

        return model
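
Examples #2 and #4 pass an epsilon norm bound into virtual_adversarial_loss instead of keeping r_vadv as an explicit variable. The project's own implementation is not shown here; the sketch below is only the standard power-iteration form of that loss, and every name in it (virtual_adversarial_loss_sketch, kl_divergence_with_logits, model_fn, xi, num_iters) is an assumption for illustration.

import tensorflow as tf

def kl_divergence_with_logits(q_logit, p_logit):
    # KL(q || p) computed from raw logits, averaged over nodes
    q = tf.nn.softmax(q_logit)
    qlogq = tf.reduce_sum(q * tf.nn.log_softmax(q_logit), axis=-1)
    qlogp = tf.reduce_sum(q * tf.nn.log_softmax(p_logit), axis=-1)
    return tf.reduce_mean(qlogq - qlogp)

def virtual_adversarial_loss_sketch(model_fn, x, adj, logit,
                                    epsilon=0.01, xi=1e-6, num_iters=1):
    # Approximate the most sensitive input direction by power iteration,
    # then penalize the prediction change under that perturbation.
    d = tf.random.normal(shape=tf.shape(x))
    for _ in range(num_iters):
        d = xi * tf.math.l2_normalize(d, axis=-1)
        with tf.GradientTape() as tape:
            tape.watch(d)
            dist = kl_divergence_with_logits(logit, model_fn(x + d, adj))
        d = tf.stop_gradient(tape.gradient(dist, d))
    r_vadv = epsilon * tf.math.l2_normalize(d, axis=-1)
    return kl_divergence_with_logits(tf.stop_gradient(logit),
                                     model_fn(x + r_vadv, adj))
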