# Assumed imports for the builders below. The tf.keras paths are standard;
# the GraphGallery-style helpers (DenseConvolution, GraphConvolution,
# GCNConv, Gather, TFKeras, entropy_y_x) are library internals and are
# assumed to be imported from it; their exact module paths vary across
# versions, so they are not spelled out here.
import tensorflow as tf
from tensorflow.keras import Input, regularizers
from tensorflow.keras.layers import Dropout
from tensorflow.keras.initializers import TruncatedNormal
from tensorflow.keras.losses import (CategoricalCrossentropy,
                                     SparseCategoricalCrossentropy)
from tensorflow.keras.optimizers import Adam, RMSprop


def build(self, hiddens=[32], activations=['relu'], dropout=0.5,
          weight_decay=5e-4, lr=0.01, use_bias=False,
          eps1=0.3, eps2=1.2, lamb1=0.8, lamb2=0.8):
    with tf.device(self.device):
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx, name='features')
        adj = Input(batch_shape=[None, None], dtype=self.floatx,
                    name='adj_matrix')
        index = Input(batch_shape=[None], dtype=self.intx, name='index')

        # Hidden dense graph convolutions with dropout in between.
        h = x
        for hid, activation in zip(hiddens, activations):
            h = DenseConvolution(
                hid,
                use_bias=use_bias,
                activation=activation,
                kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        # Output layer emits per-node logits; Gather keeps only the
        # nodes referenced by `index`.
        h = DenseConvolution(self.graph.num_node_classes,
                             use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        model = TFKeras(inputs=[x, adj, index], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        self.eps1 = eps1
        self.eps2 = eps2
        self.lamb1 = lamb1
        self.lamb2 = lamb2
        self.model = model
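
# `Gather` is used above (and in several builders below) to select the
# rows of the logit matrix belonging to the nodes in `index`, so the loss
# is computed only on those nodes. A minimal sketch of such a layer; the
# library's own implementation may differ:
class Gather(tf.keras.layers.Layer):
    def call(self, inputs):
        h, index = inputs
        # Pick out the per-node logits at the requested indices.
        return tf.gather(h, index)
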
def build_GCN(x):
    # Nested helper: `hids`, `acts`, `adj`, `use_bias`, `dropout`,
    # `weight_decay`, `lr`, and `self` are captured from the enclosing
    # builder's scope.
    h = x
    for hid, act in zip(hids, acts):
        h = GraphConvolution(
            hid,
            use_bias=use_bias,
            activation=act,
            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
        h = Dropout(rate=dropout)(h)

    h = GraphConvolution(self.graph.num_node_classes,
                         use_bias=use_bias)([h, adj])

    # Note: unlike the other builders, this one trains on dense
    # (one-hot) labels with RMSprop.
    model = TFKeras(inputs=[x, adj], outputs=h)
    model.compile(loss=CategoricalCrossentropy(from_logits=True),
                  optimizer=RMSprop(lr=lr),
                  metrics=['accuracy'])
    return model
def builder(self, hids=[16], acts=['relu'], dropout=0.5, weight_decay=5e-4,
            lr=0.01, use_bias=False, p1=1.4, p2=0.7, use_tfn=True):
    x = Input(batch_shape=[None, self.graph.num_node_attrs],
              dtype=self.floatx, name='node_attr')
    adj = Input(batch_shape=[None, None], dtype=self.floatx,
                sparse=True, name='adj_matrix')

    GCN_layers = []
    for hid, act in zip(hids, acts):
        GCN_layers.append(
            GraphConvolution(
                hid,
                activation=act,
                use_bias=use_bias,
                kernel_regularizer=regularizers.l2(weight_decay)))
    GCN_layers.append(
        GraphConvolution(self.graph.num_node_classes, use_bias=use_bias))

    self.GCN_layers = GCN_layers
    self.dropout = Dropout(rate=dropout)
    h = self.forward(x, adj)

    model = TFKeras(inputs=[x, adj], outputs=h)
    model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])

    # Trainable perturbation for the optimization-based adversarial step.
    self.r_vadv = tf.Variable(
        TruncatedNormal(stddev=0.01)(
            shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
        name="r_vadv")

    # Add the VAT regularizers: the virtual adversarial loss weighted by
    # p1 plus the prediction-entropy penalty weighted by p2.
    entropy_loss = entropy_y_x(h)
    vat_loss = self.virtual_adversarial_loss(x, adj, h)
    model.add_loss(p1 * vat_loss + p2 * entropy_loss)

    self.adv_optimizer = Adam(lr=lr / 10.)
    if use_tfn:
        model.use_tfn()
    return model
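
# This builder and the ones below store `self.GCN_layers` and
# `self.dropout` and delegate the symbolic forward pass to
# `self.forward(x, adj)`, which is not shown in this excerpt. A minimal
# sketch of the assumed behaviour (dropout between convolutions, final
# layer returning raw logits):
def forward(self, x, adj):
    h = x
    for layer in self.GCN_layers[:-1]:
        h = layer([h, adj])
        h = self.dropout(h)
    # The last layer maps to `num_node_classes` unnormalized logits.
    return self.GCN_layers[-1]([h, adj])
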
def build(self, hiddens=[16], activations=['relu'], dropout=0.5,
          weight_decay=5e-4, use_bias=False, lr=0.01, p1=1.4, p2=0.7):
    if self.backend == "torch":
        raise RuntimeError(
            f"Currently {self.name} only supports the TensorFlow backend.")

    with tf.device(self.device):
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx, name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=self.floatx,
                    sparse=True, name='adj_matrix')
        index = Input(batch_shape=[None], dtype=self.intx, name='node_index')

        GCN_layers = []
        for hidden, activation in zip(hiddens, activations):
            GCN_layers.append(
                GraphConvolution(
                    hidden,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))
        GCN_layers.append(
            GraphConvolution(self.graph.num_node_classes, use_bias=use_bias))

        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)
        logit = self.forward(x, adj)
        output = Gather()([logit, index])

        model = TFKeras(inputs=[x, adj, index], outputs=output)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        # Trainable perturbation for the optimization-based adversarial step.
        self.r_vadv = tf.Variable(
            TruncatedNormal(stddev=0.01)(
                shape=[self.graph.num_nodes, self.graph.num_node_attrs]),
            name="r_vadv")

        entropy_loss = entropy_y_x(logit)
        vat_loss = self.virtual_adversarial_loss(x, adj, logit)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)

        self.model = model
        self.adv_optimizer = Adam(lr=lr / 10)
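
# The two builders above keep a trainable perturbation `r_vadv` and a
# separate, slower `adv_optimizer`, which points to an optimization-based
# VAT scheme: before each training step the perturbation is updated by
# gradient *ascent* on the divergence between clean and perturbed
# predictions. `update_r_vadv` is a hypothetical sketch of that inner
# step, not part of the original excerpt:
def update_r_vadv(self, x, adj, logit, steps=10):
    q = tf.stop_gradient(tf.nn.softmax(logit))
    for _ in range(steps):
        with tf.GradientTape() as tape:
            logit_p = self.forward(x + self.r_vadv, adj)
            # Negate the KL divergence so that minimizing it ascends it.
            loss = -tf.reduce_mean(
                tf.reduce_sum(
                    q * (tf.math.log(q + 1e-12) - tf.nn.log_softmax(logit_p)),
                    axis=1))
        grad = tape.gradient(loss, self.r_vadv)
        self.adv_optimizer.apply_gradients([(grad, self.r_vadv)])
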
def build(self, hiddens=[16], activations=['relu'], dropout=0., lr=0.01,
          weight_decay=5e-4, p1=1.4, p2=0.7, use_bias=False, epsilon=0.01):
    with tf.device(self.device):
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx, name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=self.floatx,
                    sparse=True, name='adj_matrix')
        index = Input(batch_shape=[None], dtype=self.intx, name='node_index')

        GCN_layers = []
        for hidden, activation in zip(hiddens, activations):
            GCN_layers.append(
                GraphConvolution(
                    hidden,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))
        GCN_layers.append(
            GraphConvolution(self.graph.num_node_classes, use_bias=use_bias))

        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)
        logit = self.forward(x, adj)
        output = Gather()([logit, index])

        model = TFKeras(inputs=[x, adj, index], outputs=output)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        entropy_loss = entropy_y_x(logit)
        vat_loss = self.virtual_adversarial_loss(x, adj, logit, epsilon)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)

        self.model = model
def model_step(self, hids=[16], acts=['relu'], dropout=0., lr=0.01,
               weight_decay=5e-4, bias=False, p1=1.4, p2=0.7, epsilon=0.01):
    x = Input(batch_shape=[None, self.graph.num_node_attrs],
              dtype=self.floatx, name='node_attr')
    adj = Input(batch_shape=[None, None], dtype=self.floatx,
                sparse=True, name='adj_matrix')

    GCN_layers = []
    for hid, act in zip(hids, acts):
        GCN_layers.append(
            GCNConv(hid,
                    activation=act,
                    bias=bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))
    GCN_layers.append(GCNConv(self.graph.num_node_classes, bias=bias))

    self.GCN_layers = GCN_layers
    self.dropout = Dropout(rate=dropout)
    h = self.forward(x, adj)

    model = TFKeras(inputs=[x, adj], outputs=h)
    model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])

    entropy_loss = entropy_y_x(h)
    vat_loss = self.virtual_adversarial_loss(x, adj, h, epsilon)
    model.add_loss(p1 * vat_loss + p2 * entropy_loss)
    return model
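
# `entropy_y_x` and `virtual_adversarial_loss` are the two regularizers
# from the virtual adversarial training (VAT) formulation of Miyato et
# al. used by every builder above. Minimal sketches under that assumption
# follow; they are illustrations, not the library's exact implementations.
# Note the signatures vary across the builders: the r_vadv-based ones call
# virtual_adversarial_loss without `epsilon`, presumably substituting the
# stored perturbation for the power-iteration estimate below.
def kl_divergence_with_logit(q_logit, p_logit):
    # Mean KL(q || p) between two predictive distributions given logits.
    q = tf.nn.softmax(q_logit)
    qlogq = tf.reduce_mean(
        tf.reduce_sum(q * tf.nn.log_softmax(q_logit), axis=1))
    qlogp = tf.reduce_mean(
        tf.reduce_sum(q * tf.nn.log_softmax(p_logit), axis=1))
    return qlogq - qlogp


def entropy_y_x(logit):
    # Conditional entropy H(y|x) of the model's own predictions;
    # penalizing it pushes the predictions toward high confidence.
    p = tf.nn.softmax(logit)
    return -tf.reduce_mean(
        tf.reduce_sum(p * tf.nn.log_softmax(logit), axis=1))


def virtual_adversarial_loss(self, x, adj, logit, epsilon, xi=1e-6,
                             num_power_iterations=1):
    # Approximate the most sensitive perturbation direction with a few
    # power-iteration steps, then measure how far the prediction moves
    # under an epsilon-sized step in that direction.
    d = tf.random.normal(shape=tf.shape(x))
    for _ in range(num_power_iterations):
        d = xi * tf.math.l2_normalize(d, axis=1)
        with tf.GradientTape() as tape:
            tape.watch(d)
            logit_p = self.forward(x + d, adj)
            dist = kl_divergence_with_logit(tf.stop_gradient(logit), logit_p)
        d = tf.stop_gradient(tape.gradient(dist, d))
    r_vadv = epsilon * tf.math.l2_normalize(d, axis=1)
    logit_p = self.forward(x + r_vadv, adj)
    return kl_divergence_with_logit(tf.stop_gradient(logit), logit_p)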