def __init__(self, in_features, out_features,
             hids=[16], acts=['relu'],
             dropout=0.5, weight_decay=5e-4,
             lr=0.01, bias=False):
    # Node attribute matrix ([N, F]) and dense adjacency matrix ([N, N]).
    x = Input(batch_shape=[None, in_features], dtype=floatx(),
              name='node_attr')
    adj = Input(batch_shape=[None, None], dtype=floatx(),
                sparse=False, name='adj_matrix')

    h = x
    # Hidden graph convolution layers, each followed by dropout.
    for hid, act in zip(hids, acts):
        h = DenseConvolution(hid,
                             use_bias=bias,
                             activation=act,
                             kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
        h = Dropout(rate=dropout)(h)

    # Output layer producing per-node class logits.
    h = DenseConvolution(out_features, use_bias=bias)([h, adj])

    super().__init__(inputs=[x, adj], outputs=h)
    self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                 optimizer=Adam(lr=lr),
                 metrics=['accuracy'])
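# For reference, a minimal sketch of what a dense graph convolution layer
# such as `DenseConvolution` plausibly computes: the Kipf-Welling GCN
# propagation rule act(adj @ x @ W (+ b)) on a dense, pre-normalized
# adjacency matrix. `DenseGraphConvSketch` is a hypothetical stand-in for
# illustration only, not the library's actual implementation.
import tensorflow as tf
from tensorflow.keras.layers import Layer


class DenseGraphConvSketch(Layer):
    """Hypothetical stand-in for `DenseConvolution`: [x, adj] -> features."""

    def __init__(self, units, use_bias=False, activation=None, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.use_bias = use_bias
        self.activation = tf.keras.activations.get(activation)

    def build(self, input_shapes):
        x_shape, _ = input_shapes
        self.kernel = self.add_weight(name='kernel',
                                      shape=[int(x_shape[-1]), self.units])
        self.bias = (self.add_weight(name='bias', shape=[self.units],
                                     initializer='zeros')
                     if self.use_bias else None)

    def call(self, inputs):
        x, adj = inputs               # x: [N, F], adj: [N, N] (normalized)
        h = adj @ (x @ self.kernel)   # transform features, then propagate
        if self.bias is not None:
            h = h + self.bias
        return self.activation(h)


# Quick smoke test on random data: 5 nodes, 8 input features, 16 outputs.
h = DenseGraphConvSketch(16, activation='relu')([tf.random.normal([5, 8]),
                                                 tf.eye(5)])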
def __init__(self, in_channels, out_channels,
             hiddens=[32], n_filters=[8, 8],
             activations=[None, None],
             dropout=0.8, weight_decay=5e-4,
             lr=0.1, use_bias=False, K=8):
    x = Input(batch_shape=[None, in_channels], dtype=floatx(),
              name='node_attr')
    adj = Input(batch_shape=[None, None], dtype=floatx(),
                sparse=False, name='adj_matrix')
    # Boolean mask selecting the nodes whose predictions are emitted.
    mask = Input(batch_shape=[None], dtype='bool', name='node_mask')

    h = x
    # Plain graph convolution layers, each preceded by dropout.
    for idx, hidden in enumerate(hiddens):
        h = Dropout(rate=dropout)(h)
        h = DenseConvolution(hidden,
                             use_bias=use_bias,
                             activation=activations[idx],
                             kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

    # LGCN blocks: select the top-K neighbor features per node, convolve
    # them, batch-normalize, and concatenate the result back onto the input.
    for idx, n_filter in enumerate(n_filters):
        top_k_h = Top_k_features(K=K)([h, adj])
        cur_h = LGConvolution(n_filter,
                              kernel_size=K,
                              use_bias=use_bias,
                              dropout=dropout,
                              activation=activations[idx],
                              kernel_regularizer=regularizers.l2(weight_decay))(top_k_h)
        cur_h = BatchNormalization()(cur_h)
        h = Concatenate()([h, cur_h])

    h = Dropout(rate=dropout)(h)
    # Output layer producing per-node class logits, restricted to the
    # masked nodes.
    h = DenseConvolution(out_channels,
                         use_bias=use_bias,
                         activation=activations[-1],
                         kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
    h = Mask()([h, mask])

    super().__init__(inputs=[x, adj, mask], outputs=h)
    self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                 optimizer=Nadam(lr=lr),
                 metrics=['accuracy'])
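# `Top_k_features` presumably implements the top-K feature selection of LGCN
# (Gao et al., "Large-Scale Learnable Graph Convolutional Networks", KDD
# 2018): for each node and each feature channel independently, keep the K
# largest values among the node's neighbors, yielding a [N, K+1, F] tensor
# (own features prepended) that a 1-D convolution like `LGConvolution` can
# slide over. A self-contained sketch of that step, under that assumption;
# not the package's actual code.
import tensorflow as tf


def top_k_neighbor_features(x, adj, K):
    """x: [N, F] features, adj: [N, N] dense adjacency -> [N, K+1, F]."""
    n = tf.shape(x)[0]
    sentinel = tf.constant(-1e9, dtype=x.dtype)
    # Row i of `cand` holds every node's features as i's neighbor candidates.
    cand = tf.tile(x[None, :, :], [n, 1, 1])                 # [N, N, F]
    # Non-neighbors get the sentinel so they never survive the top-K.
    cand = tf.where((adj > 0)[:, :, None], cand,
                    tf.fill(tf.shape(cand), sentinel))
    # Top-K along the neighbor axis, independently per feature channel.
    vals = tf.math.top_k(tf.transpose(cand, [0, 2, 1]), k=K).values  # [N, F, K]
    vals = tf.transpose(vals, [0, 2, 1])                     # [N, K, F]
    # Nodes with fewer than K neighbors: zero out leftover sentinels.
    vals = tf.where(vals <= sentinel / 2, tf.zeros_like(vals), vals)
    # Prepend each node's own features, ready for 1-D convolution over K+1.
    return tf.concat([x[:, None, :], vals], axis=1)          # [N, K+1, F]


# Example: 5 fully connected nodes, 4 channels, K=3 -> shape [5, 4, 4].
out = top_k_neighbor_features(tf.random.normal([5, 4]), tf.ones([5, 5]), K=3)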
def build(self, hiddens=[32], activations=['relu'], dropout=0.5,
          weight_decay=5e-4, lr=0.01, use_bias=False,
          eps1=0.3, eps2=1.2, lamb1=0.8, lamb2=0.8):
    with tf.device(self.device):
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx, name='features')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx, name='adj_matrix')
        # Indices of the nodes whose predictions are gathered as output.
        index = Input(batch_shape=[None], dtype=self.intx, name='index')

        h = x
        for hid, activation in zip(hiddens, activations):
            h = DenseConvolution(hid,
                                 use_bias=use_bias,
                                 activation=activation,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = DenseConvolution(self.graph.num_node_classes,
                             use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        model = TFKeras(inputs=[x, adj, index], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        # Extra hyperparameters kept on the instance for use during training.
        self.eps1 = eps1
        self.eps2 = eps2
        self.lamb1 = lamb1
        self.lamb2 = lamb2
        self.model = model
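# The stored eps1/eps2 radii and lamb1/lamb2 weights suggest two extra
# regularization terms applied elsewhere during training; that code is not
# shown here. As a rough illustration of the full-batch training this
# `build` sets up, below is a plain GradientTape step: `model`, `x`, `adj`,
# `train_idx`, and `y_train` are assumed to exist, and the extra regularizers
# are omitted. A custom loop (rather than `model.fit`) sidesteps Keras's
# batch-cardinality check on inputs whose first dimensions differ, which is
# presumably also why the snippet wraps the graph model in `TFKeras` instead
# of a plain `Model`.
import tensorflow as tf

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)


@tf.function
def train_step(x, adj, train_idx, y_train):
    with tf.GradientTape() as tape:
        logits = model([x, adj, train_idx], training=True)  # [num_train, C]
        loss = loss_fn(y_train, logits)
        if model.losses:                    # add L2 weight-decay terms
            loss += tf.add_n(model.losses)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss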