Example #1
    def conv_layer(row, col, x):
        conv = tf.keras.layers.Conv1D(hidden_dim * 2,
                                      5,
                                      padding='same',
                                      activation='tanh',
                                      input_shape=(row, col))(x)

        gcn_1 = GraphConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_2 = ChebConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_3 = ARMAConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_1 = tf.keras.layers.Concatenate()([gcn_1, gcn_2, gcn_3])
        gcn_1 = tf.keras.layers.Conv1D(3 * graph_channels,
                                       5,
                                       padding='same',
                                       activation='tanh',
                                       input_shape=(row, col))(gcn_1)

        conv = tf.keras.layers.Concatenate()([x, conv, gcn_1, gcn_2, gcn_3])
        conv = tf.keras.layers.Activation("relu")(conv)
        conv = tf.keras.layers.SpatialDropout1D(0.1)(conv)

        return conv
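The helper above closes over hidden_dim, graph_channels and As_in from the surrounding script. A minimal sketch of how it might be wired into a model, assuming Spektral 0.x batch-mode layers; every name and shape below is an illustrative assumption, not part of the original example:

import tensorflow as tf
from spektral.layers import GraphConv, ChebConv, ARMAConv

hidden_dim = 32        # assumed width of the Conv1D block
graph_channels = 16    # assumed width of each graph-conv branch
row, col = 24, 8       # assumed nodes per sample and features per node

X_in = tf.keras.layers.Input(shape=(row, col))
As_in = tf.keras.layers.Input(shape=(row, row, 2))   # stacked adjacency tensors

h = conv_layer(row, col, X_in)      # the block defined above
out = tf.keras.layers.Dense(1)(h)

model = tf.keras.Model(inputs=[X_in, As_in], outputs=out)
model.compile(optimizer='adam', loss='mse')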
Example #2
A, X, y, train_mask, val_mask, test_mask = citation.load_data(dataset)

# Parameters
channels = 16           # Number of channels in the first layer
K = 2                   # Max degree of the Chebyshev polynomials
N = X.shape[0]          # Number of nodes in the graph
F = X.shape[1]          # Original size of node features
n_classes = y.shape[1]  # Number of classes
dropout = 0.5           # Dropout rate for the features
l2_reg = 5e-4 / 2       # L2 regularization rate
learning_rate = 1e-2    # Learning rate
epochs = 200            # Number of training epochs
es_patience = 10        # Patience for early stopping

# Preprocessing operations
fltr = ChebConv.preprocess(A).astype('f4')
X = X.toarray()

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = ChebConv(channels,
                        K=K,
                        activation='relu',
                        kernel_regularizer=l2(l2_reg),
                        use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = ChebConv(n_classes,
                        K=K,
                        activation='softmax',
                        use_bias=False)([dropout_2, fltr_in])
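A minimal sketch of how this two-layer Chebyshev model is typically built and trained full-batch, weighting the loss with the boolean masks from citation.load_data so that only training nodes contribute. Model and Adam are assumed to come from tensorflow.keras; the training call itself is not part of the excerpt above:

model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer=Adam(learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])

model.fit([X, fltr], y,
          sample_weight=train_mask,
          validation_data=([X, fltr], y, val_mask),
          batch_size=N,        # the whole graph is one batch
          epochs=epochs,
          shuffle=False)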
Example #3
l2_reg = 5e-4  # Regularization rate for l2
learning_rate = 1e-2  # Learning rate for SGD
epochs = 20000  # Number of training epochs
es_patience = 200  # Patience for early stopping

# Preprocessing operations
fltr = chebyshev_filter(A, cheb_k)

# Model definition
X_in = Input(shape=(F, ))
# One input filter for each degree of the Chebyshev approximation
fltr_in = [Input((N, ), sparse=True) for _ in range(support)]

dropout_1 = Dropout(dropout_rate)(X_in)
graph_conv_1 = ChebConv(16,
                        activation='relu',
                        kernel_regularizer=l2(l2_reg),
                        use_bias=False)([dropout_1] + fltr_in)
dropout_2 = Dropout(dropout_rate)(graph_conv_1)
graph_conv_2 = ChebConv(n_classes, activation='softmax',
                        use_bias=False)([dropout_2] + fltr_in)

# Build model
model = Model(inputs=[X_in] + fltr_in, outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Callbacks
callbacks = [
Example #4
epochs = 200  # Number of training epochs
patience = 10  # Patience for early stopping
a_dtype = dataset[0].a.dtype  # Only needed for TF 2.1

N = dataset.n_nodes  # Number of nodes in the graph
F = dataset.n_node_features  # Original size of node features
n_out = dataset.n_labels  # Number of classes

# Model definition
x_in = Input(shape=(F, ))
a_in = Input((N, ), sparse=True, dtype=a_dtype)

do_1 = Dropout(dropout)(x_in)
gc_1 = ChebConv(channels,
                K=K,
                activation='relu',
                kernel_regularizer=l2(l2_reg),
                use_bias=False)([do_1, a_in])
do_2 = Dropout(dropout)(gc_1)
gc_2 = ChebConv(n_out, K=K, activation='softmax', use_bias=False)([do_2, a_in])

# Build model
model = Model(inputs=[x_in, a_in], outputs=gc_2)
optimizer = Adam(lr=learning_rate)
model.compile(
    optimizer=optimizer,
    loss=CategoricalCrossentropy(reduction='sum'),  # summed loss; mask-derived sample weights rescale it to a mean
    weighted_metrics=['acc'])
model.summary()

# Train model
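The excerpt stops before the training call. A minimal sketch using Spektral's SingleLoader for full-batch training, assuming the dataset exposes the usual boolean citation masks (mask_tr, mask_va); epochs and patience come from the parameters above:

from spektral.data import SingleLoader
from tensorflow.keras.callbacks import EarlyStopping

loader_tr = SingleLoader(dataset, sample_weights=dataset.mask_tr)
loader_va = SingleLoader(dataset, sample_weights=dataset.mask_va)

model.fit(loader_tr.load(),
          steps_per_epoch=loader_tr.steps_per_epoch,
          validation_data=loader_va.load(),
          validation_steps=loader_va.steps_per_epoch,
          epochs=epochs,
          callbacks=[EarlyStopping(patience=patience, restore_best_weights=True)])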
Example #5
        y = data['y']

    y = np.asarray(y)

    # BUILDING MODEL
    # Parameters
    l2_reg = 5e-4  # Regularization rate for l2
    learning_rate = 5e-4  # Learning rate for SGD
    batch_size = 32  # Batch size
    epochs = 50  # Number of training epochs
    es_patience = 0  # Patience for early stopping
    channels = 16  # Number of channels in the first layer
    K = 2
    n_out = 3

    fltr = ChebConv.preprocess(A).astype('f4')
    assert fltr.shape == adjacency_matrix.shape

    f1_weighted_per_fold.clear()
    f1_macro_per_fold.clear()
    f1_micro_per_fold.clear()
    testacc_per_fold.clear()
    precision_per_fold.clear()
    recall_per_fold.clear()

    f1_weighted_per_level.clear()
    f1_macro_per_level.clear()
    f1_micro_per_level.clear()
    testacc_per_level.clear()
    precision_per_level.clear()
    recall_per_level.clear()
Example #6
test_samples = X_test.shape[0]
N_nodes = X_train.shape[-2]   # Number of nodes in the graph


# Preprocessing operations
fltr = chebyshev_filter(A, cheb_k)  # normalize adjacency matrix


# Model definition
X_in = layers.Input(shape=(N_nodes, 1))

# One input filter for each degree of the Chebyshev approximation
fltr_in = [layers.Input((N_nodes, N_nodes)) for _ in range(support)]

graph_conv_1 = ChebConv(channels,
                        activation='relu',
                        use_bias=False)([X_in] + fltr_in)
graph_conv_2 = ChebConv(2 * channels,
                        activation='softmax',
                        use_bias=False)([graph_conv_1] + fltr_in)
flatten = layers.Flatten()(graph_conv_2)
output = layers.Dense(10, activation='softmax')(flatten)

# Build model
model = models.Model(inputs=[X_in] + fltr_in, outputs=output)
optimizer = optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()
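chebyshev_filter returns one matrix per degree of the approximation (support = cheb_k + 1 in total), and the dense batch-mode inputs above expect one copy of each filter per sample. A minimal feeding sketch under those assumptions; batch_size, X_train and y_train are taken to exist in the surrounding script:

import numpy as np
import scipy.sparse as sp

# Densify the shared filters (one per Chebyshev degree, support in total)
fltr_dense = [f.toarray() if sp.issparse(f) else np.asarray(f) for f in fltr]

# Tile each filter along the batch axis so every sample carries its own copy
X_batch = X_train[:batch_size]                       # (batch, N_nodes, 1)
y_batch = y_train[:batch_size]
fltr_batch = [np.repeat(f[None, ...], X_batch.shape[0], axis=0)
              for f in fltr_dense]

model.train_on_batch([X_batch] + fltr_batch, y_batch)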