Example #1
# Imports assumed from the surrounding script (Keras 2.x plus keras-gat's GraphAttention layer)
import numpy as np
from keras.layers import Input, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from keras_gat import GraphAttention

# N (nodes), F (input features), F_ (hidden features), n_classes, n_attn_heads,
# dropout_rate, l2_reg, learning_rate and preprocess_features are defined earlier
# in the original script.

epochs = 10000                # Number of training epochs
es_patience = 100             # Patience for early stopping

# Preprocessing operations
X = preprocess_features(X)
A = A + np.eye(A.shape[0])  # Add self-loops

# Model definition (as per Section 3.3 of the paper)
X_in = Input(shape=(F,))
A_in = Input(shape=(N,))

dropout1 = Dropout(dropout_rate)(X_in)
# First GAT layer: n_attn_heads attention heads, their outputs concatenated
graph_attention_1 = GraphAttention(F_,
                                   attn_heads=n_attn_heads,
                                   attn_heads_reduction='concat',
                                   dropout_rate=dropout_rate,
                                   activation='elu',
                                   kernel_regularizer=l2(l2_reg),
                                   attn_kernel_regularizer=l2(l2_reg))([dropout1, A_in])
dropout2 = Dropout(dropout_rate)(graph_attention_1)
# Output GAT layer: a single attention head ('average' reduction) with softmax over n_classes
graph_attention_2 = GraphAttention(n_classes,
                                   attn_heads=1,
                                   attn_heads_reduction='average',
                                   dropout_rate=dropout_rate,
                                   activation='softmax',
                                   kernel_regularizer=l2(l2_reg),
                                   attn_kernel_regularizer=l2(l2_reg))([dropout2, A_in])

# Build model
model = Model(inputs=[X_in, A_in], outputs=graph_attention_2)
optimizer = Adam(lr=learning_rate)
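
The snippet stops after defining the optimizer. A minimal sketch of how training could continue in the full-batch style this layer expects, assuming one-hot label matrices Y_train / Y_val and 0/1 node masks train_mask / val_mask (these names are not from the original):

from keras.callbacks import EarlyStopping

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])  # per-node masking via sample weights
model.summary()

es_callback = EarlyStopping(monitor='val_loss', patience=es_patience)
model.fit([X, A], Y_train,
          sample_weight=train_mask,                   # hypothetical 0/1 mask over the N nodes
          validation_data=([X, A], Y_val, val_mask),  # hypothetical validation mask
          epochs=epochs,
          batch_size=N,        # full batch: the whole graph at every step
          shuffle=False,
          callbacks=[es_callback])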
Example #2
# Same imports as Example #1 (Keras 2.x plus keras-gat's GraphAttention); F, N, F_,
# n_classes, dropout_rate and l2_reg are again defined earlier in the original script.
learning_rate = 0.005  # Learning rate for Adam
epochs = 2000          # Number of epochs to run for
es_patience = 100      # Patience for early stopping

# Preprocessing operations
X_train /= X_train.sum(1).reshape(-1, 1)  # Row-normalize the feature matrix
A_train = A_train.toarray()               # Convert the sparse adjacency matrix to dense

# Model definition (as per Section 3.3 of the paper)
X = Input(shape=(F, ))
A = Input(shape=(N, ))

dropout1 = Dropout(dropout_rate)(X)
graph_attention_1 = GraphAttention(
    F_,
    attn_heads=8,
    attn_heads_reduction='concat',
    activation='elu',
    kernel_regularizer=l2(l2_reg))([dropout1, A])
dropout2 = Dropout(dropout_rate)(graph_attention_1)
graph_attention_2 = GraphAttention(
    n_classes,
    attn_heads=1,
    attn_heads_reduction='average',
    activation='softmax',
    kernel_regularizer=l2(0.0005))([dropout2, A])  # hard-coded in the original; presumably the same value as l2_reg

# Build model
model = Model(inputs=[X, A], outputs=graph_attention_2)
optimizer = Adam(lr=learning_rate)  # learning_rate == 0.005, as hard-coded in the original
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])  # closing argument assumed; the original snippet is truncated here
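
The training call is likewise missing here; a hedged sketch under the same assumptions as the Example #1 sketch (Y_* label matrices, 0/1 node masks, EarlyStopping imported from keras.callbacks):

es_callback = EarlyStopping(monitor='val_loss', patience=es_patience)
model.fit([X_train, A_train], Y_train,
          sample_weight=train_mask,
          validation_data=([X_train, A_train], Y_val, val_mask),
          epochs=epochs,
          batch_size=N,
          shuffle=False,
          callbacks=[es_callback])

# Evaluate on held-out nodes (Y_test and test_mask are also hypothetical)
eval_results = model.evaluate([X_train, A_train], Y_test,
                              sample_weight=test_mask,
                              batch_size=N)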