Example #1
from spektral.utils import normalized_laplacian, rescale_laplacian

def preprocess(a):
    # Symmetrically normalize the adjacency matrix, then rescale the
    # Laplacian's spectrum for use as a convolutional filter
    a = normalized_laplacian(a)
    a = rescale_laplacian(a)
    return a
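For context, these two utility calls implement the standard spectral preprocessing for Chebyshev-style graph convolutions: normalized_laplacian builds the symmetrically normalized Laplacian of the adjacency matrix, and rescale_laplacian maps its spectrum into [-1, 1]:

    L = I - D^{-1/2} A D^{-1/2}, \qquad \tilde{L} = \frac{2}{\lambda_{\max}} L - I

When lmax is fixed to 2, as in Example #2 below, the rescaling simplifies to \tilde{L} = L - I = -D^{-1/2} A D^{-1/2}.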
Example #2
from spektral.utils import normalized_laplacian, rescale_laplacian

def preprocess(A):
    # Same pipeline with explicit arguments: symmetric normalization, and
    # rescaling with a fixed upper bound lmax=2 on the largest eigenvalue
    fltr = normalized_laplacian(A, symmetric=True)
    fltr = rescale_laplacian(fltr, lmax=2)
    return fltr
Example #3
from spektral.utils import normalized_laplacian, rescale_laplacian

def preprocess(A):
    # Compute the normalized Laplacian L of A and rescale its spectrum
    L = normalized_laplacian(A)
    L = rescale_laplacian(L)
    return L
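All three variants compute the same kind of filter. As a quick sanity check, the version from Example #3 can be exercised on a small hand-made adjacency matrix (the 4-node graph below is purely illustrative):

import numpy as np

# Hypothetical toy input: adjacency matrix of a small undirected graph
A = np.array([[0., 1., 1., 0.],
              [1., 0., 1., 0.],
              [1., 1., 0., 1.],
              [0., 0., 1., 0.]])

fltr = preprocess(A)   # preprocess as defined in Example #3
print(fltr.shape)      # (4, 4): the filter has the same shape as A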
Example #4
from keras.layers import Input, Dropout
from keras.regularizers import l2
from spektral.layers import ARMAConv
from spektral.utils import normalized_laplacian, rescale_laplacian

# X (node features), A (adjacency matrix) and y (one-hot labels) are assumed
# to be loaded beforehand, e.g. from a citation dataset such as Cora

# Parameters
ARMA_T = 1              # Depth of each ARMA_1 filter
ARMA_K = 2              # Number of parallel ARMA_1 filters
recurrent = True        # Share weights like a recurrent net in each head
N = X.shape[0]          # Number of nodes in the graph
F = X.shape[1]          # Original feature dimensionality
n_classes = y.shape[1]  # Number of classes
dropout_rate = 0.75     # Dropout rate applied to the input of GCN layers
l2_reg = 5e-4           # Regularization rate for l2
learning_rate = 1e-2    # Learning rate for the optimizer
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = normalized_laplacian(A, symmetric=True)
fltr = rescale_laplacian(fltr, lmax=2)

# Model definition
# Inputs: one row of features per node, plus the N x N filter fed as a
# sparse tensor
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

# First ARMA block: ARMA_K parallel stacks, each of depth ARMA_T, with
# dropout applied to the input features
dropout_1 = Dropout(dropout_rate)(X_in)
graph_conv_1 = ARMAConv(16,
                        T=ARMA_T,
                        K=ARMA_K,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='elu',
                        gcn_activation='elu',
                        kernel_regularizer=l2(l2_reg))([dropout_1, fltr_in])
dropout_2 = Dropout(dropout_rate)(graph_conv_1)
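The snippet ends mid-model. A plausible completion, modeled on Spektral's citation-network examples from the same API era, might look as follows; the output layer settings, the Adam optimizer, and the train_mask / val_mask boolean label masks are assumptions, not part of the original snippet:

from keras.callbacks import EarlyStopping
from keras.models import Model
from keras.optimizers import Adam

# Output block: map to class probabilities with a softmax ARMA layer
graph_conv_2 = ARMAConv(n_classes,
                        T=ARMA_T,
                        K=ARMA_K,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='softmax',
                        gcn_activation=None)([dropout_2, fltr_in])

# Build and train in single mode: the whole graph is one batch, and the
# hypothetical train_mask / val_mask select the labeled nodes
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer=Adam(lr=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])

model.fit([X, fltr], y,
          sample_weight=train_mask,
          epochs=epochs,
          batch_size=N,
          validation_data=([X, fltr], y, val_mask),
          shuffle=False,
          callbacks=[EarlyStopping(patience=es_patience,
                                   restore_best_weights=True)])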