Example 1
from spektral.layers import GCNConv, GlobalSumPool
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

class Net(Model):  # class wrapper and imports added; only __init__ was in the original
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # l2_reg (L2 regularization strength) is assumed to be defined elsewhere
        self.conv1 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
        self.conv2 = GCNConv(32, activation="elu", kernel_regularizer=l2(l2_reg))
        self.flatten = GlobalSumPool()
        self.fc1 = Dense(512, activation="relu")
        self.fc2 = Dense(10, activation="softmax")  # MNIST has 10 classes
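Only the constructor was shown; a minimal sketch of the forward pass such a subclassed model would need (the `call` method below is our assumption, with GCNConv layers taking `[x, a]`, i.e. node features plus a pre-normalized adjacency matrix):

    def call(self, inputs):
        x, a = inputs                # node features and normalized adjacency
        x = self.conv1([x, a])
        x = self.conv2([x, a])
        out = self.flatten(x)        # sum-pool node features into one vector per graph
        out = self.fc1(out)
        return self.fc2(out)         # class probabilities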
Example 2
from spektral.layers import ECCConv, GlobalSumPool, GraphMasking
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

class Net(Model):  # class wrapper and imports added; only __init__ was in the original
    def __init__(self):
        super().__init__()
        self.masking = GraphMasking()  # masks zero-padded nodes in batch mode
        self.conv1 = ECCConv(32, activation="relu")
        self.conv2 = ECCConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense = Dense(n_out)  # n_out (target dimension) assumed defined elsewhere
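Again, only `__init__` appeared in the original; a plausible forward pass, assuming batch-mode inputs `[x, a, e]` (node features, adjacency, edge features), would be:

    def call(self, inputs):
        x, a, e = inputs
        x = self.masking(x)          # flag zero-padding so pooling ignores fake nodes
        x = self.conv1([x, a, e])
        x = self.conv2([x, a, e])
        output = self.global_pool(x)
        return self.dense(output)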
Example 3
from spektral.layers import GCSConv, GlobalSumPool, GraphMasking, MinCutPool
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model

class Net(Model):  # class wrapper and imports added; only __init__ was in the original
    def __init__(self):
        super().__init__()
        self.mask = GraphMasking()
        self.conv1 = GCSConv(32, activation="relu")
        self.pool = MinCutPool(N // 2)  # N (max nodes per graph) assumed defined elsewhere
        self.conv2 = GCSConv(32, activation="relu")
        self.global_pool = GlobalSumPool()
        self.dense1 = Dense(n_out)  # n_out (target dimension) assumed defined elsewhere
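As above, the forward pass is our sketch, not part of the original snippet; MinCutPool returns the coarsened node features and adjacency, which feed the second convolution:

    def call(self, inputs):
        x, a = inputs
        x = self.mask(x)
        x = self.conv1([x, a])
        x, a = self.pool([x, a])     # cluster nodes: returns pooled features and adjacency
        x = self.conv2([x, a])
        output = self.global_pool(x)
        return self.dense1(output)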
Example 4
import tensorflow as tf
from spektral.layers import ECCConv, GlobalSumPool
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

def build_model(dataset):
    F = dataset.n_node_features  # Dimension of node features
    S = dataset.n_edge_features  # Dimension of edge features
    n_out = dataset.n_labels  # Dimension of the target

    # Inputs in disjoint mode: node features, sparse adjacency, edge features,
    # and the segment ids that map each node to its graph in the batch
    X_in = Input(shape=(F,), name="X_in")
    A_in = Input(shape=(None,), sparse=True, name="A_in")
    E_in = Input(shape=(S,), name="E_in")
    I_in = Input(shape=(), name="segment_ids_in", dtype=tf.int32)

    X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
    X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
    X_3 = GlobalSumPool()([X_2, I_in])
    output = Dense(n_out)(X_3)

    # Build model
    model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
    model.summary()

    return model
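A hypothetical usage sketch (the choice of QM9 and the hyperparameters are our assumptions; any Spektral graph-level dataset with edge features would do):

import tensorflow as tf
from spektral.data import DisjointLoader
from spektral.datasets import QM9

dataset = QM9(amount=1000)  # small slice of QM9, just for illustration
model = build_model(dataset)
model.compile(optimizer="adam", loss="mse")

loader = DisjointLoader(dataset, batch_size=32, epochs=1)
model.fit(loader.load(), steps_per_epoch=loader.steps_per_epoch)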
Example 5
import tensorflow as tf
from spektral.data import DisjointLoader
from spektral.layers import ECCConv, GlobalSumPool
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# dataset, idx_te, dataset_tr, batch_size, and epochs are assumed to be
# defined earlier (e.g. by loading a dataset and splitting its indices)
dataset_te = dataset[idx_te]

loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1)

################################################################################
# Build model
################################################################################
# F (node feature dim), S (edge feature dim), n_out, and learning_rate are
# assumed to come from the dataset and the experiment config
X_in = Input(shape=(F,))
A_in = Input(shape=(None,), sparse=True)
E_in = Input(shape=(S,))
I_in = Input(shape=(), dtype=tf.int64)

X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
X_3 = GlobalSumPool()([X_2, I_in])
output = Dense(n_out, activation="sigmoid")(X_3)

model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
optimizer = Adam(learning_rate)
loss_fn = BinaryCrossentropy()


################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        loss = loss_fn(target, predictions) + sum(model.losses)
    # Standard gradient update for a custom TF2 training step
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
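A minimal loop that drives `train_step` (our sketch, following the pattern used in Spektral's disjoint-mode examples; not in the original snippet):

step = 0
for batch in loader_tr:  # the loader stops after `epochs` passes over the data
    step += 1
    loss = train_step(*batch)
    if step == loader_tr.steps_per_epoch:
        step = 0
        print("Loss: {}".format(loss))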
Example 6
from sklearn.model_selection import train_test_split
from spektral.layers import EdgeConditionedConv, GlobalSumPool
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# Train/test split (A, X, E, y are the stacked adjacency matrices, node
# features, edge features, and targets of a batch-mode dataset)
A_train, A_test, \
X_train, X_test, \
E_train, E_test, \
y_train, y_test = train_test_split(A, X, E, y, test_size=0.1, random_state=0)

################################################################################
# BUILD MODEL
################################################################################
# Batch mode: every graph is zero-padded to N nodes, with dense adjacency
# and edge-feature tensors (N, F, S, n_out are assumed defined elsewhere)
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))
E_in = Input(shape=(N, N, S))

# EdgeConditionedConv is the pre-1.0 Spektral name of this layer (renamed
# ECCConv in later versions)
X_1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
X_2 = EdgeConditionedConv(32, activation='relu')([X_1, A_in, E_in])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in, E_in], outputs=output)
optimizer = Adam(learning_rate=learning_rate)  # `lr` is a deprecated alias in TF2
model.compile(optimizer=optimizer, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
model.fit([X_train, A_train, E_train],
          y_train,
          batch_size=batch_size,
          epochs=epochs)
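A natural follow-up, using the same Keras API as the `fit` call above (our sketch; not in the original), is to score the held-out split:

eval_loss = model.evaluate([X_test, A_test, E_test], y_test, batch_size=batch_size)
print('Test loss: {}'.format(eval_loss))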