Example #1
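The snippets in this section appear to assume roughly the following imports (StellarGraph 1.x with the TensorFlow 2 Keras API); graphs and project-specific helpers such as split_data or encode_classes are defined elsewhere in each source project:

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model, layers, losses, optimizers, regularizers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
import stellargraph as sg
from stellargraph.mapper import (
    FullBatchNodeGenerator,
    FullBatchLinkGenerator,
    CorruptedGenerator,
)
from stellargraph.layer import GCN, DeepGraphInfomax, LinkEmbedding
from stellargraph.utils import plot_history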
def create_GCN_model(graph):

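    # Full-batch generator: each step yields the entire graph (node features plus
    # adjacency); flow() selects two example nodes with one-hot targets.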
    generator = FullBatchNodeGenerator(graph)
    train_gen = generator.flow([1, 2], np.array([[1, 0], [0, 1]]))

    base_model = GCN(
        layer_sizes=[8, 2],
        generator=generator,
        bias=True,
        dropout=0.5,
        activations=["elu", "softmax"],
    )

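    # in_out_tensors() returns the Keras input tensors and the GCN output tensor,
    # ready to be wrapped in a standard Keras Model.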
    x_inp, x_out = base_model.in_out_tensors()

    keras_model = Model(inputs=x_inp, outputs=x_out)

    return base_model, keras_model, generator, train_gen
Example #2
def create_GCN_model_sparse(graph):
    generator = FullBatchNodeGenerator(graph, sparse=True, method="gcn")
    train_gen = generator.flow([0, 1], np.array([[1, 0], [0, 1]]))

    layer_sizes = [2, 2]
    gcn = GCN(
        layer_sizes=layer_sizes,
        activations=["elu", "elu"],
        generator=generator,
        dropout=0.3,
        kernel_regularizer=regularizers.l2(5e-4),
    )

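    # Setting each layer's initializer to "ones" (via private StellarGraph
    # attributes) presumably makes the initial weights deterministic for testing.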
    for layer in gcn._layers:
        layer._initializer = "ones"
    x_inp, x_out = gcn.in_out_tensors()
    keras_model = Model(inputs=x_inp, outputs=x_out)
    return gcn, keras_model, generator, train_gen
Example #3
def make_gcn(train_targets, generator):
    gcn = GCN(layer_sizes=[90, 90],
              activations=["relu", "relu"],
              generator=generator,
              dropout=0.5)

    x_inp, x_out = gcn.in_out_tensors()
    # alternative readout: predictions = keras.layers.Softmax()(x_out)
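    # Dense readout maps the final 90-dimensional GCN embedding to one sigmoid
    # output per target column.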
    predictions = layers.Dense(units=train_targets.shape[1],
                               activation="sigmoid")(x_out)

    gcn_model = Model(inputs=x_inp, outputs=predictions)
    gcn_model.compile(
        optimizer=optimizers.Adam(learning_rate=0.005),
        loss=losses.mean_squared_error,
        metrics=["acc"],
    )
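    # A second Model over the same tensors exposes the GCN embeddings and shares
    # weights with gcn_model, so it needs no separate training.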
    embedding_model = Model(inputs=x_inp, outputs=x_out)
    return gcn_model, embedding_model
Example #4
fullbatch_generator = FullBatchNodeGenerator(G, sparse=False)
gcn_model = GCN(layer_sizes=[2], activations=["relu"], generator=fullbatch_generator)

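# CorruptedGenerator pairs each full-batch sample with a feature-shuffled copy,
# which Deep Graph Infomax uses as negative examples.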
corrupted_generator = CorruptedGenerator(fullbatch_generator)
gen = corrupted_generator.flow(G.nodes())

infomax = DeepGraphInfomax(gcn_model, corrupted_generator)
x_in, x_out = infomax.in_out_tensors()

model = Model(inputs=x_in, outputs=x_out)
model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(learning_rate=1e-3))

epochs = 100

es = EarlyStopping(monitor="loss", min_delta=0, patience=20)
history = model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])
plot_history(history)

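# Reuse the GCN layers trained inside Deep Graph Infomax as a standalone embedding model.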
x_emb_in, x_emb_out = gcn_model.in_out_tensors()

# for full batch models, squeeze out the batch dim (which is 1)
x_out = tf.squeeze(x_emb_out, axis=0)
emb_model = Model(inputs=x_emb_in, outputs=x_out)

all_embeddings = emb_model.predict(fullbatch_generator.flow(G.nodes()))

test = pd.DataFrame(all_embeddings, index=G.nodes())


test.to_csv("/home/jonno/setse_1_data/test_embs.csv")
Example #5
    train_dataset, test_dataset = split_data(node_classes)
    train_targets, test_targets, target_encoding = encode_classes(
        train_dataset, test_dataset)

    ###############################################################

    # creating GCN model
    gcn_generator = FullBatchNodeGenerator(stellar_g,
                                           method="gcn",
                                           sparse=False)
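    # method="gcn" pre-applies the symmetrically normalised adjacency matrix
    # expected by GCN layers.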
    train_gcn_gen = gcn_generator.flow(train_dataset.index, train_targets)
    gcn = GCN(layer_sizes=[16, 16],
              activations=['relu', 'relu'],
              generator=gcn_generator,
              dropout=0.5)  # 2 GCN layers
    gcn_inp, gcn_out = gcn.in_out_tensors()  # for the KERAS model

    # creating KERAS model with the GCN model layers
    gcn_dense_layer = layers.Dense(units=train_targets.shape[1],
                                   activation="softmax")(gcn_out)
    keras_gcn = Model(inputs=gcn_inp,
                      outputs=gcn_dense_layer)  # 2 GCN, 1 Dense
    keras_gcn.compile(
        optimizer="adam",
        loss=losses.categorical_crossentropy,
        metrics=["accuracy"],
    )
    keras_gcn.fit(train_gcn_gen,
                  epochs=10,
                  verbose=1)  # batch_size is omitted: the full-batch generator yields one batch per epoch
Example #6
print(G_test.info())

epochs = 50

train_gen = sg.mapper.FullBatchLinkGenerator(G_train, method="gcn")
train_flow = train_gen.flow(edge_ids_train, edge_labels_train)

test_gen = FullBatchLinkGenerator(G_test, method="gcn")
test_flow = test_gen.flow(edge_ids_test, edge_labels_test)

gcn = GCN(layer_sizes=[16, 16],
          activations=["relu", "relu"],
          generator=train_gen,
          dropout=0.3)

x_inp, x_out = gcn.in_out_tensors()

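# LinkEmbedding scores each edge by combining its two node embeddings with an
# inner product ("ip") and applying the chosen activation.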
prediction = LinkEmbedding(activation="relu", method="ip")(x_out)
prediction = keras.layers.Reshape((-1, ))(prediction)

model = keras.Model(inputs=x_inp, outputs=prediction)

model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.01),
    loss=keras.losses.binary_crossentropy,
    metrics=["accuracy"],
)

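# Baseline metrics for the untrained model, for comparison after fitting.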
init_train_metrics = model.evaluate(train_flow)
init_test_metrics = model.evaluate(test_flow)