def imdb_experiment():
    # SAG-pooled contextual 2-WL GCN on the IMDB dataset, fold 0.
    model_class = gnn_models.SagCWL2GCN
    dsm = eval_ds.IMDB_8()

    model = model_class(
        act="sigmoid", local_act="sigmoid",
        squeeze_output=True,
        conv_layer_dims=[dsm.dim_wl2_features(), 40, 40, 40, 1],
        att_conv_layer_dims=[dsm.dim_wl2_features(), 1],
        conv_stack_tf="keep_input",
        bias=True)

    opt = keras.optimizers.Adam(0.0007)

    model.compile(
        optimizer=opt,
        loss="binary_crossentropy",
        metrics=["accuracy"])

    # The validation split of fold 0 is not used here; the test fold serves
    # as the evaluation set during training.
    train, val = dsm.get_train_fold(0, output_type=model_class.input_type)
    test = dsm.get_test_fold(0, output_type=model_class.input_type)

    evaluate.train(
        model, train, test, verbose=2, epochs=1000,
        label=f"{dsm.name}_{model.name}")

    print(model.evaluate(test))

def wl2_power_experiment():
    # Expressiveness check on the synthetic "threesix" dataset.
    # model_class = gnn_models.AvgGIN
    model_class = gnn_models.AvgCWL2GCN
    model_class = gnn_models.with_fc(model_class)

    dsm = synthetic.threesix_dataset(stored=True)(
        wl2_neighborhood=1)  # ok(3, 2, 1)

    # Pick the input dimension matching the model's expected input encoding.
    if model_class.input_type == "dense":
        in_dim = dsm.dim_dense_features()
    elif model_class.input_type == "wl1":
        in_dim = dsm.dim_wl1_features()
    else:
        in_dim = dsm.dim_wl2_features()

    if in_dim == 0:
        in_dim = 1

    opt = keras.optimizers.Adam(0.1)

    model = model_class(
        act="sigmoid",
        squeeze_output=True,
        layer_dims=[in_dim, 4, 1],
        fc_layer_dims=[1, 10, 1],
        neighborhood_mask=1,  # ok(3, 2), nok(-1, 1)
        bias=False,
        no_local_hash=True)

    model.compile(
        optimizer=opt,
        loss="binary_crossentropy",
        metrics=["accuracy"])

    ds = dsm.get_all(
        output_type=model_class.input_type,
        shuffle=True)

    evaluate.train(
        model, ds, verbose=1, epochs=200, patience=200,
        label=f"{dsm.name}_{model.name}")

    # Compare ground-truth labels against the model's predictions.
    print(
        list(dsm.get_all(output_type="dense"))[0][1].numpy(),
        model.predict(dsm.get_all(output_type=model_class.input_type)))

def synthetic_experiment2():
    model_class = gnn_models.AvgCWL2GCN

    dsm = synthetic.balanced_triangle_classification_dataset(stored=True)(
        with_holdout=False,
        wl2_neighborhood=1,
        wl2_batch_size=dict(batch_graph_count=228))

    if model_class.input_type == "dense":
        in_dim = dsm.dim_dense_features()
    else:
        in_dim = dsm.dim_wl2_features()

    if in_dim == 0:
        in_dim = 1

    opt = keras.optimizers.Adam(0.0005)

    model = model_class(
        act="sigmoid", local_act="relu",
        squeeze_output=True,
        layer_dims=[in_dim, 32, 32, 32, 1],
        att_conv_layer_dims=[in_dim, 32, 32, 32, 1],
        bias=True,
        no_local_hash=True)

    model.compile(
        optimizer=opt,
        loss="binary_crossentropy",
        metrics=["accuracy"])

    i = 5
    ds = dsm.get_train_fold(
        i, output_type=model_class.input_type)
    ds_test = dsm.get_test_fold(
        i, output_type=model_class.input_type)

    evaluate.train(
        model, ds, ds_test, verbose=2, epochs=5000, patience=2000,
        label=f"{dsm.name}_{model.name}")

def proteins_experiment():
    # Averaging 2-WL GCN on the PROTEINS dataset, fold 1.
    model_class = gnn_models.AvgWL2GCN
    dsm = eval_ds.Proteins_6()

    model = model_class(
        act="sigmoid",
        squeeze_output=True,
        conv_layer_dims=[dsm.dim_wl2_features(), 64, 64, 64, 1],
        vert_only_pool=False,
        bias=True)

    opt = keras.optimizers.Adam(0.0001)

    model.compile(
        optimizer=opt,
        loss="binary_crossentropy",
        metrics=["accuracy"])

    ds, ds_val = dsm.get_train_fold(
        1, output_type=model_class.input_type)

    evaluate.train(
        model, ds, ds_val, verbose=1,
        label=f"{dsm.name}_{model.name}")

def kernel_experiment():
    model_class = kernel_models.WL_sp
    model = model_class(C=0.001)

    dsm = synthetic.balanced_triangle_classification_dataset(stored=True)(
        with_holdout=False)

    for i in range(10):
        ds = dsm.get_train_fold(
            i, output_type=model_class.input_type)
        ds_test = dsm.get_test_fold(
            i, output_type=model_class.input_type)
        # ds = dsm.get_all(output_type=model_class.input_type)

        print(i)
        print(evaluate.train(
            model, ds, ds_test,
            label=f"{dsm.name}_{model.name}").history)

def dd_experiment():
    # Averaging 2-WL GCN on the D&D dataset, trained on the full dataset.
    model_class = gnn_models.AvgWL2GCN
    dsm = eval_ds.DD_2()

    model = model_class(
        act="sigmoid",
        squeeze_output=True,
        layer_dims=[dsm.dim_wl2_features(), 64, 64, 64, 1],
        bias=True)

    opt = keras.optimizers.Adam(0.00001)

    model.compile(
        optimizer=opt,
        loss="binary_crossentropy",
        metrics=["accuracy"])

    ds_raw = dsm.get_all(
        output_type=model_class.input_type)
    ds = ds_raw

    evaluate.train(
        model, ds, verbose=1,
        label=f"{dsm.name}_{model.name}")

    print(model.evaluate(ds))
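

# A minimal entry-point sketch for running one of the experiments above from the
# command line. The argparse-based dispatch and the experiment name mapping are
# assumptions added for illustration, not part of the original experiment setup.
if __name__ == "__main__":
    import argparse

    experiments = {
        "imdb": imdb_experiment,
        "wl2_power": wl2_power_experiment,
        "synthetic2": synthetic_experiment2,
        "proteins": proteins_experiment,
        "kernel": kernel_experiment,
        "dd": dd_experiment,
    }

    parser = argparse.ArgumentParser(
        description="Run a single GNN/kernel experiment.")
    parser.add_argument(
        "experiment", choices=sorted(experiments),
        help="Name of the experiment to run.")
    args = parser.parse_args()

    experiments[args.experiment]()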