if use_pretrain:
        pretrained_path = './result/PreTrained_GraphBert/' + dataset_name + '/node_reconstruct_model/'

    method_obj = MethodGraphBertNodeClassification(bert_config,
                                                   pretrained_path,
                                                   dataset_name)
    #---- set to false to run faster ----
    method_obj.spy_tag = True
    method_obj.max_epoch = max_epoch
    method_obj.lr = lr
    if use_pretrain:
        method_obj.save_pretrained_path = './result/PreTrained_GraphBert/' + dataset_name + '/node_pretrain_classification_model/'
    else:
        method_obj.save_pretrained_path = './result/PreTrained_GraphBert/' + dataset_name + '/node_classification_model/'

    result_obj = ResultSaving()
    result_obj.result_destination_folder_path = './result/GraphBert/'
    result_obj.result_destination_file_name = dataset_name + '_' + str(
        num_hidden_layers)

    setting_obj = NewSettings()

    evaluate_obj = None
    # ------------------------------------------------------

    # ---- running section ---------------------------------
    setting_obj.prepare(data_obj, method_obj, result_obj, evaluate_obj)
    setting_obj.load_run_save_evaluate()
    # ------------------------------------------------------

    method_obj.save_pretrained('./result/PreTrained_GraphBert/' +
# ==================== Example #2 ====================
    # Pre-training stage: reconstruct node attributes with Graph-Bert.
    # NOTE: fixed typo in the banner string ('GrapBert' -> 'GraphBert').
    print('GraphBert, dataset: ' + dataset_name + ', Pre-training, Node Attribute Reconstruction.')

    # ---- object initialization section -------------------
    data_obj = DatasetLoader()
    data_obj.dataset_source_folder_path = './data/' + dataset_name + '/'
    data_obj.dataset_name = dataset_name
    data_obj.k = k  # context/subgraph size parameter — TODO confirm semantics in DatasetLoader
    data_obj.load_all_tag = True  # presumably loads all auxiliary inputs, not just the graph — verify

    # Model configuration mirroring the hyper-parameters defined above.
    bert_config = GraphBertConfig(residual_type=residual_type, k=k,
                                  x_size=nfeature, y_size=y_size,
                                  hidden_size=hidden_size,
                                  intermediate_size=intermediate_size,
                                  num_attention_heads=num_attention_heads,
                                  num_hidden_layers=num_hidden_layers)

    # Node-attribute reconstruction pre-training task.
    method_obj = MethodGraphBertNodeConstruct(bert_config)
    method_obj.max_epoch = max_epoch
    method_obj.lr = lr
    method_obj.save_pretrained_path = './result/PreTrained_GraphBert/' + dataset_name + '/node_reconstruct_model/'

    # Destination for the per-epoch training records.
    result_obj = ResultSaving()
    result_obj.result_destination_folder_path = './result/GraphBert/'
    result_obj.result_destination_file_name = dataset_name + '_' + str(k) + '_node_reconstruction'

    setting_obj = Settings()

    evaluate_obj = None  # no evaluation stage during pre-training
    # ------------------------------------------------------

    # ---- running section ---------------------------------
    setting_obj.prepare(data_obj, method_obj, result_obj, evaluate_obj)
    setting_obj.load_run_save_evaluate()
    # ------------------------------------------------------
    # To persist the pre-trained weights explicitly, call:
    # method_obj.save_pretrained('./result/PreTrained_GraphBert/' + dataset_name + '/node_reconstruct_model/')

    print('************ Finish ************')
# ==================== Example #3 ====================
    elif dataset_name == 'COLLAB':
        max_graph_size = 500

#---- Step 1: Load Raw Graphs for Train/Test Partition ----
if 1:
    print('************ Start ************')
    print('Preprocessing dataset: ' + dataset_name)

    # ---- component wiring --------------------------------
    # Loader pointed at the raw (unprocessed) files of this dataset.
    loader = DatasetLoader()
    loader.dataset_name = dataset_name
    loader.dataset_source_folder_path = './data/' + dataset_name + '/'
    loader.load_type = 'Raw'

    # Method that processes the raw graph input.
    processor = MethodProcessRaw()

    # Sink that writes the preprocessing output to disk.
    saver = ResultSaving()
    saver.result_destination_file_name = dataset_name
    saver.result_destination_folder_path = './result/Preprocess/'

    pipeline = Settings()
    # ------------------------------------------------------

    # ---- pipeline execution ------------------------------
    # No evaluation step for preprocessing, hence evaluator is None.
    pipeline.prepare(loader, processor, saver, None)
    pipeline.load_run_save_evaluate()
    # ------------------------------------------------------

    print('************ Finish ************')
#------------------------------------
# ==================== Example #4 ====================
import numpy as np
import matplotlib.pyplot as plt
from code.ResultSaving import ResultSaving

#---------- clustering results evaluation -----------------

#---- IMDBBINARY, IMDBMULTI, MUTAG, NCI1, PTC,  PROTEINS,   COLLAB, REDDITBINARY, REDDITMULTI5K ----

#---- isolated_segment, padding_pruning, full_input
strategy = 'isolated_segment'
dataset_name = 'IMDBMULTI'
residual_type = 'none'

if 1:
    epoch_number = 500
    result_obj = ResultSaving('', '')
    result_obj.result_destination_folder_path = './result/AuGBert/' + strategy + '/' + dataset_name + '/'

    result_list = []
    time_list = []
    for fold in range(1, 11):
        result_obj.result_destination_file_name = dataset_name + '_' + str(
            fold) + '_' + str(
                epoch_number) + '_' + residual_type + '_' + strategy
        loaded_result = result_obj.load()
        time_list.append(
            sum([loaded_result[epoch]['time'] for epoch in loaded_result]))
        result_list.append(
            np.max(
                [loaded_result[epoch]['acc_test'] for epoch in loaded_result]))
    print('accuracy: {:.2f}$\pm${:.2f}'.format(100 * np.mean(result_list),
                                      y_size=y_size,
                                      hidden_size=hidden_size,
                                      intermediate_size=intermediate_size,
                                      num_attention_heads=num_attention_heads,
                                      num_hidden_layers=num_hidden_layers)
        method_obj = MethodGraphBertNodeClassification(bert_config)
        #---- set to false to run faster ----
        method_obj.spy_tag = True
        method_obj.max_epoch = max_epoch
        method_obj.lr = lr
        method_obj.fold = fold
        method_obj.strategy = strategy
        method_obj.load_pretrained_path = ''
        method_obj.save_pretrained_path = ''

        result_obj = ResultSaving()
        result_obj.result_destination_folder_path = './result/AuGBert/'
        result_obj.result_destination_file_name = dataset_name + '_' + str(
            fold) + '_' + str(max_epoch) + '_' + residual_type + '_' + strategy

        setting_obj = Settings()

        evaluate_obj = None
        # ------------------------------------------------------

        # ---- running section ---------------------------------
        setting_obj.prepare(data_obj, method_obj, result_obj, evaluate_obj)
        setting_obj.load_run_save_evaluate()
        # ------------------------------------------------------

        #method_obj.save_pretrained('./result/PreTrained_GraphBert/' + dataset_name + '/node_classification_complete_model/')
# ==================== Example #6 ====================
    nclass = 3
    nfeature = 500
    ngraph = 19717

#---- Step 1: WL based graph coloring ----
if 1:
    print('************ Start ************')
    print('WL, dataset: ' + dataset_name)

    # ---- component wiring --------------------------------
    graph_source = DatasetLoader()
    graph_source.dataset_name = dataset_name
    graph_source.dataset_source_folder_path = './data/' + dataset_name + '/'

    # Weisfeiler-Lehman node colouring method.
    wl_method = MethodWLNodeColoring()

    # Sink that stores the computed WL codes.
    wl_sink = ResultSaving()
    wl_sink.result_destination_folder_path = './result/WL/'
    wl_sink.result_destination_file_name = dataset_name

    run_settings = Settings()
    # ------------------------------------------------------

    # ---- run the load -> color -> save pipeline ----------
    # No evaluation step for this stage, hence evaluator is None.
    run_settings.prepare(graph_source, wl_method, wl_sink, None)
    run_settings.load_run_save_evaluate()
    # ------------------------------------------------------

    print('************ Finish ************')
#------------------------------------
import matplotlib.pyplot as plt
from code.ResultSaving import ResultSaving

#---------- clustering results evaluation -----------------

dataset_name = 'pubmed'

# Disabled by default (`if 0`): re-scores a stored clustering result.
if 0:
    pre_train_task = 'node_reconstruction+structure_recovery'

    # Reload the persisted clustering output for this dataset/task pair.
    stored = ResultSaving('', '')
    stored.result_destination_folder_path = './result/GraphBert/'
    stored.result_destination_file_name = 'clustering_' + dataset_name + '_' + pre_train_task
    clustering_output = stored.load()

    # Evaluate the loaded clustering and report the scores.
    scorer = EvaluateClustering()
    scorer.data = clustering_output
    print(scorer.evaluate())

#--------------- Graph Bert Pre-Training Records Convergence --------------

dataset_name = 'cora'

if 0:
    if dataset_name == 'cora':
        k = 7
    elif dataset_name == 'citeseer':
        k = 5
    elif dataset_name == 'pubmed':