Example #1
File: main.py  Project: QixunHou/finalisel
def main():
    # set up seeds and gpu device
    torch.manual_seed(0)
    np.random.seed(0)

    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    print('Loading train set...')
    train_graphs = GraphData(opt.dataset, train=True, test=False)
    print('\nLoading test set...')
    test_graphs = GraphData(opt.dataset, train=False, test=False)
    train_loader = DataLoader(train_graphs,
                              opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers)
    test_loader = DataLoader(test_graphs,
                             opt.batch_size,
                             shuffle=False,  # evaluation order does not matter
                             num_workers=opt.num_workers)

    model = GraphCNN(opt.num_layers, opt.num_mlp_layers,
                     train_graphs[0].node_features.shape[1], opt.hidden_dim,
                     opt.output_dim, opt.final_dropout, opt.learn_eps,
                     opt.learn_edges, opt.graph_pooling_type,
                     opt.neighbor_pooling_type, opt.device).to(opt.device)

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
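    # halve the learning rate every 50 epochs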
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

    print('\nStart training...\n')

    for epoch in range(1, opt.epochs + 1):

        avg_loss = train(model, opt.device, train_graphs, optimizer, scheduler,
                         epoch)
        acc_train, acc_test = test(model, opt.device, train_graphs,
                                   test_graphs, epoch)
        # writer.add_scalar('Loss', avg_loss, epoch)
        # writer.add_scalar('Acc_train', acc_train, epoch)
        # writer.add_scalar('Acc_test', acc_test,epoch)
        writer.add_scalars("My_cdfg_result", {
            "Acc_train": acc_train,
            "Acc_test": acc_test,
            "Loss": avg_loss
        }, epoch)

    if opt.learn_edges:
        print(model.edge_weight, model.edge_bias)

    writer.close()

    if opt.save_path is None:
        opt.save_path = './saves/' + time.strftime("%b%d_%H-%M-%S",
                                                   time.localtime()) + '.pth'
    torch.save(model.state_dict(), opt.save_path)
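
A minimal reload sketch for the checkpoint saved above, assuming the same hyper-parameters (opt) and the input feature dimension (input_dim, i.e. train_graphs[0].node_features.shape[1]) used at training time:

model = GraphCNN(opt.num_layers, opt.num_mlp_layers, input_dim,
                 opt.hidden_dim, opt.output_dim, opt.final_dropout,
                 opt.learn_eps, opt.learn_edges, opt.graph_pooling_type,
                 opt.neighbor_pooling_type, opt.device).to(opt.device)
model.load_state_dict(torch.load(opt.save_path, map_location=opt.device))
model.eval()  # disable dropout for inference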
Example #2
    "final_dropout": 0.5,
    "graph_pooling_type": 'sum',
    "neighbor_pooling_type": 'sum',
    "learn_eps": 'store_true',
    'degree_as_tag': 'store_true',
    'filename': 'output.txt'
})

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

graphs, num_classes = load_data(args.dataset, args.degree_as_tag)
train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)
labels = tf.constant([graph.label for graph in train_graphs])

model = GraphCNN(args.num_layers, args.num_mlp_layers, args.hidden_dim,
                 num_classes, args.final_dropout, args.learn_eps,
                 args.graph_pooling_type, args.neighbor_pooling_type)

optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)


#def train(loss,model,opt,original):
def train(args, model, train_graphs, opt, epoch):
    total_iters = args.iters_per_epoch
    pbar = tqdm(range(total_iters), unit='batch')

    loss_accum = 0
    for pos in pbar:
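        # sample a random mini-batch of graph indices without replacement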
        selected_idx = np.random.permutation(
            len(train_graphs))[:args.batch_size]
        batch_graph = [train_graphs[idx] for idx in selected_idx]
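
The fragment ends mid-iteration; a minimal sketch of the gradient step that would typically follow, with model(batch_graph) returning logits and batch_labels holding the batch's labels (both names are assumptions):

        with tf.GradientTape() as tape:
            logits = model(batch_graph)               # assumed forward pass
            loss = loss_object(batch_labels, logits)  # sparse CE from logits
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        loss_accum += loss.numpy()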
Example #3
File: main.py  Project: zyang1580/PAGNN_
def main():
    # Training settings
    # Note: Hyper-parameters need to be tuned in order to obtain results reported in the paper.
    parser = argparse.ArgumentParser(
        description=
        'PyTorch graph convolutional neural net for whole-graph classification'
    )
    parser.add_argument('--dataset',
                        type=str,
                        default="NCI1",
                        help='name of dataset (default: NCI1)')
    parser.add_argument('--device',
                        type=int,
                        default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument(
        '--iters_per_epoch',
        type=int,
        default=50,
        help='number of iterations per each epoch (default: 50)')
    parser.add_argument(
        '--epochs',
        type=int,
        default=500,
        help='number of epochs to train (default: 500)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--wl2',
                        type=float,
                        default=0.0,
                        help='weight decay (L2 penalty) (default: 0.0)')
    parser.add_argument(
        '--seed',
        type=int,
        default=0,
        help='random seed for splitting the dataset into 10 (default: 0)')
    parser.add_argument(
        '--fold_idx',
        type=int,
        default=0,
        help='the index of fold in 10-fold validation. Should be less than 10.'
    )
    parser.add_argument(
        '--num_layers',
        type=int,
        default=6,
        help='number of layers INCLUDING the input one (default: 6)')
    parser.add_argument(
        '--num_mlp_layers',
        type=int,
        default=2,
        help=
        'number of layers for MLP EXCLUDING the input one (default: 2). 1 means linear model.'
    )
    parser.add_argument('--hidden_dim',
                        type=int,
                        default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout',
                        type=float,
                        default=0.5,
                        help='final layer dropout (default: 0.5)')
    parser.add_argument(
        '--graph_pooling_type',
        type=str,
        default="sum",
        choices=["sum", "average"],
        help='Pooling over nodes in a graph: sum or average')
    parser.add_argument(
        '--neighbor_pooling_type',
        type=str,
        default="sum",
        choices=["sum", "average", "max"],
        help='Pooling over neighboring nodes: sum, average or max')
    parser.add_argument(
        '--learn_eps',
        action="store_true",
        help=
        'Whether to learn the epsilon weighting for the center nodes. Does not affect training accuracy though.'
    )
    parser.add_argument(
        '--degree_as_tag',
        action="store_true",
        help=
        'let the input node features be the degree of nodes (heuristics for unlabeled graph)'
    )
    parser.add_argument('--filename',
                        type=str,
                        default="meta-guass-pow10",
                        help='output file')
    parser.add_argument(
        '--attention',
        type=bool,
        default=True,
        help='use attention (default: True; note that argparse parses any '
             'non-empty string as True for type=bool)')
    parser.add_argument('--tqdm', type=bool, default=False,
                        help='show a tqdm progress bar')
    parser.add_argument(
        '--multi_head',
        type=int,
        default=1,
        help='number of attention heads (default: 1)')
    parser.add_argument(
        '--sum_flag',
        type=int,
        default=1,
        help="if 0: don't sum")
    parser.add_argument(
        '--inter',
        type=int,
        default=1,
        help='if 0: no interaction in attention')

    parser.add_argument(
        '--dire_sigmod',
        type=int,
        default=0,
        help='if 0: softmax in direct attention; if 1: sigmoid')

    parser.add_argument(
        '--attention_type',
        type=str,
        default="mlp-sigmod",
        help='attention type: dire (sum or not), mlp-softmax, mlp-sigmod')
    args = parser.parse_args()

    ISOTIMEFORMAT = '%Y-%m-%d-%H-%M'
    theTime = datetime.datetime.now().strftime(ISOTIMEFORMAT)
    save_fold_path = "result/save_model/" + args.filename + str(
        args.num_layers) + str(theTime)  # result save path

    writer_path = str(theTime) + args.filename + 'TBX'
    writer = SummaryWriter(log_dir=writer_path)

    #set up seeds and gpu device
    print("lr:", args.lr)
    print("attention: ", args.attention)
    print("attention_type:", args.attention_type)
    print("sum_flag:", args.sum_flag)
    print("inter:", args.inter)
    print("filename:", args.filename)
    if args.attention:  # attention requires sum graph pooling
        args.graph_pooling_type = 'sum'
    print("data sets:", args.dataset)
    print("degree as tag:", args.degree_as_tag)
    print("fold_idx:", args.fold_idx)
    if args.sum_flag == 1:
        print("direct attention is sum attention; the sigmoid attention "
              "model is also used")

    f = open(args.filename + "_train", 'w')
    if args.fold_idx == -1:
        acc = []
        for idx in range(10):
            acc_i = cross_val(args, writer, idx, f)
            acc.append(acc_i)
        writer.close()
        np.save("result/" + args.filename + "_all.numpy",
                np.array(acc))  # save
    else:
        torch.manual_seed(0)
        np.random.seed(0)
        device = torch.device("cuda:" +
                              str(args.device)) if torch.cuda.is_available(
                              ) else torch.device("cpu")
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        graphs, num_classes = load_data(args.dataset, args.degree_as_tag)

        ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
        train_graphs, test_graphs = separate_data(graphs, args.seed,
                                                  args.fold_idx)

        model = GraphCNN(args.num_layers,
                         args.num_mlp_layers,
                         train_graphs[0].node_features.shape[1],
                         args.hidden_dim,
                         num_classes,
                         args.final_dropout,
                         args.learn_eps,
                         args.graph_pooling_type,
                         args.neighbor_pooling_type,
                         device,
                         attention=args.attention,
                         multi_head=args.multi_head,
                         sum_flag=args.sum_flag,
                         inter=args.inter,
                         attention_type=args.attention_type,
                         dire_sigmod=args.dire_sigmod).to(device)

        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.wl2)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=50,
                                              gamma=0.5)

        #args.epoch = 1
        acc = []
        max_acc = 0
        for epoch in range(1, args.epochs + 1):
            avg_loss = train(args, model, device, train_graphs, optimizer,
                             epoch)
            # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
            scheduler.step()
            acc_train, acc_test = ftest(args, model, device, train_graphs,
                                        test_graphs, epoch)
            max_acc = max(acc_test, max_acc)
            writer.add_scalars(
                str(args.fold_idx) + '/scalar/acc', {
                    'train': acc_train,
                    'val': acc_test
                }, epoch)
            acc.append(acc_test)
            f.write("%f %f %f" % (avg_loss, acc_train, acc_test))
            f.write("\n")
            print("")
            if epoch % 50 == 0:
                torch.save(model.state_dict(),
                           save_fold_path + "_" + str(epoch) + ".pt")
        print("****************max acc:", max_acc)
        try:
            torch.save(model.state_dict(), save_fold_path + "_last.pt")
            np.save(
                "result/" + args.filename + "_" + str(args.fold_idx) +
                "_val_acc.npy", np.array(acc))
            writer.close()
        except Exception:
            print("acc all:", acc)
        #print(model.eps)
        f.close()
Example #4
File: main.py  Project: zyang1580/PAGNN_
def cross_val(args, writer, idx, f):
    fold_idx = idx
    print(
        "**********************fold:{}**************************************************"
        .format(fold_idx))
    torch.manual_seed(0)
    np.random.seed(0)
    device = (torch.device("cuda:" + str(args.device))
              if torch.cuda.is_available() else torch.device("cpu"))
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    graphs, num_classes = load_data(args.dataset, args.degree_as_tag)

    ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
    train_graphs, test_graphs = separate_data(graphs, args.seed, fold_idx)

    model = GraphCNN(args.num_layers,
                     args.num_mlp_layers,
                     train_graphs[0].node_features.shape[1],
                     args.hidden_dim,
                     num_classes,
                     args.final_dropout,
                     args.learn_eps,
                     args.graph_pooling_type,
                     args.neighbor_pooling_type,
                     device,
                     attention=args.attention,
                     multi_head=args.multi_head,
                     sum_flag=args.sum_flag,
                     inter=args.inter,
                     attention_type=args.attention_type,
                     dire_sigmod=args.dire_sigmod).to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)
    f.write("************************************** %d *********" % fold_idx)
    max_acc = 0
    acc = []
    for epoch in range(1, args.epochs + 1):
        avg_loss = train(args, model, device, train_graphs, optimizer, epoch)
        # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
        scheduler.step()
        acc_train, acc_test = test(args, model, device, train_graphs,
                                   test_graphs, epoch)
        writer.add_scalars('/scalar/acc' + str(fold_idx), {
            'train': acc_train,
            'val': acc_test
        }, epoch)
        acc.append(acc_test)
        if acc_test > max_acc:
            max_acc = acc_test

        f.write("%f %f %f" % (avg_loss, acc_train, acc_test))
        f.write("\n")
        print("")
    print("acc:", acc)
    try:
        f.write(
            "**************************************fold_idx:{}, best:{} *********"
            .format(fold_idx, max_acc))
        np.save(
            "result/" + args.filename + "_" + str(fold_idx) + "_val_acc.npy",
            np.array(acc))
        print(
            "************************************** fold_idx:{}, best:{} *********"
            .format(fold_idx, max_acc))
    except Exception:
        pass
    return acc
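
A minimal aggregation sketch over the per-fold files saved above, following the common 10-fold protocol (average per-epoch test accuracy across folds, then report the best epoch); filename stands for the args.filename used during training:

import numpy as np

accs = np.stack([
    np.load("result/" + filename + "_" + str(fold) + "_val_acc.npy")
    for fold in range(10)
])                                   # shape: (10, epochs)
mean_per_epoch = accs.mean(axis=0)
best = int(mean_per_epoch.argmax())
print("epoch", best + 1,
      "mean acc", mean_per_epoch[best],
      "std", accs[:, best].std())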
Example #5
def main():
    # Training settings
    # Note: Hyper-parameters need to be tuned in order to obtain results reported in the paper.
    parser = argparse.ArgumentParser(
        description='PyTorch graph convolutional neural net for whole-graph classification')
    parser.add_argument('--dataset', type=str, default="TRIANGLES",
                        help='name of dataset (default: TRIANGLES)')
    parser.add_argument('--device', type=int, default=2,
                        help='which gpu to use if any (default: 2)')
    parser.add_argument('--iterations', type=int, default=700,
                        help='number of iterations to train (default: 700)')
    parser.add_argument('--num_layers', type=int, default=5,
                        help='number of layers INCLUDING the input one (default: 5)')
    parser.add_argument('--meta_train_metric', type=str, choices=('euclidean', 'cosine', 'l1', 'l2'),
                        default='l2',
                        help='meta-train evaluate metric')
    parser.add_argument('--meta_val_metric', type=str, choices=('euclidean', 'cosine', 'l1', 'l2'),
                        default='l2',
                        help='meta-val evaluation metric')
    parser.add_argument('--scheduler', type=str, choices=('step', 'multi_step', 'cosine'),
                        default='multi_step',
                        help='learning-rate scheduler')
    parser.add_argument('--norm_type', type=str, choices=('L2N', 'CL2N', 'UN'),
                        default='CL2N')
    parser.add_argument('--type', type=str, choices=('local', 'global'),
                        default='local')
    parser.add_argument('--attention_type', type=str,
                        choices=('weight', 'mlp', 'attention', 'self-attention', 'transformer'),
                        default='weight')
    parser.add_argument('--optimizer', default='Adam', choices=('SGD', 'Adam'))
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate (default: 0.001)')
    parser.add_argument('--num_NN', type=int, default=1,
                        help='number of nearest neighbors; set this > 1 when doing kNN')
    parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)')
    parser.add_argument('--nesterov', action='store_true',
                        help='use nesterov for SGD, disable it in default')
    parser.add_argument('--num_ways', type=int, default=3)
    parser.add_argument('--spt_shots', type=int, default=5)
    parser.add_argument('--qry_shots', type=int, default=15)
    parser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=32)
    parser.add_argument('--test_task_num', type=int, help='meta batch size, namely task num', default=500)
    parser.add_argument('--val_task_num', type=int, help='meta batch size, namely task num', default=500)
    parser.add_argument('--num_mlp_layers', type=int, default=2,
                        help='number of layers for MLP EXCLUDING the input one (default: 2). 1 means linear model.')
    parser.add_argument('--hidden_dim', type=int, default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout', type=float, default=0.2,
                        help='final layer dropout (default: 0.2)')
    parser.add_argument('--graph_pooling_type', type=str, default="sum", choices=["sum", "average"],
                        help='Pooling for over nodes in a graph: sum or average')
    parser.add_argument('--neighbor_pooling_type', type=str, default="sum", choices=["sum", "average", "max"],
                        help='Pooling for over neighboring nodes: sum, average or max')
    parser.add_argument('--learn_eps', action="store_true", default=False,
                        help='Whether to learn the epsilon weighting for the center nodes. Does not affect training accuracy though.')
    parser.add_argument('--degree_as_tag', action="store_true",
                        help='let the input node features be the degree of nodes (heuristics for unlabeled graph)')
    parser.add_argument('--input_model_file', type=str, default="/home/jsy/SimpleShot/data_weight_save/",
                        help='filename to read the model (if there is any)')
    args = parser.parse_args()

    # set up seeds and gpu device
    torch.manual_seed(0)
    np.random.seed(0)
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    all_graphs, label_dict = data_load_data(args.dataset, True)
    train_graph, test_graph = segregate(args, all_graphs, label_dict)

    # input_dim = 718
    input_dim = train_graph[0][0].node_features.shape[1]
    num_classes = args.num_ways

    model = GraphCNN(args, args.num_layers, args.num_mlp_layers, input_dim, args.hidden_dim, num_classes, args.final_dropout,
                     args.learn_eps, args.graph_pooling_type, args.neighbor_pooling_type, device).to(device)


    optimizer = get_optimizer(model, args)
    scheduler = get_scheduler(args.task_num, optimizer, args)

    with open("./dataset/dataset/TRIANGLES/way_shot/train-way.txt", 'r') as f:
        train_way = f.read()
        train_way = json.loads(train_way)
    with open("./dataset/dataset/TRIANGLES/way_shot/train-spt-shot.txt", 'r') as f:
        train_spt_shot = f.read()
        train_spt_shot = json.loads(train_spt_shot)
    with open("./dataset/dataset/TRIANGLES/way_shot/train-qry-shot.txt", 'r') as f:
        train_qry_shot = f.read()
        train_qry_shot = json.loads(train_qry_shot)
    with open("./dataset/dataset/TRIANGLES/way_shot/test-way.txt", 'r') as f:
        test_way = f.read()
        test_way = json.loads(test_way)
    with open("./dataset/dataset/TRIANGLES/way_shot/test-spt-shot.txt", 'r') as f:
        test_spt_shot = f.read()
        test_spt_shot = json.loads(test_spt_shot)
    with open("/./dataset/dataset/TRIANGLES/way_shot/test-qry-shot.txt", 'r') as f:
        test_qry_shot = f.read()
        test_qry_shot = json.loads(test_qry_shot)
    with open("./dataset/dataset/TRIANGLES/way_shot/val-way.txt", 'r') as f:
        val_way = f.read()
        val_way = json.loads(val_way)
    with open("./dataset/dataset/TRIANGLES/way_shot/val-spt-shot.txt", 'r') as f:
        val_spt_shot = f.read()
        val_spt_shot = json.loads(val_spt_shot)
    with open("./dataset/dataset/TRIANGLES/way_shot/val-qry-shot.txt", 'r') as f:
        val_qry_shot = f.read()
        val_qry_shot = json.loads(val_qry_shot)

    print(args)


    acc1 = []
    acc2 = []
    acc3 = []
    Train_Loss_list = []
    v_conf = []
    t_conf = []

    for iteration in range(1, args.iterations + 1):
        train_loss, train_acc = train(train_graph, args, iteration, train_way,
                                      train_spt_shot, train_qry_shot, model,
                                      device, optimizer)
        # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
        scheduler.step()
        acc1.append(train_acc)
        Train_Loss_list.append(train_loss)

        if iteration % 20 == 0:

            # torch.save(model.state_dict(), ("/home/jsy/SimpleShot/save6/{}.pth").format(iteration))
            val_acc, val_conf = val(test_graph, args, iteration, val_way, val_spt_shot, val_qry_shot, model, device)
            test_acc, test_conf = test(train_graph, test_graph, args, iteration, test_way, test_spt_shot, test_qry_shot,
                                       model, device,
                                       )
            acc2.append(val_acc)
            acc3.append(test_acc)
            t_conf.append(test_conf)

            print('iteration', iteration, ':', 'train_loss:', train_loss, 'train_acc:', train_acc,
                  'val_acc:', val_acc, 'val_conf:', val_conf, 'test_acc:', test_acc, 'test_conf:', test_conf)

        else:
            print('iteration', iteration, ':', 'train_loss:', train_loss, 'train_acc:', train_acc)

        if iteration == args.iterations:
            print('train_mean:', np.mean(acc1), 'train_max:', np.max(acc1),
                  'val_mean:', np.mean(acc2), 'val_max:', np.max(acc2),
                  'test_mean:', np.mean(acc3), 'test_max:', np.max(acc3))
            val_max = max(acc2)
            test_max = acc3[acc2.index(val_max)]
            conf = t_conf[acc2.index(val_max)]
            print('iteration', (acc2.index(val_max) + 1) * 20, ':val_max_acc:', val_max, 'test_acc:', test_max,
                  'test_conf:', conf)
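
A minimal sketch of a 95% confidence half-width over the recorded accuracy lists (e.g. acc3), treating them as i.i.d. estimates; 1.96 is the normal quantile:

import numpy as np

def mean_ci95(values):
    a = np.asarray(values, dtype=float)
    half = 1.96 * a.std(ddof=1) / np.sqrt(len(a))  # half-width of the interval
    return a.mean(), half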
Example #6
def main():
    # Training settings
    # Note: Hyper-parameters need to be tuned in order to obtain results reported in the paper.
    parser = argparse.ArgumentParser(
        description=
        'PyTorch graph convolutional neural net for whole-graph classification'
    )
    parser.add_argument('--dataset',
                        type=str,
                        default="MUTAG",
                        help='name of dataset (default: MUTAG)')
    parser.add_argument('--device',
                        type=int,
                        default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument(
        '--iters_per_epoch',
        type=int,
        default=50,
        help='number of iterations per each epoch (default: 50)')
    parser.add_argument('--epochs',
                        type=int,
                        default=350,
                        help='number of epochs to train (default: 350)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument(
        '--seed',
        type=int,
        default=0,
        help='random seed for splitting the dataset into 10 (default: 0)')
    parser.add_argument(
        '--fold_idx',
        type=int,
        default=0,
        help='the index of fold in 10-fold validation. Should be less than 10.'
    )
    parser.add_argument(
        '--num_layers',
        type=int,
        default=5,
        help='number of layers INCLUDING the input one (default: 5)')
    parser.add_argument(
        '--num_mlp_layers',
        type=int,
        default=2,
        help=
        'number of layers for MLP EXCLUDING the input one (default: 2). 1 means linear model.'
    )
    parser.add_argument('--hidden_dim',
                        type=int,
                        default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout',
                        type=float,
                        default=0.5,
                        help='final layer dropout (default: 0.5)')
    parser.add_argument(
        '--graph_pooling_type',
        type=str,
        default="sum",
        choices=["sum", "average"],
        help='Pooling over nodes in a graph: sum or average')
    parser.add_argument(
        '--neighbor_pooling_type',
        type=str,
        default="sum",
        choices=["sum", "average", "max"],
        help='Pooling over neighboring nodes: sum, average or max')
    parser.add_argument(
        '--learn_eps',
        action="store_true",
        help=
        'Whether to learn the epsilon weighting for the center nodes. Does not affect training accuracy though.'
    )
    parser.add_argument(
        '--degree_as_tag',
        action="store_true",
        help=
        'let the input node features be the degree of nodes (heuristics for unlabeled graph)'
    )
    parser.add_argument('--filename', type=str, default="", help='output file')
    args = parser.parse_args()

    #set up seeds and gpu device
    torch.manual_seed(0)
    np.random.seed(0)
    device = (torch.device("cuda:" + str(args.device))
              if torch.cuda.is_available() else torch.device("cpu"))
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    graphs, num_classes = load_data(args.dataset, args.degree_as_tag)

    ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
    train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)

    model = GraphCNN(args.num_layers, args.num_mlp_layers,
                     train_graphs[0].node_features.shape[1], args.hidden_dim,
                     num_classes, args.final_dropout, args.learn_eps,
                     args.graph_pooling_type, args.neighbor_pooling_type,
                     device).to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)

    for epoch in range(1, args.epochs + 1):
        avg_loss = train(args, model, device, train_graphs, optimizer, epoch)
        # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
        scheduler.step()
        acc_train, acc_test = test(args, model, device, train_graphs,
                                   test_graphs, epoch)

        if not args.filename == "":
            with open(args.filename, 'w') as f:
                f.write("%f %f %f" % (avg_loss, acc_train, acc_test))
                f.write("\n")
        print("")

    extract_features(model, graphs)
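
These snippets are presumably run directly as scripts; the standard entry-point guard (not shown in the listings) would be:

if __name__ == '__main__':
    main()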
Example #7
def main():
    # Training settings
    # Note: Check experiment scripts for hyperparameters
    parser = argparse.ArgumentParser(
        description='PyTorch graph convolutional neural net '
                    'for whole-graph classification')
    parser.add_argument('--dataset',
                        type=str,
                        default="MUTAG",
                        help='name of dataset (default: MUTAG)')
    parser.add_argument('--device',
                        type=str,
                        default="0",
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument(
        '--iters_per_epoch',
        type=int,
        default=50,
        help='number of iterations per each epoch (default: 50)')
    parser.add_argument('--epochs',
                        type=int,
                        default=350,
                        help='number of epochs to train (default: 350)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='random seed for splitting the dataset into 10 '
                             '(default: 0)')
    parser.add_argument('--fold_idx',
                        type=int,
                        default=0,
                        help='the index of fold in 10-fold validation. '
                             'Should be less than 10.')
    parser.add_argument('--num_layers',
                        type=int,
                        default=5,
                        help='number of layers INCLUDING the input one '
                             '(default: 5)')
    parser.add_argument(
        '--num_mlp_layers',
        type=int,
        default=2,
        help='number of layers for MLP EXCLUDING the input one '
             '(default: 2). 1 means linear model.')
    parser.add_argument('--hidden_dim',
                        type=int,
                        default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout',
                        type=float,
                        default=0.5,
                        help='final layer dropout (default: 0.5)')
    parser.add_argument(
        '--graph_pooling_type',
        type=str,
        default="sum",
        choices=["sum", "average"],
        help='Pooling over nodes in a graph: sum or average')
    parser.add_argument('--neighbor_pooling_type',
                        type=str,
                        default="sum",
                        choices=["sum", "average", "max"],
                        help='Pooling over neighboring nodes: sum, average '
                             'or max')
    parser.add_argument('--learn_eps',
                        action="store_true",
                        help='Whether to learn the epsilon weighting '
                             'for the center nodes.')
    parser.add_argument('--degree_as_tag',
                        action="store_true",
                        help='let the input node features be the degree of '
                             'nodes (heuristics for unlabeled graph)')
    parser.add_argument('--filename', type=str, default="", help='output file')
    parser.add_argument('--bn',
                        type=bool,
                        default=True,
                        help="Enable batchnorm for MLP")
    parser.add_argument('--gbn',
                        type=bool,
                        default=True,
                        help="Enable batchnorm for graph")
    parser.add_argument('--corrupt_label',
                        action="store_true",
                        help="Enable label corruption")
    parser.add_argument('--N',
                        type=str,
                        default="",
                        help="Label noise configuration N. Should be passed "
                             "as a flattened string in row order, or as a "
                             "single value for a symmetric noise config.")
    parser.add_argument('--denoise',
                        type=str,
                        default="",
                        choices=["estimate", "anchors", "exact"],
                        help="Method to recover the noise matrix C.")
    parser.add_argument('--correction',
                        type=str,
                        default="backward",
                        choices=["backward", "forward", "compound"],
                        help="Type of loss correction function.")
    parser.add_argument('--anchors',
                        type=str,
                        default="",
                        help="List of representative train data.")
    parser.add_argument('--est_mode',
                        default="max",
                        choices=["max", "min"],
                        help="Type of estimator for C")
    parser.add_argument('--skip_new',
                        action="store_true",
                        help="Skip training a new model after estimating noise")
    args = parser.parse_args()

    #set up seeds and gpu device
    torch.manual_seed(0)
    np.random.seed(0)
    if args.device != "cpu":
        device = torch.device("cuda:" + args.device)\
                 if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    graphs, num_classes = load_data(args.dataset, args.degree_as_tag)

    ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
    train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)

    # Corrupt data
    if args.corrupt_label:
        assert len(args.N) != 0, "Need to pass noise matrix!"
        N = np.fromstring(args.N, sep=" ", dtype=float)
        if len(N) == 1:
            self_prob = N[0]
            N = np.ones((num_classes, num_classes)) * \
                ((1 - self_prob) / (num_classes-1))
            np.fill_diagonal(N, self_prob)
            # Note: this could potentially cause some numerical problem
        elif len(N) == num_classes**2:
            N = N.reshape(num_classes, -1)
        else:
            raise ValueError("N needs to be a single value or square matrix.")
        print("Corrupting training label with N:")
        print(N)
        train_graphs = corrupt_label(train_graphs, N)
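        # Worked example: with num_classes = 3 and --N "0.8", the branch above
        # yields the symmetric noise matrix
        #   [[0.8, 0.1, 0.1],
        #    [0.1, 0.8, 0.1],
        #    [0.1, 0.1, 0.8]]
        # i.e. each label is kept with probability 0.8 and otherwise flipped
        # uniformly to one of the other classes.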

    if args.denoise != "exact":
        model = GraphCNN(args.num_layers, args.num_mlp_layers,
                         train_graphs[0].node_features.shape[1],
                         args.hidden_dim, num_classes, args.final_dropout,
                         args.learn_eps, args.graph_pooling_type,
                         args.neighbor_pooling_type, device, args.bn,
                         args.gbn).to(device)

        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=50,
                                              gamma=0.5)

        for epoch in range(1, args.epochs + 1):
            avg_loss = train(args, model, device, train_graphs, optimizer,
                             epoch)
            # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
            scheduler.step()
            acc_train, acc_test = test(args, model, device, train_graphs,
                                       test_graphs, epoch)

            if args.filename != "":
                # append so earlier epochs are not overwritten
                with open(args.filename, 'a') as f:
                    f.write("%f %f %f\n" % (avg_loss, acc_train, acc_test))
            print("")

            print(model.eps)
    else:
        model = None

    if args.denoise in ["estimate", "anchors", "exact"]:
        C = None
        anchors = None
        if args.denoise == "estimate" or args.denoise == "anchors":
            anchors = _parse_anchors(args.anchors, train_graphs)
            C = estimate_C(model,
                           train_graphs,
                           anchors,
                           est_mode=args.est_mode)
        elif args.denoise == "exact":
            C = estimate_C(model, train_graphs, anchors, N)

        criterion = None
        if args.correction == "backward":
            criterion = lambda x, y: backward_correction(
                x, y, C, device, model.num_classes)
        elif args.correction == "forward":
            criterion = lambda x, y: forward_correction_xentropy(
                x, y, C, device, model.num_classes)
        elif args.correction == "compound":
            criterion = lambda x, y: compound_correction(
                x, y, C, device, model.num_classes)
        del model
        if not args.skip_new:
            print("Training new denoising model")
            model = GraphCNN(args.num_layers, args.num_mlp_layers,
                             train_graphs[0].node_features.shape[1],
                             args.hidden_dim, num_classes, args.final_dropout,
                             args.learn_eps, args.graph_pooling_type,
                             args.neighbor_pooling_type, device, args.bn,
                             args.gbn).to(device)
            optimizer = optim.Adam(model.parameters(), lr=args.lr)
            scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                  step_size=50,
                                                  gamma=0.5)
            for epoch in range(1, args.epochs + 1):
                avg_loss = train(args, model, device, train_graphs, optimizer,
                                 epoch, criterion)
                # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
                scheduler.step()
                acc_train, acc_test = test(args, model, device, train_graphs,
                                           test_graphs, epoch)
                if args.filename != "":
                    out_name = (args.denoise + '_' + args.correction + '_' +
                                args.est_mode + '_' + args.filename)
                    # append so earlier epochs are not overwritten
                    with open(out_name, 'a') as f:
                        f.write("%f %f %f\n" %
                                (avg_loss, acc_train, acc_test))
                print("")
                print(model.eps)
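
A minimal sketch of what a backward loss correction computes, in the sense of Patrini et al. (2017), assuming C is the row-stochastic noise matrix estimated above; the repo's backward_correction may differ in its details:

import torch
import torch.nn.functional as F

def backward_correction_sketch(logits, labels, C, device, num_classes):
    # per-sample vector of cross-entropy losses for every possible label
    per_class_loss = -F.log_softmax(logits, dim=1)                # (B, K)
    C_inv = torch.inverse(
        torch.as_tensor(C, dtype=torch.float32, device=device))  # (K, K)
    corrected = per_class_loss @ C_inv.t()                        # (B, K)
    # take each sample's entry at its observed (noisy) label
    return corrected[torch.arange(len(labels)), labels].mean()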
Example #8
def main():
    # Training settings
    # Note: Hyper-parameters need to be tuned in order to obtain results reported in the paper.
    parser = argparse.ArgumentParser(description='PyTorch graph convolutional neural net for whole-graph classification')
    parser.add_argument('--dataset', type=str, default="NCI1",
                        help='name of dataset (default: MUTAG)')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--iters_per_epoch', type=int, default=50,
                        help='number of iterations per each epoch (default: 50)')
    parser.add_argument('--epochs', type=int, default=300,
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--wl2', type=float, default=0.0,
                        help='weight decay (L2 penalty) (default: 0.0)')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed for splitting the dataset into 10 (default: 0)')
    parser.add_argument('--fold_idx', type=int, default=0,
                        help='the index of fold in 10-fold validation. Should be less than 10.')
    parser.add_argument('--num_layers', type=int, default=6,
                        help='number of layers INCLUDING the input one (default: 6)')
    parser.add_argument('--num_mlp_layers', type=int, default=2,
                        help='number of layers for MLP EXCLUDING the input one (default: 2). 1 means linear model.')
    parser.add_argument('--hidden_dim', type=int, default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout', type=float, default=0.5,
                        help='final layer dropout (default: 0.5)')
    parser.add_argument('--graph_pooling_type', type=str, default="sum", choices=["sum", "average"],
                        help='Pooling over nodes in a graph: sum or average')
    parser.add_argument('--neighbor_pooling_type', type=str, default="sum", choices=["sum", "average", "max"],
                        help='Pooling over neighboring nodes: sum, average or max')
    parser.add_argument('--learn_eps', action="store_true",
                                        help='Whether to learn the epsilon weighting for the center nodes. Does not affect training accuracy though.')
    parser.add_argument('--degree_as_tag', action="store_true",
    					help='let the input node features be the degree of nodes (heuristics for unlabeled graph)')
    parser.add_argument('--filename', type = str, default = "yanzheng-MUTAG_sumsoftmax",
                                        help='output file')
    parser.add_argument('--attention', type=bool, default=True,
                        help='use attention (default: True; note that argparse parses any non-empty string as True for type=bool)')
    parser.add_argument('--tqdm', type=bool, default=False,
                        help='show a tqdm progress bar')
    parser.add_argument('--multi_head', type=int, default=1,
                        help='number of attention heads (default: 1)')
    parser.add_argument('--sum_flag', type=int, default=1,
                        help="if 0: don't sum")
    parser.add_argument('--inter', type=int, default=1,
                        help='if 0: no interaction in attention')

    parser.add_argument('--dire_sigmod', type=int, default=0,
                        help='if 0: softmax in direct attention; if 1: sigmoid')

    parser.add_argument('--attention_type', type=str, default="mlp-sigmod",
                        help='attention type: dire (sum or not), mlp-softmax, mlp-sigmod')
    args = parser.parse_args()
    writer_path = args.filename + 'TBX'
    writer = SummaryWriter(log_dir=writer_path)
    ISOTIMEFORMAT = '%Y-%m-%d-%H-%M'
    theTime = datetime.datetime.now().strftime(ISOTIMEFORMAT)
    save_fold_path = "result/save_model/" + args.filename + str(theTime)  # result save path

    #set up seeds and gpu device
    print("lr:",args.lr)
    print("attention: ",args.attention)
    print("attention_type:",args.attention_type)
    print("sum_flag:",args.sum_flag)
    print("inter:",args.inter)
    print("filename:",args.filename)
    if args.attention == True:   # if do attention we need sum  graph pool information
        args.graph_pooling_type = 'sum'
    print("data sets:",args.dataset)
    print("degree as tag:",args.degree_as_tag)
    print("flod_idx:",args.fold_idx)
    if args.sum_flag == 1:
        print("if use  directly attention is sum attention ,besides use sigmod attention model")

    f = open(args.filename+"_train", 'w')

    torch.manual_seed(0)
    np.random.seed(0)
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    graphs, num_classes = load_data(args.dataset, args.degree_as_tag)

    ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
    train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)

    model = GraphCNN(args.num_layers, args.num_mlp_layers,
                     train_graphs[0].node_features.shape[1], args.hidden_dim,
                     num_classes, args.final_dropout, args.learn_eps,
                     args.graph_pooling_type, args.neighbor_pooling_type,
                     device, attention=args.attention,
                     multi_head=args.multi_head, sum_flag=args.sum_flag,
                     inter=args.inter, attention_type=args.attention_type,
                     dire_sigmod=args.dire_sigmod).to(device)

    model.load_state_dict(torch.load("result/save_model/meta-guass-pow1062019-07-15-13-48_last.pt"))
    model.eval()
    # acc_train, acc_test = ftest(args, model, device, train_graphs, test_graphs, epoch)
    # Decide which split to evaluate here.
    test_graphs = train_graphs  # evaluate on the training split
    output = pass_data_iteratively(model, test_graphs)
    pred = output.max(1, keepdim=True)[1]
    labels = torch.LongTensor([graph.label for graph in test_graphs]).to(device)
    correct = pred.eq(labels.view_as(pred)).sum().cpu().item()
    acc_test = correct / float(len(test_graphs))
    print("acc test: ",acc_test*100)
Example #9
def main():
    # Training settings
    # Note: Hyper-parameters need to be tuned in order to obtain results reported in the paper.
    parser = argparse.ArgumentParser(description='PyTorch graph convolutional neural net for whole-graph classification')
    parser.add_argument('--dataset', type=str, default="MUTAG",
                        help='name of dataset (default: MUTAG)')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--iters_per_epoch', type=int, default=50,
                        help='number of iterations per each epoch (default: 50)')
    parser.add_argument('--epochs', type=int, default=350,
                        help='number of epochs to train (default: 350)')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed for splitting the dataset into 10 (default: 0)')
    parser.add_argument('--fold_idx', type=int, default=0,
                        help='the index of fold in 10-fold validation. Should be less than 10.')
    parser.add_argument('--num_layers', type=int, default=5,
                        help='number of layers INCLUDING the input one (default: 5)')
    parser.add_argument('--num_mlp_layers', type=int, default=2,
                        help='number of layers for MLP EXCLUDING the input one (default: 2). 1 means linear model.')
    parser.add_argument('--hidden_dim', type=int, default=64,
                        help='number of hidden units (default: 64)')
    parser.add_argument('--final_dropout', type=float, default=0.5,
                        help='final layer dropout (default: 0.5)')
    parser.add_argument('--graph_pooling_type', type=str, default="sum", choices=["sum", "average"],
                        help='Pooling over nodes in a graph: sum or average')
    parser.add_argument('--neighbor_pooling_type', type=str, default="sum", choices=["sum", "average", "max"],
                        help='Pooling over neighboring nodes: sum, average or max')
    parser.add_argument('--opt', type=str, default="adam", choices=["adam", "sgd"])
    parser.add_argument('--learn_eps', action="store_true",
                                        help='Whether to learn the epsilon weighting for the center nodes. Does not affect training accuracy though.')
    parser.add_argument('--degree_as_tag', action="store_true",
    					help='let the input node features be the degree of nodes (heuristics for unlabeled graph)')
    parser.add_argument('--filename', type = str, default = "",
                                        help='output file')
    parser.add_argument('--random', type=int, default=None,
                                        help='the range of random features (default: None). None means it does not add random features.')
    args = parser.parse_args()

    #set up seeds and gpu device
    torch.manual_seed(0)
    np.random.seed(0)    
    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(0)

    if args.dataset in ['TRIANGLE', 'TRIANGLE_EX', 'LCC', 'LCC_EX', 'MDS', 'MDS_EX']:
        node_classification = True
        train_graphs, _ = load_data(f'dataset/{args.dataset}/{args.dataset}_train.txt', args.degree_as_tag)
        test_graphs, _ = load_data(f'dataset/{args.dataset}/{args.dataset}_test.txt', args.degree_as_tag)
        for g in train_graphs + test_graphs:
            if args.random:
                # drop node features; the model draws random features instead
                g.node_features = torch.ones(g.node_features.shape[0], 0)
            else:
                # constant one-dimensional node features
                g.node_features = torch.ones(g.node_features.shape[0], 1)
        if args.dataset in ['TRIANGLE', 'TRIANGLE_EX', 'MDS', 'MDS_EX']:
            num_classes = 2
        elif args.dataset in ['LCC', 'LCC_EX']:
            num_classes = 3
        else:
            assert False, f'unsupported dataset: {args.dataset}'
        if args.dataset in ['MDS', 'MDS_EX']:
            get_labels = lambda batch_graph, model: torch.LongTensor(MDS_LOCAL(model, batch_graph))
            criterion = nn.CrossEntropyLoss()
        else:
            get_labels = lambda batch_graph, model: torch.LongTensor(sum([graph.node_tags for graph in batch_graph], []))
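            # weight classes inversely to their training frequency to counter
            # class imbalance in the node-classification datasets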
            bc = [0 for i in range(num_classes)]
            for G in train_graphs:
                for t in G.node_tags:
                    bc[t] += 1
            w = torch.FloatTensor([max(bc) / bc[i] for i in range(num_classes)]).to(device)
            criterion = nn.CrossEntropyLoss(weight=w)
    else:
        node_classification = False
        graphs, num_classes = load_data(f'dataset/{args.dataset}/{args.dataset}.txt', args.degree_as_tag)
        
        ##10-fold cross validation. Conduct an experiment on the fold specified by args.fold_idx.
        train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)
        
        criterion = nn.CrossEntropyLoss()
        get_labels = lambda batch_graph, model: torch.LongTensor([graph.label for graph in batch_graph])

    model = GraphCNN(args.num_layers, args.num_mlp_layers,
                     train_graphs[0].node_features.shape[1], args.hidden_dim,
                     num_classes, args.final_dropout, args.learn_eps,
                     args.graph_pooling_type, args.neighbor_pooling_type,
                     args.random, node_classification, device).to(device)

    if args.opt == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)
    elif args.opt == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.epochs, gamma=0.5)

    for epoch in range(1, args.epochs + 1):
        avg_loss = train(args, model, device, train_graphs, optimizer,
                         criterion, get_labels, epoch)
        # step the LR scheduler after the optimizer has stepped (PyTorch >= 1.1)
        scheduler.step()
        acc_train, acc_test = test(args, model, device, train_graphs, test_graphs, num_classes, get_labels, epoch)

        if not args.filename == "":
            with open(args.filename, 'w') as f:
                f.write("%f %f %f" % (avg_loss, acc_train, acc_test))
                f.write("\n")
        print("")

        print(model.eps)