Example #1
    def load_data(self):
        self.args = get_args()
        self.args.inductive = self.args.dataset in ['reddit', 'ppi']
        self.args.sigmoid = self.args.dataset in ['ppi']

        start_time = perf_counter()
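        # reddit/ppi ship with an inductive split; the citation datasets are transductive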
        if not self.args.inductive:
            self.graph, self.feats, self.labels, self.label_max, self.idx, _ = load_citation(
                self.args.dataset)
        else:
            self.graph, self.graph_test, self.feats, self.labels, self.label_max, self.idx = load_inductive_dataset(
                self.args.dataset)
        print("Data load and preprocess done: {:.4f}s".format(perf_counter() -
                                                              start_time))
Example #2
parser.add_argument('--order_1', type=int, default=1)
parser.add_argument('--sct_inx1', type=int, default=1)
parser.add_argument('--order_2', type=int, default=1)
parser.add_argument('--sct_inx2', type=int, default=3)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

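# Fix random seeds for reproducibility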
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
#adj, features, labels, idx_train, idx_val, idx_test = load_data()
adj, A_tilde, adj_sct1, adj_sct2, adj_sct4, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda)
# Model and optimizer
model = GCN(nfeat=features.shape[1],
            para3=args.hid1,
            para4=args.hid2,
            nclass=labels.max().item() + 1,
            dropout=args.dropout,
            smoo=args.smoo)

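# Restore pretrained weights from disk before moving the model and inputs to the GPU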
PATH = "state_dict_model.pt"
model.load_state_dict(torch.load(PATH))
if args.cuda:
    model = model.cuda()
    features = features.cuda()
    A_tilde = A_tilde.cuda()
    adj = adj.cuda()
Example #3
# Arguments
args = get_citation_args()

if args.tuned:
    if args.model == "SGC":
        with open("{}-tuning/{}.txt".format(args.model, args.dataset),
                  'rb') as f:
            args.weight_decay = pkl.load(f)['weight_decay']
            print("using tuned weight decay: {}".format(args.weight_decay))
    else:
        raise NotImplementedError

# setting random seeds
set_seed(args.seed, args.cuda)

adj, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda)

model = get_model(args.model, features.size(1),
                  labels.max().item() + 1, args.hidden, args.dropout,
                  args.cuda)

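# For SGC, the degree-hop feature propagation is precomputed once up front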
if args.model == "SGC":
    features, precompute_time = sgc_precompute(features, adj, args.degree,
                                               args.alpha)
    print("{:.4f}s".format(precompute_time))


def train_regression(model,
                     train_features,
                     train_labels,
                     val_features,
Example #4
def train():
    acc_test_list = []
    for ii in range(args.run_time):
        #seed = gen_seeds()
        seed = args.seed
        print("dataset:{}, epochs:{}, weight_decay:{},lr:{},dropout:{},seed:{}, alpha:{}, features_perturbation: rate1:{},lambda1:{}; adj_pertubation: rate2:{},lambda2:{}".format(
            args.dataset, args.epochs, args.weight_decay, args.lr, args.dropout, seed, args.alpha, args.rate1, args.lambda1, args.rate2,args.lambda2))
        args.cuda = not args.no_cuda and torch.cuda.is_available()

        np.random.seed(seed)
        torch.manual_seed(seed)
        if args.cuda:
            torch.cuda.manual_seed(seed)

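        # R8 is a text corpus and uses load_corpus; the other datasets are citation graphs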
        if args.dataset == "R8":
            adj, features, labels, idx_train, idx_val, idx_test = load_corpus(
                args.dataset, args.normalization, args.cuda)
        else:
            adj, features, labels, idx_train, idx_val, idx_test, indices = load_citation(
                args.dataset, args.normalization, args.cuda)

        model = GCN(nfeat=features.shape[1],
                    nhid=args.hidden,
                    nclass=labels.max().item() + 1,
                    dropout=args.dropout)
        if args.cuda:
            model.cuda()
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_train = idx_train.cuda()
            idx_val = idx_val.cuda()
            idx_test = idx_test.cuda()

        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

        # Visdom windows for live training curves
        global x, y, z
        x, y, z = 0, 0, 0
        win = vis.line(X=np.array([x]),
                       Y=np.array([y]),
                       opts=dict(title='loss_CE'))
        global b_0, b_1, b_2
        b_0, b_1, b_2 = 0, 0, 0
        win_b0 = vis.line(X=np.array([x]),
                          Y=np.array([b_0]),
                          opts=dict(title='b'))
        def train(epoch):
            t = time.time()
            model.train()
            optimizer.zero_grad()
            output = model(features, adj)
            output = F.log_softmax(output, dim=1)
            loss_CE = F.nll_loss(output[idx_train], labels[idx_train])
            #l2_reg = sum(torch.sum(param ** 2) for param in model.reg_params)
            acc_train = accuracy(output[idx_train], labels[idx_train])
            x = epoch
            y = loss_CE.detach().cpu().numpy()
            vis.line(X=np.array([x]),
                     Y=np.array([y]),
                     win=win,
                     update='append')

            #loss_train = loss_CE + args.weight_decay /2 *l2_reg
            loss_train = loss_CE
            loss_train.backward()
            optimizer.step()

            if not args.fastmode:
                model.eval()
                output = model(features, adj)
                output = F.log_softmax(output, dim=1)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = accuracy(output[idx_val], labels[idx_val])

            if ii == 0 and epoch % 10 == 0:
                print('Epoch: {:04d}'.format(epoch + 1),
                      'loss_train: {:.4f}'.format(loss_train.item()),
                      'loss_CE: {:.4f}'.format(loss_CE.item()),
                      'acc_train: {:.4f}'.format(acc_train.item()),
                      'loss_val: {:.4f}'.format(loss_val.item()),
                      'acc_val: {:.4f}'.format(acc_val.item()),
                      'time: {:.4f}s'.format(time.time() - t))
            return loss_val.item()
        def test():
            model.eval()
            output = model(features, adj)
            output = F.log_softmax(output, dim=1)
            loss_test = F.nll_loss(output[idx_test], labels[idx_test])
            acc_test = accuracy(output[idx_test], labels[idx_test])
            print("Test set results:",
                  "loss_test= {:.4f}".format(loss_test.item()),
                  "accuracy= {:.4f}".format(acc_test.item()))
            return acc_test

        t_total = time.time()
        loss_values = []
        for epoch in range(args.epochs):
            loss_values.append(train(epoch))
            if epoch > args.early_stop and loss_values[-1] > np.mean(
                    loss_values[-(args.early_stop + 1):-1]):
                print("Early stopping...")
                break
        print("Optimization Finished!")
        print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
        acc_test = test()
        acc_test_list.append(acc_test)
        acc_test = acc_test.view(1, 1)
        acc_test = acc_test.cpu().numpy()

        with open("./results/{}/{}.txt".format(args.dataset, args.dataset), 'a') as f:
            # f.write("不丢弃dropout,加上b= {:.07f}×(output-output_1),epoch : {:04d},weight_decacy={},系数lam ={:.07f}".format
            f.write("dataset{} epoch : {:04d},weight_decacy={}, lr{},seed{},dropout{},features_perturbation: rate1:{},lambda1:{}; adj_pertubation: rate2:{},lambda2:{}".format
                    (args.dataset, args.epochs, args.weight_decay, args.lr, seed, args.dropout,args.rate1,args.lambda1,args.rate2,args.lambda2))
            np.savetxt(f, acc_test, fmt="%.6f")
    acc_test_list = torch.FloatTensor(acc_test_list)
    acc_test_std = torch.std(acc_test_list)
    avg_test = torch.mean(acc_test_list)

    avg_test = avg_test.view(1, 1)
    avg_test = avg_test.cpu().numpy()
    acc_test_std = acc_test_std.view(1, 1)
    acc_test_std = acc_test_std.cpu().numpy()
    print("总共做类{}次实验,平均值为:{:.04f}".format(args.run_time, avg_test.item()))
    print("总共做类{}次实验,误差值为:{:.04f}".format(args.run_time,acc_test_std.item()))
    with open("./results/{}/{}.txt".format(args.dataset, args.dataset), 'a') as f:
        f.write("总共做类{}次实验,平均值为:{:.04f}\n".format(args.run_time, avg_test.item()))
        f.write("总共做类{}次实验,误差值为:{:.04f}\n".format(args.run_time, acc_test_std.item()))
Example #5
def main():
    args = get_citation_args()
    n_way = args.n_way
    train_shot = args.train_shot
    test_shot = args.test_shot
    step = args.step
    node_num = args.node_num
    iteration = args.iteration

    accuracy_meta_test = []
    total_accuracy_meta_test = []

    set_seed(args.seed, args.cuda)

    adj, features, labels = load_citation(args.dataset, args.normalization,
                                          args.cuda)

    if args.dataset == 'cora':
        class_label = [0, 1, 2, 3, 4, 5, 6]
        combination = list(combinations(class_label, n_way))
    elif args.dataset == 'citeseer':
        node_num = 3327
        iteration = 15
        class_label = [0, 1, 2, 3, 4, 5]
        combination = list(combinations(class_label, n_way))

    if args.model == 'SGC':
        features = sgc_precompute(features, adj, args.degree)

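    # Leave-out cross-validation: each class combination is held out for meta-testing; the rest are used for meta-training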
    for i in range(len(combination)):
        print('Cross_Validation: ', i + 1)
        test_label = list(combination[i])
        train_label = [n for n in class_label if n not in test_label]
        print('Cross_Validation {} Train_Label_List {}: '.format(
            i + 1, train_label))
        print('Cross_Validation {} Test_Label_List {}: '.format(
            i + 1, test_label))
        model = get_model(args.model, features.size(1), n_way, args.cuda)

        for j in range(iteration):
            labels_local = labels.clone().detach()
            select_class = random.sample(train_label, n_way)
            print('Cross_Validation {} ITERATION {} Train_Label: {}'.format(
                i + 1, j + 1, select_class))
            class1_idx = []
            class2_idx = []
            for k in range(node_num):
                if (labels_local[k] == select_class[0]):
                    class1_idx.append(k)
                    labels_local[k] = 0
                elif (labels_local[k] == select_class[1]):
                    class2_idx.append(k)
                    labels_local[k] = 1
            for m in range(step):
                class1_train = random.sample(class1_idx, train_shot)
                class2_train = random.sample(class2_idx, train_shot)
                class1_test = [
                    n1 for n1 in class1_idx if n1 not in class1_train
                ]
                class2_test = [
                    n2 for n2 in class2_idx if n2 not in class2_train
                ]
                train_idx = class1_train + class2_train
                random.shuffle(train_idx)
                test_idx = class1_test + class2_test
                random.shuffle(test_idx)

                model = train_regression(model, features[train_idx],
                                         labels_local[train_idx], args.epochs,
                                         args.weight_decay, args.lr)
                acc_query = test_regression(model, features[test_idx],
                                            labels_local[test_idx])
                reset_array()

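        # Checkpoint the meta-trained weights; the meta-test loop below fine-tunes fresh copies from this file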
        torch.save(model.state_dict(), 'model.pkl')

        labels_local = labels.clone().detach()
        select_class = random.sample(test_label, 2)
        class1_idx = []
        class2_idx = []
        reset_array()
        print('Cross_Validation {} Test_Label {}: '.format(
            i + 1, select_class))

        for k in range(node_num):
            if (labels_local[k] == select_class[0]):
                class1_idx.append(k)
                labels_local[k] = 0
            elif (labels_local[k] == select_class[1]):
                class2_idx.append(k)
                labels_local[k] = 1

        for m in range(step):
            class1_train = random.sample(class1_idx, test_shot)
            class2_train = random.sample(class2_idx, test_shot)
            class1_test = [n1 for n1 in class1_idx if n1 not in class1_train]
            class2_test = [n2 for n2 in class2_idx if n2 not in class2_train]
            train_idx = class1_train + class2_train
            random.shuffle(train_idx)
            test_idx = class1_test + class2_test
            random.shuffle(test_idx)

            model_meta_trained = get_model(args.model, features.size(1), n_way,
                                           args.cuda).cuda()
            model_meta_trained.load_state_dict(torch.load('model.pkl'))

            model_meta_trained = train_regression(model_meta_trained,
                                                  features[train_idx],
                                                  labels_local[train_idx],
                                                  args.epochs,
                                                  args.weight_decay, args.lr)
            acc_test = test_regression(model_meta_trained, features[test_idx],
                                       labels_local[test_idx])
            accuracy_meta_test.append(acc_test)
            total_accuracy_meta_test.append(acc_test)
            reset_array()
        if args.dataset in ('cora', 'citeseer'):
            with open('{}.txt'.format(args.dataset), 'a') as f:
                f.write('Cross_Validation: {} Meta-Test_Accuracy: {}\n'.format(
                    i + 1,
                    torch.tensor(accuracy_meta_test).numpy().mean()))
        accuracy_meta_test = []
    if args.dataset in ('cora', 'citeseer'):
        with open('{}.txt'.format(args.dataset), 'a') as f:
            f.write('Dataset: {}, Train_Shot: {}, Test_Shot: {}\n'.format(
                args.dataset, train_shot, test_shot))
            f.write('Total_Meta-Test_Accuracy: {}\n'.format(
                torch.tensor(total_accuracy_meta_test).numpy().mean()))
            f.write('\n\n\n')
Example #6
if args.tuned:
    if args.model == "SGC" or args.model == "KGCN":
        with open("{}-tuning/{}.txt".format(args.model, args.dataset), 'rb') as f:
            args.weight_decay = pkl.load(f)['weight_decay']
            print("using tuned weight decay: {}".format(args.weight_decay))
    else:
        raise NotImplementedError

# setting random seeds
set_seed(args.seed, args.cuda)

adj, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda,
    args.invlap_alpha, args.shuffle)

### NOISE TO FEATURES
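# The noise helpers below operate on NumPy arrays, so convert the features first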
if args.noise != "None":
    features = features.numpy()

if args.noise == "gaussian":
    features = gaussian(features,
                        mean=args.gaussian_opt[0],
                        std=args.gaussian_opt[1])
if args.noise == "gaussian_mimic":
    features = gaussian_mimic(features)
if args.noise == "add_gaussian":
    features = gaussian(features, 
Example #7
parser.add_argument('--dev', type=int, default=0, help='device id')
parser.add_argument('--alpha', type=float, default=0.1, help='decay factor')
parser.add_argument('--rmax', type=float, default=1e-5, help='threshold.')
parser.add_argument('--rrz', type=float, default=0.0, help='r.')
parser.add_argument('--bias', default='none', help='bias.')
parser.add_argument('--batch', type=int, default=64, help='batch size')
args = parser.parse_args()
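# Seed Python, NumPy, and PyTorch RNGs for reproducibility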
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)

print("--------------------------")
print(args)

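# Note: this load_citation variant takes propagation hyperparameters (alpha, rmax, rrz)
# and returns no adjacency matrix, suggesting GBP-style precomputed propagation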
features, labels, idx_train, idx_val, idx_test = load_citation(
    args.data, args.alpha, args.rmax, args.rrz)

checkpt_file = 'pretrained/' + uuid.uuid4().hex + '.pt'

model = GnnBP(nfeat=features.shape[1],
              nlayers=args.layer,
              nhidden=args.hidden,
              nclass=int(labels.max()) + 1,
              dropout=args.dropout,
              bias=args.bias).cuda(args.dev)

optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

loss_fn = nn.CrossEntropyLoss()
Example #8
    train_time = perf_counter() - start_time
    return model, train_time


def test_model(model, feats, labels, test_idx):
    start_time = perf_counter()
    model.eval()
    output = model(feats)
    acc_mic, acc_mac = calc_f1(labels[test_idx], output[test_idx])
    test_time = perf_counter() - start_time
    return acc_mic, acc_mac, test_time


start_time = perf_counter()
if not args.inductive:
    graph, feats, labels, label_max, idx, _ = load_citation(args.dataset)
else:
    graph, graph_test, feats, labels, label_max, idx = load_inductive_dataset(
        args.dataset)

trial = 10
acc_acc = 0
acc_train_time = 0
acc_test_time = 0
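# Repeat training and evaluation over independent trials, accumulating accuracy and timing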
for _ in range(trial):
    model = get_auto_model(feats.size(1), graph, args.hidden_dim,
                           args.step_num, args.sample_num, args.nonlinear,
                           args.aggregator, label_max, args.dropout)

    model, train_time = train_model(model, feats, labels, idx["train"],
                                    idx["val"], args.epochs, args.weight_decay,
Example #9
import os
print(os.getcwd())

if not args.non_tuned:
    if args.model == "SGC":
        with open("{}-tuning/{}.txt".format(args.model, args.dataset),
                  'rb') as f:
            args.weight_decay = pkl.load(f)['weight_decay']
            print("using tuned weight decay: {}".format(args.weight_decay))
    else:
        raise NotImplementedError

# setting random seeds
set_seed(args.seed, args.cuda)

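# The modified/attacked flags presumably select adversarially perturbed versions of the graph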
adj, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda, args.modified, args.attacked)

model = get_model(args.model, features.size(1),
                  labels.max().item() + 1, args.hidden, args.dropout,
                  args.cuda)

if args.model == "SGC":
    features, precompute_time = sgc_precompute(features, adj, args.degree)
    print("{:.4f}s".format(precompute_time))
model_file = 'model_save/' + args.dataset + '.pkl'


def train_regression(model,
                     train_features,
                     train_labels,
                     val_features,
Example #10
File: sgc.py  Project: kpzhang/Meta-GNN
def main(args):
    step = args.step
    set_seed(args.seed)

    adj, features, labels = load_citation(args.dataset, args.normalization)

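    # SGC: propagate features over args.degree hops once up front; the classifier itself stays linear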
    features = sgc_precompute(features, adj, args.degree)

    if args.dataset == 'citeseer':
        node_num = 3327
        class_label = [0, 1, 2, 3, 4, 5]
        combination = list(combinations(class_label, 2))
    elif args.dataset == 'cora':
        node_num = 2708
        class_label = [0, 1, 2, 3, 4, 5, 6]
        combination = list(combinations(class_label, 2))

    config = [('linear', [args.hidden, features.size(1)]),
              ('linear', [args.n_way, args.hidden])]

    device = torch.device('cuda')

    for i in range(len(combination)):
        print("Cross Validation: {}".format((i + 1)))

        maml = Meta(args, config).to(device)

        test_label = list(combination[i])
        train_label = [n for n in class_label if n not in test_label]
        print('Cross Validation {} Train_Label_List: {} '.format(
            i + 1, train_label))
        print('Cross Validation {} Test_Label_List: {} '.format(
            i + 1, test_label))

        for j in range(args.epoch):
            x_spt, y_spt, x_qry, y_qry = sgc_data_generator(
                features, labels, node_num, train_label, args.task_num,
                args.n_way, args.k_spt, args.k_qry)
            accs = maml.forward(x_spt, y_spt, x_qry, y_qry)
            print('Step:', j, '\tMeta_Training_Accuracy:', accs)
            if j % 100 == 0:
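                # Every 100 meta-training steps, checkpoint the learner and run meta-testing on the held-out classes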
                torch.save(maml.state_dict(), 'maml.pkl')
                meta_test_acc = []
                for k in range(step):
                    model_meta_trained = Meta(args, config).to(device)
                    model_meta_trained.load_state_dict(torch.load('maml.pkl'))
                    model_meta_trained.eval()
                    x_spt, y_spt, x_qry, y_qry = sgc_data_generator(
                        features, labels, node_num, test_label, args.task_num,
                        args.n_way, args.k_spt, args.k_qry)
                    accs = model_meta_trained.forward(x_spt, y_spt, x_qry,
                                                      y_qry)
                    meta_test_acc.append(accs)
                if args.dataset in ('citeseer', 'cora'):
                    with open('{}.txt'.format(args.dataset), 'a') as f:
                        f.write(
                            'Cross Validation:{}, Step: {}, Meta-Test_Accuracy: {}\n'
                            .format(
                                i + 1, j,
                                np.array(meta_test_acc).mean(axis=0).astype(
                                    np.float16)))
Example #11
                    action='store_true',
                    default=False,
                    help='GCN* model.')
parser.add_argument('--test',
                    action='store_true',
                    default=False,
                    help='evaluation on test set.')
args = parser.parse_args()
print(args)

support00 = np.load('supports/' + args.data + '_support00.npy')
support01 = np.load('supports/' + args.data + '_support01.npy')
support10 = np.load('supports/' + args.data + '_support10.npy')
support11 = np.load('supports/' + args.data + '_support11.npy')
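# Each support is stored as two .npy arrays (COO indices in *00/*10, values in *01/*11)
# and reassembled into sparse tensors below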

adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.data)
cudaid = "cuda:" + str(args.dev)
device = torch.device(cudaid)
features = features.to(device)
adj = adj.to(device)
support0 = torch.sparse_coo_tensor(support00, support01,
                                   (adj.shape[0], adj.shape[0]))
support1 = torch.sparse_coo_tensor(support10, support11,
                                   (adj.shape[0], adj.shape[0]))
support0 = support0.to(device).to(torch.float32)
support1 = support1.to(device).to_dense().to(torch.float32)


def build_and_train(hype_space):
    modelname = 'nof'
    modeltype = {
Example #12
normalization_choices = [
    '',
    'AugNormAdj',
    'LeftNorm',
    'InvLap',
    'CombLap',
    'SymNormAdj',
    'SymNormLap'
]


args = get_feat_args()

adj, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, normalization=args.normalization, cuda=False)


model = get_model(model_opt=args.model,
                  nfeat=features.size(1),
                  nclass=labels.max().item()+1,
                  nhid=args.hidden,
                  dropout=args.dropout,
                  cuda=False)


# TODO: Calculate time here
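# The fetched transformation presumably returns forward/inverse maps and eigenvalues of the densified adjacency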
features = features.numpy()
transformer = fetch_transformation(args.preprocess)
forward, invert, evals = transformer(adj.to_dense())
Example #13
from models import get_model
from utils import sgc_precompute, load_citation, set_seed
from args import get_citation_args
import torch
from math import log  # used by the hyperopt search space below
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials

# Arguments
args = get_citation_args()

# setting random seeds
set_seed(args.seed, args.cuda)

# Hyperparameter optimization
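# weight decay is searched log-uniformly over [1e-10, 1e-4]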
space = {'weight_decay': hp.loguniform('weight_decay', log(1e-10), log(1e-4))}

adj, adj_dist, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda,
    gamma=args.gamma, degree=args.degree, L=args.L, K=args.K)
if args.model != "GCN":
    features, precompute_time = sgc_precompute(
        features, adj, adj_dist, args.degree, args.concat,
        args.L, args.K, idx_train, idx_val, idx_test)


def sgc_objective(space):
    if args.K:
        model = get_model(args.model, features[0][0].size(1),
                          labels.max().item() + 1, args.hidden, args.decay,
                          args.L, args.K, args.dropout, args.cuda)
    else:
        model = get_model(args.model, features.size(1),
                          labels.max().item() + 1, args.hidden, args.decay,
                          args.L, args.K, args.dropout, args.cuda)
    if args.model != 'GCN':
        if args.K:
            model, acc_val, _, _ = train_regression(
                model, features[0], labels[idx_train],
                features[1], labels[idx_val],
                features[2], labels[idx_test], idx_test, adj,
                args.epochs, space['weight_decay'], args.lr, args.dropout)
        else:
            model, acc_val, _, _ = train_regression(
                model, features[idx_train], labels[idx_train],
                features[idx_val], labels[idx_val],
                features[idx_test], labels[idx_test], idx_test, adj,
                args.epochs, space['weight_decay'], args.lr, args.dropout)
    else:
Example #14
File: citation.py  Project: ostapen/SGC
# Arguments
args = get_citation_args()

if args.tuned:
    if args.model == "SGC":
        with open("{}-tuning/{}.txt".format(args.model, args.dataset),
                  'rb') as f:
            args.weight_decay = pkl.load(f)['weight_decay']
            print("using tuned weight decay: {}".format(args.weight_decay))
    else:
        raise NotImplementedError

# setting random seeds
set_seed(args.seed, args.cuda)

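# This fork's load_citation additionally returns separate train/val adjacency matrices (cf. args.load_bigger_train)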
adj, train_adj, val_adj, features, labels, idx_train, idx_val, idx_test = load_citation(
    args.dataset, args.normalization, args.cuda, args.load_bigger_train)

model = get_model(args.model, features.size(1),
                  labels.max().item() + 1, args.hidden, args.dropout,
                  args.cuda)

if args.model == "SGC":
    features, precompute_time = sgc_precompute(features, adj, args.degree)
    print("{:.4f}s".format(precompute_time))


def train_regression(model,
                     train_features,
                     train_labels,
                     val_features,
                     val_labels,