Example no. 1
        cnt_wait = 0
        torch.save(model.state_dict(), args.save_name)
    else:
        cnt_wait += 1

    if cnt_wait == patience:
        print('Early stopping!')
        break

    loss.backward()
    optimiser.step()

print('Loading {}th epoch'.format(best_t))
model.load_state_dict(torch.load(args.save_name))

embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None)
train_embs = embeds[0, idx_train]
val_embs = embeds[0, idx_val]
test_embs = embeds[0, idx_test]

train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

tot = torch.zeros(1)
if torch.cuda.is_available():
    tot = tot.cuda()

accs = []

for _ in range(50):
    log = LogReg(hid_units, nb_classes)
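The fragment breaks off at the start of the linear-probe evaluation; the full
pattern appears in Example no. 2 below. For completeness, a minimal sketch of
the remaining loop body, assuming the names used in that example:

    # Sketch only: LogReg, xent, train_embs/train_lbls, test_embs/test_lbls
    # and accs are assumed to be defined as in Example no. 2.
    opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)

    for _ in range(100):
        log.train()
        opt.zero_grad()
        loss = xent(log(train_embs), train_lbls)
        loss.backward()
        opt.step()

    preds = torch.argmax(log(test_embs), dim=1)
    accs.append(100 * torch.sum(preds == test_lbls).float() / test_lbls.shape[0])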
Example no. 2
import os

import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from tqdm import tqdm, trange

# Project-local modules (assumed layout, following the original DGI codebase).
from models import DGI, LogReg
from utils import process


def main():

    saved_graph = os.path.join('assets', 'saved_graphs', 'best_dgi.pickle')
    saved_logreg = os.path.join('assets', 'saved_graphs', 'best_logreg.pickle')

    dataset = 'cora'

    # training params
    batch_size = 1
    nb_epochs = 10000
    patience = 25
    lr = 0.001
    l2_coef = 0.0
    drop_prob = 0.0
    hid_units = 512
    sparse = True
    nonlinearity = 'prelu' # special name to separate parameters

    adj, features, labels, idx_train, idx_test, idx_val = process.load_data(dataset)

    features, _ = process.preprocess_features(features)

    nb_nodes = features.shape[0]
    ft_size = features.shape[1]
    nb_classes = labels.shape[1]

    # Add self-loops once, then symmetrically normalise the adjacency.
    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = adj.todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    print("Training Nodes: {}, Testing Nodes: {}, Validation Nodes: {}".format(len(idx_train), len(idx_test), len(idx_val)))

    model = DGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()

    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    if not os.path.exists(saved_graph):
        pbar = trange(nb_epochs)
        for epoch in pbar:
            model.train()
            optimiser.zero_grad()

            idx = np.random.permutation(nb_nodes)
            shuf_fts = features[:, idx, :]

            lbl_1 = torch.ones(batch_size, nb_nodes)
            lbl_2 = torch.zeros(batch_size, nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1)

            if torch.cuda.is_available():
                shuf_fts = shuf_fts.cuda()
                lbl = lbl.cuda()

            logits = model(features, shuf_fts, adj, sparse, None, None, None)

            loss = b_xent(logits, lbl)

            pbar.set_description('Loss: {:.4f}'.format(loss.item()))

            if loss < best:
                best = loss
                best_t = epoch
                cnt_wait = 0
                torch.save(model.state_dict(), saved_graph)
            else:
                cnt_wait += 1

            if cnt_wait == patience:
                tqdm.write('Early stopping!')
                break

            loss.backward()
            optimiser.step()


    print('Loading {}th Epoch'.format(best_t) if best_t else 'Loading Existing Graph')
    model.load_state_dict(torch.load(saved_graph))

    embeds, _ = model.embed(features, adj, sparse, None)
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)

    tot = torch.zeros(1)
    if torch.cuda.is_available():
        tot = tot.cuda()

    accs = []

    print("\nValidation:")
    pbar = trange(50)
    for _ in pbar:
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)

        pat_steps = 0
        best_acc = torch.zeros(1)
        if torch.cuda.is_available():
            log.cuda()
            best_acc = best_acc.cuda()
        for _ in range(100):
            log.train()
            opt.zero_grad()

            logits = log(train_embs)
            loss = xent(logits, train_lbls)

            loss.backward()
            opt.step()

        logits = log(test_embs)
        preds = torch.argmax(logits, dim=1)
        acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
        accs.append(acc * 100)
        pbar.desc = "Accuracy: {:.2f}%".format(100 * acc)
        tot += acc

    torch.save(log.state_dict(), saved_logreg)

    accs = torch.stack(accs)
    print('Average Accuracy: {:.2f}%'.format(accs.mean()))
    print('Standard Deviation: {:.3f}'.format(accs.std()))

    print("\nTesting")
    logits = log(val_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
    print("Accuracy: {:.2f}%".format(100 * acc))
Example no. 3
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

sp_adj_ori = sp_adj.clone()
if attack_mode != 'A':
    features_ori = features.clone()

xent = nn.CrossEntropyLoss()
if args.gpu == "":
    model.load_state_dict(
        torch.load(args.model, map_location=torch.device('cpu')))
else:
    model.load_state_dict(torch.load(args.model))
print("Load model")

embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None)
train_embs = embeds[0, idx_train]
val_embs = embeds[0, idx_val]
test_embs = embeds[0, idx_test]

train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

tot = torch.zeros(1)
if torch.cuda.is_available():
    tot = tot.cuda()

accs = []

for _ in range(50):
Example no. 4
    if args.hinge:
        loss_ori = mi_loss(encoder, sp_adj_ori, features_ori, nb_nodes, b_xent, batch_size, sparse)
        RV = loss - loss_ori
        print("RV: {}; RV-tau: {}; MI-nature: {}; MI-worst: {}".format(RV.detach().cpu().numpy(),
                                                                       (RV - args.tau).detach().cpu().numpy(),
                                                                       loss_ori.detach().cpu().numpy(),
                                                                       loss.detach().cpu().numpy()))
        if RV - args.tau < 0:
            loss = loss_ori

    if args.show_task and epoch % 5 == 0:
        adv = atm(sp_adj_ori, sp_A, None, n_flips, b_xent=b_xent, step_size=20,
                  eps_x=args.epsilon, step_size_x=1e-3,
                  iterations=50, should_normalize=True, random_restarts=False, make_adv=True)
        if attack_mode == 'A':
            embeds, _ = encoder.embed(features_ori, adv, sparse, None)
        elif attack_mode == 'X':
            embeds, _ = encoder.embed(adv, sp_adj_ori, sparse, None)
        elif attack_mode == 'both':
            embeds, _ = encoder.embed(adv[1], adv[0], sparse, None)
        acc_adv = task(embeds)

        embeds, _ = encoder.embed(features_ori, sp_adj_ori, sparse, None)
        acc_nat = task(embeds)

        print('Epoch:{} Step_size: {:.4f} Loss:{:.4f} Natural_Acc:{:.4f} Adv_Acc:{:.4f}'.format(
            epoch, step_size, loss.detach().cpu().numpy(), acc_nat, acc_adv))
    else:
        print('Epoch:{} Step_size: {:.4f} Loss:{:.4f}'.format(epoch, step_size, loss.detach().cpu().numpy()))

    if loss < best:
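The fragment cuts off at the checkpoint test. Following the early-stopping
pattern of Examples no. 1, 2 and 5, the branch plausibly continues along these
lines (encoder, cnt_wait, best_t and args.save_name are assumptions carried
over from those examples):

        # Sketch of the checkpoint/early-stopping branch.
        best = loss
        best_t = epoch
        cnt_wait = 0
        torch.save(encoder.state_dict(), args.save_name)
    else:
        cnt_wait += 1

    if cnt_wait == patience:
        print('Early stopping!')
        break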
Example no. 5
    print("wait: " + str(cnt_wait))
    if cnt_wait == patience:
        print('Early stopping!')
        break

    loss.backward()
    optimiser.step()

print('Loading {}th epoch'.format(best_t))

model.load_state_dict(
    torch.load('best_dgi_head_{}_nhidden_{}_exp_{}.pkl'.format(
        args.nb_heads, args.hidden, exp)))

embeds, _ = model.embed(features, nor_adjs, sparse, None)
train_embs = embeds[0, idx_train]
val_embs = embeds[0, idx_val]
test_embs = embeds[0, idx_test]

train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

tot = torch.zeros(1)
if args.cuda:
    tot = tot.cuda()
tot_mac = 0
accs = []
mac_f1 = []
Example no. 6
for epoch in range(nb_epochs):
    model.train()
    optimiser.zero_grad()

    idx = np.random.permutation(nb_nodes)
    shuf_fts = features[:, idx, :]

    lbl_1 = torch.ones(batch_size, nb_nodes)
    lbl_2 = torch.zeros(batch_size, nb_nodes)
    lbl = torch.cat((lbl_1, lbl_2), 1)

    logits = model(features, shuf_fts, adj, sparse, None, None, None)
    loss = b_xent(logits, lbl)

    # Update the encoder on the contrastive loss.
    loss.backward()
    optimiser.step()

    # Cluster the detached node embeddings to monitor training.
    embed, _ = model.embed(features, adj, sparse, None)
    pred = kmeans.fit_predict(embed.squeeze(0).detach().cpu().numpy())

    # logits = model(features, adj, sparse)
    # embed = model.embed(features, adj, sparse)
    # loss = b_xent(logits, adj)

    # pred = kmeans.fit_predict(embed.data.numpy())

    cm = metrics.clustering_metrics(labels.tolist(), pred.tolist())
    acc, nmi, ari = cm.Evaluation_Cluster_Model_From_Label()
    mod = metrics.get_modularity(pred.tolist(), g)
    print(
        "epoch={:d} loss={:.4f} nmi={:.4f} ari={:.4f} acc={:.4f} mod={:.4f}".
        format(epoch, loss.item(), nmi, ari, acc, mod))
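The loop above assumes a kmeans object and a metrics helper constructed
beforehand. A minimal sketch of that setup, using scikit-learn's KMeans (the
project-local metrics module and the cluster count nb_classes are assumptions):

# Sketch of the setup assumed by the training loop above.
from sklearn.cluster import KMeans

import metrics  # project-local: clustering_metrics, get_modularity

kmeans = KMeans(n_clusters=nb_classes, n_init=10)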
Example no. 7
def process_transductive(dataset, gnn_type='GCNConv', K=None, random_init=False, runs=10, drop_sigma=False, just_plot=False):
    dataset_str = dataset
    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset = Planetoid("./geometric_datasets"+'/'+dataset,
                        dataset,
                        transform=norm_features)[0]

    # training params
    batch_size = 1 # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    xent = nn.CrossEntropyLoss()
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item() + 1  # labels are 0-based
    features = dataset.x
    labels = dataset.y
    edge_index = dataset.edge_index
    edge_index, _ = torch_geometric.utils.add_remaining_self_loops(edge_index)

    mask_train = dataset.train_mask
    mask_val = dataset.val_mask
    mask_test = dataset.test_mask

    model_name = get_model_name(dataset_str, gnn_type, K, random_init=random_init, drop_sigma=drop_sigma)
    # Truncate any previous results file for this configuration.
    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        pass

    accs = []

    for i in range(runs): 
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
        print(model, model_name, drop_sigma)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr)

        if torch.cuda.is_available():
            print('Using CUDA')
            features = features.cuda()
            labels = labels.cuda()
            edge_index = edge_index.cuda()
            mask_train = mask_train.cuda()
            mask_val = mask_val.cuda()
            mask_test = mask_test.cuda()
            model = model.cuda()

        best_t = train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name, K=K, random_init=random_init, drop_sigma=drop_sigma)

        xent = nn.CrossEntropyLoss()
        print('Loading {}th epoch'.format(best_t))
        print(model, model_name)
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        embeds, _ = model.embed(features, edge_index, None, standardise=False)
        if just_plot:
            plot_tsne(embeds, labels, model_name)
            exit(0)
        train_embs = embeds[mask_train, :]
        val_embs = embeds[mask_val, :]
        test_embs = embeds[mask_test, :]

        train_lbls = labels[mask_train]
        val_lbls = labels[mask_val]
        test_lbls = labels[mask_test]

        tot = torch.zeros(1)
        if torch.cuda.is_available():
            tot = tot.cuda()

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)

            pat_steps = 0
            best_acc = torch.zeros(1)
            if torch.cuda.is_available():
                log.cuda()
                best_acc = best_acc.cuda()
            for _ in range(150):
                log.train()
                opt.zero_grad()

                logits = log(train_embs)
                loss = xent(logits, train_lbls)
                
                loss.backward()
                opt.step()

            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            accs.append(acc * 100)
            print(acc)
            tot += acc

        print('Average accuracy:', tot / 50)
        
    all_accs = torch.stack(accs, dim=0)
    with open("./results/"+model_name[:-4]+"_results.txt", "a+") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())+'\n'])

    print(all_accs.mean())
    print(all_accs.std())
Example no. 8
        cnt_wait = 0
        torch.save(model.state_dict(), 'best_dgi.pkl')
    else:
        cnt_wait += 1

    if cnt_wait == patience:
        print('Early stopping!')
        break

    loss.backward()
    optimiser.step()

print('Loading {}th epoch'.format(best_t))
model.load_state_dict(torch.load('best_dgi.pkl'))

embeds, _ = model.embed(features, sp_nor_adjs if sparse else nor_adjs, sparse,
                        None)
train_embs = embeds[0, idx_train]
val_embs = embeds[0, idx_val]
test_embs = embeds[0, idx_test]

train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

tot = torch.zeros(1)
if torch.cuda.is_available():
    tot = tot.cuda()
tot_mac = 0
accs = []
mac_f1 = []

for _ in range(5):
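This last fragment stops at the head of a five-run evaluation that evidently
tracks both accuracy and macro-F1 (accs, mac_f1, tot and tot_mac are all
initialised). A minimal sketch of such a loop body, reusing the linear-probe
pattern from Example no. 2 plus scikit-learn's f1_score (that import, and the
split tensors, are assumptions):

    # Sketch only; assumes: from sklearn.metrics import f1_score
    log = LogReg(hid_units, nb_classes)
    opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
    if torch.cuda.is_available():
        log.cuda()

    for _ in range(100):
        log.train()
        opt.zero_grad()
        loss = xent(log(train_embs), train_lbls)
        loss.backward()
        opt.step()

    preds = torch.argmax(log(test_embs), dim=1)
    acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
    mac = f1_score(test_lbls.cpu().numpy(), preds.cpu().numpy(), average='macro')
    accs.append(acc * 100)
    mac_f1.append(mac)
    tot += acc
    tot_mac += mac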