Example #1
def main():
    args, unknown = parse_args()

    if args.embedder == 'DMGI':
        from models import DMGI
        embedder = DMGI(args)
    elif args.embedder == 'DGI':
        from models import DGI
        embedder = DGI(args)

    embedder.training()
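
Example #1 assumes a `parse_args` helper that returns the parsed arguments together with any unrecognised flags. A minimal sketch of such a helper using argparse (hypothetical; the repository's actual implementation may define many more options):

import argparse

def parse_args():
    # Hypothetical sketch: only the flag referenced above is declared here.
    parser = argparse.ArgumentParser(description='Choose which embedder to train.')
    parser.add_argument('--embedder', default='DMGI', choices=['DMGI', 'DGI'])
    return parser.parse_known_args()  # -> (args, unknown)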
Example #2
if not sparse:
    adj = torch.FloatTensor(adj[np.newaxis])
    aug_adj1 = torch.FloatTensor(aug_adj1[np.newaxis])
    aug_adj2 = torch.FloatTensor(aug_adj2[np.newaxis])

labels = torch.FloatTensor(labels[np.newaxis])
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)

model = DGI(ft_size, hid_units, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

if torch.cuda.is_available():
    print('Using CUDA')
    model.cuda()
    features = features.cuda()
    aug_features1 = aug_features1.cuda()
    aug_features2 = aug_features2.cuda()
    if sparse:
        sp_adj = sp_adj.cuda()
        sp_aug_adj1 = sp_aug_adj1.cuda()
        sp_aug_adj2 = sp_aug_adj2.cuda()
    else:
        adj = adj.cuda()
        aug_adj1 = aug_adj1.cuda()
        aug_adj2 = aug_adj2.cuda()
Example #3
    # adj_tack = process.sparse_mx_to_torch_sparse_tensor(adj_tack)
else:
    adj = (adj + sp.eye(adj.shape[0])).todense()

features = torch.FloatTensor(features[np.newaxis])
# features_tack = torch.FloatTensor(features_tack[np.newaxis])
if not sparse:
    adj = torch.FloatTensor(adj[np.newaxis])
labels = torch.FloatTensor(labels[np.newaxis])
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)

model = DGI(ft_size,
            hid_units,
            nonlinearity,
            critic=args.critic,
            dataset=dataset)
model.eval()

if torch.cuda.is_available():
    print('Using CUDA')
    model.cuda()
    features = features.cuda()
    # features_tack = features_tack.cuda()
    if sparse:
        sp_adj = sp_adj.cuda()
        sp_A = sp_A.cuda()
        # adj_tack = adj_tack.cuda()
    else:
        adj = adj.cuda()
Example #4
def main():

    saved_graph = os.path.join('assets', 'saved_graphs', 'best_dgi.pickle')
    saved_logreg = os.path.join('assets', 'saved_graphs', 'best_logreg.pickle')

    dataset = 'cora'

    # training params
    batch_size = 1
    nb_epochs = 10000
    patience = 25
    lr = 0.001
    l2_coef = 0.0
    drop_prob = 0.0
    hid_units = 512
    sparse = True
    nonlinearity = 'prelu' # special name to separate parameters

    adj, features, labels, idx_train, idx_test, idx_val = process.load_data(dataset)

    features, _ = process.preprocess_features(features)

    nb_nodes = features.shape[0]
    ft_size = features.shape[1]
    nb_classes = labels.shape[1]

    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = (adj + sp.eye(adj.shape[0])).todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    print("Training Nodes: {}, Testing Nodes: {}, Validation Nodes: {}".format(len(idx_train), len(idx_test), len(idx_val)))

    model = DGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()
        features = features.cuda()
        if sparse:
            sp_adj = sp_adj.cuda()
        else:
            adj = adj.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()

    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    if not os.path.exists(saved_graph):
        pbar = trange(nb_epochs)
        for epoch in pbar:
            model.train()
            optimiser.zero_grad()

            idx = np.random.permutation(nb_nodes)
            shuf_fts = features[:, idx, :]

            lbl_1 = torch.ones(batch_size, nb_nodes)
            lbl_2 = torch.zeros(batch_size, nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1)

            if torch.cuda.is_available():
                shuf_fts = shuf_fts.cuda()
                lbl = lbl.cuda()

            logits = model(features, shuf_fts, sp_adj if sparse else adj, sparse, None, None, None)

            loss = b_xent(logits, lbl)

            pbar.desc = 'Loss: {:.4f}'.format(loss)

            if loss < best:
                best = loss
                best_t = epoch
                cnt_wait = 0
                torch.save(model.state_dict(), saved_graph)
            else:
                cnt_wait += 1

            if cnt_wait == patience:
                tqdm.write('Early stopping!')
                break

            loss.backward()
            optimiser.step()


    print('Loading {}th Epoch'.format(best_t) if best_t else 'Loading Existing Graph')
    model.load_state_dict(torch.load(saved_graph))

    embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None)
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)

    tot = torch.zeros(1)
    if torch.cuda.is_available():
        tot = tot.cuda()

    accs = []

    print("\nValidation:")
    pbar = trange(50)
    for _ in pbar:
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)

        pat_steps = 0
        best_acc = torch.zeros(1)
        if torch.cuda.is_available():
            log.cuda()
            best_acc = best_acc.cuda()
        for _ in range(100):
            log.train()
            opt.zero_grad()

            logits = log(train_embs)
            loss = xent(logits, train_lbls)

            loss.backward()
            opt.step()

        logits = log(test_embs)
        preds = torch.argmax(logits, dim=1)
        acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
        accs.append(acc * 100)
        pbar.desc = "Accuracy: {:.2f}%".format(100 * acc)
        tot += acc

    torch.save(log.state_dict(), saved_logreg)

    accs = torch.stack(accs)
    print('Average Accuracy: {:.2f}%'.format(accs.mean()))
    print('Standard Deviation: {:.3f}'.format(accs.std()))

    print("\nTesting")
    logits = log(val_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
    print("Accuracy: {:.2f}%".format(100 * acc))
Example #5
#    adj = (adj + sp.eye(adj.shape[0])).todense()

#features = torch.FloatTensor(features[np.newaxis])
#features = torch.FloatTensor(features)
##if not sparse:
#adj = torch.FloatTensor(adj)
#labels = torch.FloatTensor(labels[np.newaxis])
features, _ = process.preprocess_features(features)
features = torch.FloatTensor(features[np.newaxis])
labels = torch.FloatTensor(labels[np.newaxis])

idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)

model = DGI(ft_size, hid_units, shid, args.alpha, args.nb_heads, P)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
#msk = torch.randn(1, nb_nodes).ge(0.5).float()
#if torch.cuda.is_available():
if args.cuda:
    print('Using CUDA')
    model.cuda()

    features = features.cuda()

    nor_adjs = nor_adjs.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
#    msk = msk.cuda()
Example #6
        sp_A = sp_A.cuda()
    else:
        adj = adj.cuda()
        A = A.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

sp_adj = sp_adj.to_dense()
sp_adj_ori = sp_adj.clone()
features_ori = features.clone()
sp_A = sp_A.to_dense()


encoder = DGI(ft_size, hid_units, nonlinearity, critic=args.critic)
atm = Attacker(encoder, features, nb_nodes, attack_mode=attack_mode,
               show_attack=args.show_attack, gpu=torch.cuda.is_available())
optimiser = torch.optim.Adam(encoder.parameters(), lr=lr, weight_decay=l2_coef)

if torch.cuda.is_available():
    encoder.cuda()
    atm.cuda()

b_xent = nn.BCEWithLogitsLoss()
xent = nn.CrossEntropyLoss()
cnt_wait = 0
best = 1e9
best_t = 0

step_size_init = 20
Example #7
def train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name, K=None, random_init=False, drop_sigma=False):
    batch_size = 1 # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    if gnn_type == "SGConv":
        lr /= 3.
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item()+1 # 0 based cnt
    features = dataset.x

    model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0)

    if torch.cuda.is_available():
        features = features.cuda()
        edge_index = edge_index.cuda()
        model = model.cuda()


    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    for epoch in range(nb_epochs):
        if random_init:
            break
        model.train()
        optimiser.zero_grad()

        idx = np.random.permutation(nb_nodes)
        shuf_fts = features[idx, :]

        lbl_1 = torch.ones(nb_nodes)
        lbl_2 = torch.zeros(nb_nodes)
        lbl = torch.cat((lbl_1, lbl_2), 0)

        if torch.cuda.is_available():
            shuf_fts = shuf_fts.cuda()
            lbl = lbl.cuda()
        
        logits = model(features, shuf_fts, edge_index)

        loss = b_xent(logits, lbl)

        print('Loss:', loss)

        if loss < best:
            best = loss
            best_t = epoch
            cnt_wait = 0
            torch.save(model.state_dict(), './trained_models/'+model_name)
        else:
            cnt_wait += 1

        if cnt_wait == patience:
            print('Early stopping!')
            break

        loss.backward()
        optimiser.step()
    return best_t
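
`train_transductive` reads its training parameters from a `get_hyperparameters()` helper that is not shown in these snippets. A hedged sketch of what it could return, reusing the values from Example #4 (the actual helper and values may differ):

def get_hyperparameters():
    # Assumed defaults, borrowed from Example #4; treat them as placeholders.
    return {
        "nb_epochs": 10000,
        "patience": 25,
        "lr": 0.001,
        "l2_coef": 0.0,
        "drop_prob": 0.0,
        "hid_units": 512,
        "nonlinearity": "prelu",
        "batch_size": 1,
    }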
Example #8
adj, features, labels, g = process.load_data(dataset)
features, _ = process.preprocess_features(features)

nb_nodes = features.shape[0]
ft_size = features.shape[1]
nb_classes = len(labels)

adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
adj = (adj + sp.eye(adj.shape[0])).todense()
adj = torch.FloatTensor(adj[np.newaxis])

features = torch.FloatTensor(features[np.newaxis])
# labels = torch.FloatTensor(labels[np.newaxis])

model = DGI(ft_size, hid_units, nonlinearity)
# model = GAE(ft_size, hid_units, out_units, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

b_xent = nn.BCEWithLogitsLoss()
xent = nn.CrossEntropyLoss()

kmeans = KMeans(n_clusters=6)

for epoch in range(nb_epochs):
    model.train()
    optimiser.zero_grad()

    idx = np.random.permutation(nb_nodes)
    shuf_fts = features[:, idx, :]
Example #9
def process_inductive(dataset, gnn_type="GCNConv", K=None, random_init=False, runs=10):

    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]
    batch_size = hyperparameters["batch_size"]

    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset_train = PPI(
        "./geometric_datasets/"+dataset,
        split="train",
        transform=norm_features,
    )
    print(dataset_train)
    dataset_val = PPI(
        "./geometric_datasets/"+dataset,
        split="val",
        transform=norm_features,
    )
    print(dataset_val)
    dataset_test = PPI(
        "./geometric_datasets/"+dataset,
        split="test",
        transform=norm_features,
    )
    data = []
    for d in dataset_train:
        data.append(d)
    for d in dataset_val:
        data.append(d)

    ft_size = dataset_train[0].x.shape[1]
    nb_classes = dataset_train[0].y.shape[1] # multilabel
    b_xent = nn.BCEWithLogitsLoss()

    loader_train = DataLoader(
        data,
        batch_size=hyperparameters["batch_size"],
        shuffle=True,
    )
    loader_test = DataLoader(
        dataset_test,
        batch_size=hyperparameters["batch_size"],
        shuffle=False
    )

    all_accs = []
    for _ in range(runs):
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, batch_size=1, K=K)
        model_name = get_model_name(dataset, gnn_type, K, random_init=random_init)
        print(model)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

        if torch.cuda.is_available():
            print('Using CUDA')
            model = model.cuda()
        model.train()

        torch.cuda.empty_cache()
        for epoch in range(20):
            if random_init:
                break
            total_loss = 0
            batch_id = 0
            model.train()
            loaded = list(loader_train)
            for batch in loaded:
                optimiser.zero_grad()
                if torch.cuda.is_available():
                    batch = batch.to('cuda')
                nb_nodes = batch.x.shape[0]
                features = batch.x
                labels = batch.y
                edge_index = batch.edge_index

                idx = np.random.randint(0, len(data))
                while idx == batch_id:
                    idx = np.random.randint(0, len(data))
                shuf_fts = torch.nn.functional.dropout(loaded[idx].x, drop_prob)
                edge_index2 = loaded[idx].edge_index

                lbl_1 = torch.ones(nb_nodes)
                lbl_2 = torch.zeros(shuf_fts.shape[0])
                lbl = torch.cat((lbl_1, lbl_2), 0)

                if torch.cuda.is_available():
                    shuf_fts = shuf_fts.cuda()
                    if edge_index2 is not None:
                        edge_index2 = edge_index2.cuda()
                    lbl = lbl.cuda()
                
                logits = model(features, shuf_fts, edge_index, batch=batch.batch, edge_index_alt=edge_index2)

                loss = b_xent(logits, lbl)
                loss.backward()
                optimiser.step()
                batch_id += 1
                total_loss += loss.item()


            print(epoch, 'Train Loss:', total_loss/(len(dataset_train)))

        torch.save(model.state_dict(), './trained_models/'+model_name)
        torch.cuda.empty_cache()

        print('Loading last epoch')
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        b_xent_reg = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(2.25))
        train_embs, whole_train_data = preprocess_embeddings(model, dataset_train)
        val_embs, whole_val_data = preprocess_embeddings(model, dataset_val)
        test_embs, whole_test_data = preprocess_embeddings(model, dataset_test)

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best = 1e9
            log.train()
            for _ in range(250):
                opt.zero_grad()

                logits = log(train_embs)
                loss = b_xent_reg(logits, whole_train_data.y)
                
                loss.backward()
                opt.step()

                log.eval()
                val_logits = log(val_embs) 
                loss = b_xent_reg(val_logits, whole_val_data.y)
                if loss.item() < best:
                    best = loss.item()
                    pat_steps = 0
                if pat_steps >= 5:
                    break

                pat_steps += 1


            log.eval()
            logits = log(test_embs)
            preds = torch.sigmoid(logits) > 0.5
            f1 = sklearn.metrics.f1_score(whole_test_data.y.cpu(), preds.long().cpu(), average='micro')
            all_accs.append(float(f1))
            print()
            print('Micro-averaged f1:', f1)

    all_accs = torch.tensor(all_accs)

    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())])
    print(all_accs.mean())
    print(all_accs.std())
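
A hedged call-site sketch for `process_inductive`, matching the signature above; the PPI dataset name mirrors the loader inside the function and the run count is a placeholder:

if __name__ == "__main__":
    # Hypothetical invocation; arguments follow the defaults declared above.
    process_inductive("PPI", gnn_type="GCNConv", K=None, random_init=False, runs=10)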
Example #10
def process_link_prediction(dataset, gnn_type="GCNConv", K=None, runs=10, drop_sigma=False):

    batch_size = 1 # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    xent = nn.CrossEntropyLoss()
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    dataset_str = dataset
    dataset = Planetoid("./geometric_datasets"+'/'+dataset,
                        dataset,
                        transform=torch_geometric.transforms.NormalizeFeatures())[0]
    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item()+1 # 0 based cnt
    features = dataset.x
    labels = dataset.y

    mask_train = dataset.train_mask
    mask_val = dataset.val_mask
    mask_test = dataset.test_mask

    all_auc, all_ap = [], []

    for _ in range(runs):
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
        print(model)
        model_name = get_model_name(dataset_str, gnn_type, K, link_prediction=True, drop_sigma=drop_sigma)
        dataset = Planetoid("./geometric_datasets"+'/'+dataset_str,
                            dataset_str,
                            transform=torch_geometric.transforms.NormalizeFeatures())[0]
        gae = nng.GAE(model)
        gae_data = gae.split_edges(dataset)
        edge_index = gae_data.train_pos_edge_index
        optimiser = torch.optim.Adam(model.parameters(), lr=lr)

        if torch.cuda.is_available():
            print('Using CUDA')
            features = features.cuda()
            labels = labels.cuda()
            edge_index = edge_index.cuda()
            mask_train = mask_train.cuda()
            mask_val = mask_val.cuda()
            mask_test = mask_test.cuda()
            model = model.cuda()

        best_t = train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name, drop_sigma=drop_sigma, K=K)
        print('Loading {}th epoch'.format(best_t))
        model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()
        Z = gae.encode(features, None, edge_index, embed_gae=True)
        res = gae.test(Z, gae_data.test_pos_edge_index.cuda(), gae_data.test_neg_edge_index.cuda())
        auc = res[0]
        ap = res[1]
        print(auc, ap)
        all_auc.append(auc)
        all_ap.append(ap)

    all_auc = torch.tensor(all_auc)
    all_ap = torch.tensor(all_ap)
    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        f.writelines([str(all_auc.mean().item())+'\n', str(all_auc.std().item())+'\n'])
        f.writelines([str(all_ap.mean().item())+'\n', str(all_ap.std().item())])

    print(str(all_auc.mean().item()), str(all_auc.std().item()))
    print(str(all_ap.mean().item()), str(all_ap.std().item()))
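
Similarly, a hedged call-site sketch for `process_link_prediction`; 'Cora' stands in for any Planetoid dataset:

if __name__ == "__main__":
    # Hypothetical invocation; keyword values are illustrative only.
    process_link_prediction("Cora", gnn_type="GCNConv", K=None, runs=10, drop_sigma=False)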
Example #11
def process_transductive(dataset, gnn_type='GCNConv', K=None, random_init=False, runs=10, drop_sigma=False, just_plot=False):
    dataset_str = dataset
    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset = Planetoid("./geometric_datasets"+'/'+dataset,
                        dataset,
                        transform=norm_features)[0]

    # training params
    batch_size = 1 # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    xent = nn.CrossEntropyLoss()
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item()+1 # 0 based cnt
    features = dataset.x
    labels = dataset.y
    edge_index = dataset.edge_index
    edge_index, _ = torch_geometric.utils.add_remaining_self_loops(edge_index)

    mask_train = dataset.train_mask
    mask_val = dataset.val_mask
    mask_test = dataset.test_mask

    model_name = get_model_name(dataset_str, gnn_type, K, random_init=random_init, drop_sigma=drop_sigma)
    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        pass

    accs = []

    for i in range(runs): 
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
        print(model, model_name, drop_sigma)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr)

        if torch.cuda.is_available():
            print('Using CUDA')
            features = features.cuda()
            labels = labels.cuda()
            edge_index = edge_index.cuda()
            mask_train = mask_train.cuda()
            mask_val = mask_val.cuda()
            mask_test = mask_test.cuda()
            model = model.cuda()

        best_t = train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name, K=K, random_init=random_init, drop_sigma=drop_sigma)

        xent = nn.CrossEntropyLoss()
        print('Loading {}th epoch'.format(best_t))
        print(model, model_name)
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        embeds, _ = model.embed(features, edge_index, None, standardise=False)
        if just_plot:
            plot_tsne(embeds, labels, model_name)
            exit(0)
        train_embs = embeds[mask_train, :]
        val_embs = embeds[mask_val, :]
        test_embs = embeds[mask_test, :]

        train_lbls = labels[mask_train]
        val_lbls = labels[mask_val]
        test_lbls = labels[mask_test]

        tot = torch.zeros(1)
        tot = tot.cuda()

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best_acc = torch.zeros(1)
            best_acc = best_acc.cuda()
            for _ in range(150):
                log.train()
                opt.zero_grad()

                logits = log(train_embs)
                loss = xent(logits, train_lbls)
                
                loss.backward()
                opt.step()

            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            accs.append(acc * 100)
            print(acc)
            tot += acc

        print('Average accuracy:', tot / 50)
        
    all_accs = torch.stack(accs, dim=0)
    with open("./results/"+model_name[:-4]+"_results.txt", "a+") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())+'\n'])

    print(all_accs.mean())
    print(all_accs.std())
Example #12
        predictions = F.log_softmax(classifier(outputs), dim=1)
    loss_test = F.nll_loss(predictions[idx_test], labels[idx_test])
    acc_test = accuracy(predictions[idx_test], labels[idx_test])
    print("Test set results:", "loss: {:.4f}".format(loss_test.item()),
          "accuracy: {:.4f}".format(acc_test.item()))
    return acc_test.item()


if __name__ == "__main__":
    results = []

    for i in range(args.repeat):
        # model
        model = DGI(num_feat=features.shape[1],
                    num_hid=args.hidden,
                    dropout=args.dropout,
                    rho=args.rho,
                    corruption=args.corruption)
        classifier = nn.Linear(args.hidden, labels.max().item() + 1)
        if args.cuda:
            model.cuda()
            classifier = classifier.cuda()

        print("----- %d / %d runs -----" % (i + 1, args.repeat))
        # Train model
        t_total = time.time()
        if args.test_only:
            model = torch.load("model")
        else:
            train(args.epochs, verbose=args.verbose)
            print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
Example #13
        sp_nor_adjs.append(sp_adj)
    else:
        adj = (adj + sp.eye(adj.shape[0])).todense()
        adj = adj[np.newaxis]
        nor_adjs.append(adj)
features = torch.FloatTensor(features[np.newaxis])
if sparse:
    sp_nor_adjs = torch.FloatTensor(np.array(sp_nor_adjs))
else:
    nor_adjs = torch.FloatTensor(np.array(nor_adjs))
labels = torch.FloatTensor(labels[np.newaxis])
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)

model = DGI(ft_size, hid_units, shid, P, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

if torch.cuda.is_available():
    print('Using CUDA')
    model.cuda()
    features = features.cuda()
    if sparse:
        sp_nor_adjs = sp_nor_adjs.cuda()
    else:
        nor_adjs = nor_adjs.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
Example #14
    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = (adj + sp.eye(adj.shape[0])).todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    model = DGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()
        features = features.cuda()
        if sparse:
            sp_adj = sp_adj.cuda()
        else:
            adj = adj.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()