Example #1
import torch

accs = []
tot = 0
for _ in range(50):  # 50 independent probe runs, averaged below
    # `log` (the classifier), `xent` (cross-entropy loss) and the train/test
    # embeddings and labels are assumed to be set up by the surrounding code;
    # as in Example #2, `log` is presumably re-created for each run.
    opt1 = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
    log.cuda()
    log.train()
    for _ in range(100):

        opt1.zero_grad()

        logits = log(train_embs)
        loss1 = xent(logits, train_lbls)

        #print('logreg loss ', loss1)

        loss1.backward()
        opt1.step()

    log.eval()

    with torch.no_grad():
        logits = log(test_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
    accs.append(acc * 100)
    print(acc)
    tot += acc

print('Average accuracy:', tot / 50)

accs = torch.stack(accs)
print(accs.mean())
print(accs.std())
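
Both examples train a LogReg probe over frozen embeddings. A minimal sketch of such a module, assuming (as in the original DGI reference code) a single linear layer; the repository's actual definition may differ:

import torch.nn as nn

class LogReg(nn.Module):
    # One linear layer mapping embeddings to class logits, trained with
    # cross-entropy (Example #1) or BCE-with-logits (Example #2).
    def __init__(self, ft_in, nb_classes):
        super().__init__()
        self.fc = nn.Linear(ft_in, nb_classes)

    def forward(self, seq):
        return self.fc(seq)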
Example #2
# Imports needed by this example; DGI, LogReg, get_hyperparameters,
# get_model_name and preprocess_embeddings are project-local helpers assumed
# to be defined elsewhere in the repository.
import numpy as np
import sklearn.metrics
import torch
import torch.nn as nn
import torch_geometric.transforms
from torch_geometric.datasets import PPI
from torch_geometric.loader import DataLoader  # torch_geometric.data in older PyG


def process_inductive(dataset, gnn_type="GCNConv", K=None, random_init=False, runs=10):

    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]
    batch_size = hyperparameters["batch_size"]

    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset_train = PPI(
        "./geometric_datasets/"+dataset,
        split="train",
        transform=norm_features,
    )
    print(dataset_train)
    dataset_val = PPI(
        "./geometric_datasets/"+dataset,
        split="val",
        transform=norm_features,
    )
    print(dataset_val)
    dataset_test = PPI(
        "./geometric_datasets/"+dataset,
        split="test",
        transform=norm_features,
    )
    # Pre-train on both the train and val graphs; DGI is unsupervised, so no
    # labels are consumed here.
    data = []
    for d in dataset_train:
        data.append(d)
    for d in dataset_val:
        data.append(d)

    ft_size = dataset_train[0].x.shape[1]
    nb_classes = dataset_train[0].y.shape[1] # multilabel
    b_xent = nn.BCEWithLogitsLoss()  # DGI discriminator loss (real vs. corrupted)

    loader_train = DataLoader(
        data,
        batch_size=batch_size,
        shuffle=True,
    )
    loader_test = DataLoader(
        dataset_test,
        batch_size=batch_size,
        shuffle=False,
    )

    all_accs = []
    for _ in range(runs):
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, batch_size=1, K=K)
        model_name = get_model_name(dataset, gnn_type, K, random_init=random_init)
        print(model)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

        if torch.cuda.is_available():
            print('Using CUDA')
            model = model.cuda()
        model.train()

        torch.cuda.empty_cache()
        for epoch in range(20):
            if random_init:
                break  # random-init baseline: skip DGI pre-training entirely
            total_loss = 0
            batch_id = 0
            model.train()
            # Materialise the batches so corruption features can be drawn by index.
            loaded = list(loader_train)
            for batch in loaded:
                optimiser.zero_grad()
                if torch.cuda.is_available():
                    batch = batch.to('cuda')
                nb_nodes = batch.x.shape[0]
                features = batch.x
                labels = batch.y
                edge_index = batch.edge_index

                # DGI corruption: take node features from a different,
                # randomly chosen batch and apply dropout to them.
                idx = np.random.randint(0, len(loaded))
                while idx == batch_id:
                    idx = np.random.randint(0, len(loaded))
                shuf_fts = torch.nn.functional.dropout(loaded[idx].x, drop_prob)
                edge_index2 = loaded[idx].edge_index

                # Discriminator targets: 1 for every real node, 0 for every
                # corrupted node.
                lbl_1 = torch.ones(nb_nodes)
                lbl_2 = torch.zeros(shuf_fts.shape[0])
                lbl = torch.cat((lbl_1, lbl_2), 0)

                if torch.cuda.is_available():
                    shuf_fts = shuf_fts.cuda()
                    if edge_index2 is not None:
                        edge_index2 = edge_index2.cuda()
                    lbl = lbl.cuda()
                
                # Discriminator logits for all real nodes followed by all
                # corrupted nodes, matching the label layout above.
                logits = model(features, shuf_fts, edge_index, batch=batch.batch, edge_index_alt=edge_index2)

                loss = b_xent(logits, lbl)
                loss.backward()
                optimiser.step()
                batch_id += 1
                total_loss += loss.item()


            print(epoch, 'Train Loss:', total_loss / len(loaded))

        torch.save(model.state_dict(), './trained_models/'+model_name)
        torch.cuda.empty_cache()

        if not random_init:
            print('Loading last epoch')
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        # Probe loss: BCE with a positive-class weight to counter label imbalance.
        b_xent_reg = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(2.25))
        # Precompute frozen node embeddings for each split once.
        train_embs, whole_train_data = preprocess_embeddings(model, dataset_train)
        val_embs, whole_val_data = preprocess_embeddings(model, dataset_val)
        test_embs, whole_test_data = preprocess_embeddings(model, dataset_test)

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best = 1e9
            for _ in range(250):
                log.train()
                opt.zero_grad()

                logits = log(train_embs)
                loss = b_xent_reg(logits, whole_train_data.y)

                loss.backward()
                opt.step()

                # Early stopping on validation loss with a patience of 5 steps.
                log.eval()
                with torch.no_grad():
                    val_logits = log(val_embs)
                    val_loss = b_xent_reg(val_logits, whole_val_data.y)
                if val_loss.item() < best:
                    best = val_loss.item()
                    pat_steps = 0
                if pat_steps >= 5:
                    break

                pat_steps += 1


            log.eval()
            with torch.no_grad():
                logits = log(test_embs)
            preds = torch.sigmoid(logits) > 0.5  # threshold each label at 0.5
            f1 = sklearn.metrics.f1_score(whole_test_data.y.cpu(), preds.long().cpu(), average='micro')
            all_accs.append(float(f1))
            print()
            print('Micro-averaged f1:', f1)

    all_accs = torch.tensor(all_accs)

    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())])
    print(all_accs.mean())
    print(all_accs.std())
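
A minimal invocation sketch (hypothetical: the argument values are illustrative, and the ./geometric_datasets/, ./trained_models/ and ./results/ directories are assumed to exist):

if __name__ == "__main__":
    # Pre-train DGI with a GCN encoder on PPI, then evaluate with the
    # logistic-regression probe, averaging micro-F1 over 3 runs.
    process_inductive("PPI", gnn_type="GCNConv", runs=3)

    # Baseline: skip pre-training and probe a randomly initialised encoder.
    process_inductive("PPI", gnn_type="GCNConv", runs=3, random_init=True)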