def test(test_adj, test_fea, idx_test, labels):
    """Evaluate the unsupervised encoder + classifier on a held-out graph.

    Rebuilds a DGL graph from ``test_adj``, encodes ``test_fea`` with the
    module-level ``unsupervised_model``, classifies the node embeddings with
    ``classifier_model``, and reports loss / accuracy / AUC over ``idx_test``.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    unsupervised_model.eval()
    classifier_model.eval()

    inductive = sampler.learning_type == 'inductive'
    if inductive:
        # Inductive evaluation is done on CPU (models and labels moved over).
        unsupervised_model.cpu()
        classifier_model.cpu()
        labels = labels.cpu()

    # Extract the edge index; in the transductive case the sparse tensor may
    # still live on the GPU, so bring it to host memory first.
    edge_index = test_adj._indices().data
    edge_index = edge_index.numpy() if inductive else edge_index.cpu().numpy()

    # Rebuild a scipy COO adjacency with unit weights from the edge list.
    n_nodes = test_adj.shape[0]
    adj_coo = sp.coo_matrix(
        (np.ones(edge_index.shape[1]), (edge_index[0], edge_index[1])),
        shape=(n_nodes, n_nodes),
        dtype=np.float32)

    graph = DGLGraph(nx.from_scipy_sparse_matrix(adj_coo,
                                                 create_using=nx.DiGraph()))
    feats = unsupervised_model(test_fea, graph)
    output = classifier_model(feats)

    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    auc_test = roc_auc_compute_fn(output[idx_test], labels[idx_test])
    if args.debug:
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
        print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
# --- Esempio n. 2 (scraped example separator; score: 0) ---
def test(test_adj, test_fea):
    """Run the module-level ``model`` on the test graph and report metrics.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    model.eval()
    preds = model(test_fea, test_adj)

    test_labels = labels[idx_test]
    test_preds = preds[idx_test]
    loss_test = F.nll_loss(test_preds, test_labels)
    acc_test = accuracy(test_preds, test_labels)
    auc_test = roc_auc_compute_fn(test_preds, test_labels)

    if args.debug:
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
    return (loss_test.item(), acc_test.item())
def test_sampling(model, test_g, val_batch_size):
    """Evaluate ``model`` using its layer-wise ``inference`` pass on CPU.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    model.eval()
    # Full-graph, batched inference on CPU (avoids holding the whole graph
    # on the GPU).
    logits = model.inference(test_g, test_g.ndata['features'],
                             val_batch_size, 'cpu')

    # Hoist the repeated CPU transfers out of the three metric calls.
    test_idx = idx_test.cpu()
    test_labels = labels[idx_test].cpu()
    test_logits = logits[test_idx]

    loss_test = F.nll_loss(test_logits, test_labels)
    acc_test = accuracy(test_logits, test_labels)
    auc_test = roc_auc_compute_fn(test_logits, test_labels)

    if args.debug:
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
        print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
def test(test_adj, test_fea):
    """Encode test features, classify the embeddings, and report metrics.

    Uses the module-level ``unsupervised_model`` / ``classifier_model`` pair.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    unsupervised_model.eval()
    classifier_model.eval()

    embeddings = unsupervised_model(test_fea, test_adj)
    predictions = classifier_model(embeddings)

    test_preds = predictions[idx_test]
    test_labels = labels[idx_test]
    loss_test = F.nll_loss(test_preds, test_labels)
    acc_test = accuracy(test_preds, test_labels)
    auc_test = roc_auc_compute_fn(test_preds, test_labels)

    if args.debug:
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
        print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
# --- Esempio n. 5 (scraped example separator; score: 0) ---
def test_sampling(test_g, val_batch_size):
    """Evaluate the encoder + classifier using layer-wise CPU inference.

    Embeddings for the whole graph are computed on CPU; only the test-node
    embeddings are moved to the GPU for classification.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    unsupervised_model.eval()
    classifier_model.eval()
    feats = unsupervised_model.inference(test_g, test_g.ndata['features'],
                                         val_batch_size, 'cpu')

    # NOTE: ``output`` holds predictions for the test nodes ONLY, since the
    # features are indexed by idx_test before classification.
    output = classifier_model(feats[idx_test.cpu()].cuda())

    loss_test = F.nll_loss(output, labels[idx_test])
    acc_test = accuracy(output, labels[idx_test])
    # BUG FIX: the original computed roc_auc_compute_fn(output[idx_test], ...),
    # but ``output`` is already restricted to the test nodes (see above) —
    # indexing it again with idx_test selected the wrong rows (loss/accuracy
    # above correctly use ``output`` directly).
    auc_test = roc_auc_compute_fn(output, labels[idx_test])
    if args.debug:
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
        print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
def test_full(model, test_g, idx_test, labels):
    """Full-graph evaluation of ``model`` on ``test_g``.

    For the datasets listed below the model and labels are moved to CPU
    first (NOTE(review): presumably because the graph is too large for GPU
    memory — confirm).

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    model.eval()
    if sampler.dataset in ['coauthor_phy']:
        model.cpu()
        labels = labels.cpu()

    logits = model(test_g.ndata['features'], test_g)

    test_logits = logits[idx_test]
    test_labels = labels[idx_test]
    loss_test = F.nll_loss(test_logits, test_labels)
    acc_test = accuracy(test_logits, test_labels)
    auc_test = roc_auc_compute_fn(test_logits, test_labels)

    if args.debug:
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "auc= {:.4f}".format(auc_test),
              "accuracy= {:.4f}".format(acc_test.item()))
        print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
# --- Esempio n. 7 (scraped example separator; score: 0) ---
def test(test_adj, test_fea):
    """Evaluate ``model`` via ``myforward`` and optionally dump predictions.

    Module-level side effects: may append ``'~PCA'`` to ``args.ssl`` (once)
    and, when ``args.write_res`` is set, write ``preds/*.npy``.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    model.eval()
    # myforward also returns the hidden embeddings (unused here).
    output, embeddings = model.myforward(test_fea, test_adj)

    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    auc_test = roc_auc_compute_fn(output[idx_test], labels[idx_test])

    # BUG FIX: the original appended '~PCA' unconditionally on every call,
    # so repeated evaluations grew args.ssl into 'ssl~PCA~PCA…'. Guarding on
    # endswith makes the tag idempotent while preserving first-call behavior.
    if args.pca and not args.ssl.endswith('~PCA'):
        args.ssl += '~PCA'

    if args.write_res:
        # exp() the outputs before saving — presumably they are
        # log-probabilities, given the nll_loss above (confirm in model).
        probs = torch.exp(output)
        np.save(f'preds/{args.dataset}_{args.seed}_pred.npy',
                probs.detach().cpu().numpy())

    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "auc= {:.4f}".format(auc_test),
          "accuracy= {:.4f}".format(acc_test.item()))
    print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())
# --- Esempio n. 8 (scraped example separator; score: 0) ---
def test(test_adj, test_fea):
    """Evaluate ``model`` via ``myforward`` and optionally persist results.

    Module-level side effects: may mutate ``args.load_pretrain`` and
    ``args.ssl``, save raw outputs to ``preds/*.npy`` (``args.write_res``),
    and dump a JSON metrics record (``args.write_json``). Reads the global
    training history ``loss_train`` / ``loss_ssl`` at index ``epoch``.

    Returns:
        tuple: ``(test_loss, test_accuracy)`` as Python floats.
    """
    model.eval()
    # output = model(test_fea, test_adj)
    # myforward additionally returns the hidden embeddings, used only by the
    # disabled similarity diagnostics below.
    output, embeddings = model.myforward(test_fea, test_adj)

    # import ipdb
    # ipdb.set_trace()
    # loss_ssl = ssl_agent.make_loss(embeddings)
    # print(loss_ssl)

    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    auc_test = roc_auc_compute_fn(output[idx_test], labels[idx_test])

    # Disabled diagnostics: pairwise embedding similarity over labeled vs.
    # unlabeled nodes. NOTE(review): if re-enabled, the .item() calls in the
    # print below would fail — labelled_sim etc. are already Python floats.
    if False:
        # if not args.write_json:
        from utils import get_pairwise_sim
        labelled_sim = get_pairwise_sim(embeddings[idx_train]).item()

        unlabelled_sim = get_pairwise_sim(embeddings[np.union1d(
            idx_test.cpu(), idx_val.cpu())]).item()
        all_sim = labelled_sim
        # all_sim = get_pairwise_sim(embeddings)
        print(
            "Labeled Node Similarity: {:.4f}".format(labelled_sim.item()),
            "Unlabeled Node Similarity: {:.4f}".format(unlabelled_sim.item()),
            'Total Similarity: {:.4f}'.format(all_sim.item()))
    else:
        # Placeholders so the JSON record below always has these keys.
        labelled_sim = 0
        unlabelled_sim = 0
        all_sim = 0

    # NOTE(review): both branches mutate global ``args`` on every call;
    # repeated calls keep appending '~PCA' to args.ssl.
    if args.finetune:
        args.load_pretrain = 3
    if args.pca:
        args.ssl += '~PCA'

    if args.write_res:
        # Persist the raw model outputs for this dataset/seed.
        np.save(f'preds/{args.dataset}_{args.seed}_pred.npy',
                output.detach().cpu().numpy())

    if args.write_json:
        import json
        # Total depth; presumably +2 accounts for input/output layers —
        # TODO confirm against the model definition.
        nlayers = args.nhiddenlayer * args.nbaseblocklayer + 2
        res = {
            'labelled_sim': labelled_sim,
            'unlabelled_sim': unlabelled_sim,
            'all_sim': all_sim,
            'test_loss': loss_test.item(),
            'test_acc': acc_test.item(),
            'loss_train': loss_train[epoch],
            'loss_ssl': loss_ssl[epoch]
        }
        # Output paths from earlier experiment runs, kept for reference:
        # with open('results_ssl_norm_glorot/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.type, args.ssl, nlayers, args.seed), 'w') as f:
        # with open('results-pretrain/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-nbm/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-baseline/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-pca/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-val/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-new-seeds/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        # with open('results-3layers/validation/{0}_{1}-{2}_{3}_{4}.json'.format(args.dataset, args.label_rate, args.ssl, args.load_pretrain, args.seed), 'w') as f:
        with open(
                'results-3layers/train_size/{0}_{1}-{2}_{3}_{4}.json'.format(
                    args.dataset, args.train_size, args.ssl,
                    args.load_pretrain, args.seed), 'w') as f:
            json.dump(res, f)

    print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
          "auc= {:.4f}".format(auc_test),
          "accuracy= {:.4f}".format(acc_test.item()))
    print("accuracy=%.5f" % (acc_test.item()))
    return (loss_test.item(), acc_test.item())