def evaluate_test(model, g, inputs, labels, test_mask, lp_dict, coeffs, meta):
    model.eval()
    with torch.no_grad():
        pred = model(g, inputs).squeeze()

    output = pred.cuda()
    labels = labels.cuda()
    idx_test = lp_dict['idx_test']
    idx_train = lp_dict['idx_train']
    # Rebuild the normalized adjacency used by the label-propagation refinement.
    adj = sparse_mx_to_torch_sparse_tensor(normalize(lp_dict['sp_adj']))
    #print(adj.to_dense()[np.arange(100), np.arange(100)+1])

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    # Raw GNN predictions: test-set MSE and R^2.
    loss = F.mse_loss(output[idx_test].squeeze(), labels[idx_test].squeeze())
    r2_test = compute_r2(output[idx_test], labels[idx_test])
    # Label-propagation refinement with the learned coefficients ...
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj,
                          torch.tanh(coeffs[0]).item(),
                          torch.exp(coeffs[1]).item())
    lp_r2_test = compute_r2(lp_output, labels[idx_test])
    # ... and without them (the "raw_cov" baseline: default lp_refine arguments).
    lp_output_raw_cov = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test_raw_cov = compute_r2(lp_output_raw_cov, labels[idx_test])

    print("------------")
    print("election year {}".format(meta))
    print("loss:", loss.item())
    print("raw_r2:", r2_test)
    print("refined_r2:", lp_r2_test)
    print("refined_r2_raw_cov:", lp_r2_test_raw_cov)
    print("------------")
def evaluate_test(model, features, labels, test_mask, lp_dict, coeffs, meta="2012"):
    model.eval()
    with torch.no_grad():
        output = model(features).squeeze()

    output = output.cuda()
    labels = labels.cuda()
    idx_test = lp_dict['idx_test']
    idx_train = lp_dict['idx_train']
    adj = sparse_mx_to_torch_sparse_tensor(normalize(lp_dict['sp_adj']))

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    loss = F.mse_loss(output[idx_test].squeeze(), labels[idx_test].squeeze())
    r2_test = compute_r2(output[idx_test], labels[idx_test])
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj, torch.tanh(coeffs[0]).item(), torch.exp(coeffs[1]).item())
    lp_r2_test = compute_r2(lp_output, labels[idx_test])
    lp_output_raw_conv = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test_raw_conv = compute_r2(lp_output_raw_conv, labels[idx_test])

    print("------------")
    print("election year {}".format(meta))
    print("loss:", loss.item())
    print("raw_r2:", r2_test)
    print("refined_r2:", lp_r2_test)
    print("refined_r2_raw_conv:", lp_r2_test_raw_conv)
    print("------------")
Example #3
def evaluate_test(model, g, inputs, labels, test_mask, batch_size, device,
                  lp_dict, coeffs, meta):
    model.eval()
    with th.no_grad():
        # Minibatched inference over the full graph (model.inference handles the batching).
        pred = model.inference(g, inputs, batch_size, device).view(-1)

    output = pred.cuda()
    labels = labels.cuda()
    idx_test = lp_dict['idx_test']
    idx_train = lp_dict['idx_train']
    adj = lp_dict['adj']

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    loss = F.mse_loss(output[idx_test].squeeze(), labels[idx_test].squeeze())
    r2_test = compute_r2(output[test_mask], labels[test_mask])
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj,
                          th.tanh(coeffs[0]).item(),
                          th.exp(coeffs[1]).item())
    lp_r2_test = compute_r2(lp_output, labels[idx_test])
    lp_output_raw_conv = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test_raw_conv = compute_r2(lp_output_raw_conv, labels[idx_test])

    print("------------")
    print("election year {}".format(meta))
    print("loss:", loss.item())
    print("raw_r2:", r2_test)
    print("refined_r2:", lp_r2_test)
    print("refined_r2_raw_conv:", lp_r2_test_raw_conv)
    print("------------")

    model.train()

    return lp_r2_test
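
lp_refine is the refinement step shared by all of these evaluators; its implementation is not included above. As one plausible reading, the sketch below performs residual label propagation: the training residuals (label minus prediction) are smoothed over the normalized adjacency and added back onto the test predictions. The alpha and scale parameters merely stand in for the two learned coefficients the callers pass as torch.tanh(coeffs[0]) and torch.exp(coeffs[1]); this is an illustrative assumption, not the repository's actual algorithm.

import torch

def lp_refine(idx_test, idx_train, labels, output, adj,
              alpha=0.9, scale=1.0, n_iter=50):
    # Illustrative sketch only (residual label propagation), not the real lp_refine.
    residual = torch.zeros_like(output)
    residual[idx_train] = labels[idx_train] - output[idx_train]
    smoothed = residual.clone()
    for _ in range(n_iter):
        # Spread residuals along the graph, keeping a (1 - alpha) pull toward the seeds.
        smoothed = alpha * torch.sparse.mm(adj, smoothed.unsqueeze(1)).squeeze(1) \
                   + (1 - alpha) * residual
    return output[idx_test] + scale * smoothed[idx_test]
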
def evaluate_test(model, g, inputs, labels, test_mask, batch_size, device, lp_dict, meta):
    model.eval()
    with th.no_grad():
        pred = model.inference(g, inputs, batch_size, device).view(-1)

    output = pred.to(device)
    labels = labels.to(device)
    idx_test = lp_dict['idx_test']
    idx_train = lp_dict['idx_train']
    adj = sparse_mx_to_torch_sparse_tensor(normalize(lp_dict['sp_adj']))

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    loss = F.mse_loss(output[idx_test].squeeze(), labels[idx_test].squeeze())
    r2_test = compute_r2(output[test_mask], labels[test_mask])
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test = compute_r2(lp_output, labels[idx_test])

    print("------------")
    print("election year {}".format(meta))
    print("loss:", loss.item())
    print("raw_r2:", r2_test)
    print("refined_r2:", lp_r2_test)
    print("------------")

    model.train()
Example #5
def test(adj, features, labels, test_meta):
    # Relies on names defined at module scope: model, loss, coeffs, idx_train,
    # idx_test (plus the R2 and lp_refine helpers).
    model.eval()
    output = model(features, adj).view(-1)

    loss_test = loss(output, labels, idx_test, adj, coeffs, True)
    r2_test = R2(output[idx_test], labels[idx_test])

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj,
                          torch.tanh(coeffs[0]).item(),
                          torch.exp(coeffs[1]).item())
    lp_r2_test = R2(lp_output, labels[idx_test])
    lp_output_raw_conv = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test_raw_conv = R2(lp_output_raw_conv, labels[idx_test])
    print("Test set ({}) results:".format(test_meta),
          "loss= {:.4f}".format(loss_test.item()),
          "R2= {:.4f}".format(r2_test.item()),
          "LP_R2= {:.4f}".format(lp_r2_test.item()),
          "LP_R2_raw_conv= {:.4f}\n".format(lp_r2_test_raw_conv.item()))
Example #6
def test(adj, features, labels, test_meta):
    model.eval()
    output = model(features, adj).view(-1)
    '''
    np.save("test_lp/output_all.npy", output.cpu().detach().numpy())
    np.save("test_lp/labels_all.npy", labels.cpu().detach().numpy())
    np.save("test_lp/idx_train.npy", idx_train.cpu().detach().numpy())
    np.save("test_lp/idx_val.npy", idx_val.cpu().detach().numpy())
    np.save("test_lp/idx_test.npy", idx_test.cpu().detach().numpy())
    '''

    loss_test = loss(output, labels, idx_test)
    r2_test = R2(output[idx_test], labels[idx_test])

    labels, output, adj = labels.cpu(), output.cpu(), adj.cpu()
    #adj = torch.FloatTensor(np.load("test_lp/raw_S.npy"))
    lp_output = lp_refine(idx_test, idx_train, labels, output, adj)
    lp_r2_test = R2(lp_output, labels[idx_test])
    print("Test set ({}) results:".format(test_meta),
          "loss= {:.4f}".format(loss_test.item()),
          "R2= {:.4f}".format(r2_test.item()),
          "LP_R2= {:.4f}\n".format(lp_r2_test.item()))