Code Example #1
import torch
from torch_geometric.data import DataLoader
# These metric helpers shipped with older PyG releases (removed in 2.x
# in favor of torchmetrics).
from torch_geometric.utils import (accuracy, true_positive, true_negative,
                                   false_positive, false_negative,
                                   precision, recall, f1_score)


def evaluate(model):
    model.eval()
    loader = DataLoader(TEST, batch_size=BATCH_SIZE)

    pred = []
    target = []
    with torch.no_grad():  # inference only, no gradients needed
        for data in loader:
            data = data.to(DEVICE)
            predicted = torch.argmax(model(data.x, data.edge_index, data.batch),
                                     dim=1)
            pred.extend(p.item() for p in predicted)
            target.extend(y.item() for y in data.y)

    pred = torch.tensor(pred)
    target = torch.tensor(target)
    # Report counts and scores for the positive class (index 1) of a binary
    # task; each metric returns one entry per class.
    print("Accuracy: {:.2f}%".format(100 * accuracy(pred, target)))
    print("True Positive: {}".format(true_positive(pred, target, 2)[1].item()))
    print("True Negative: {}".format(true_negative(pred, target, 2)[1].item()))
    print("False Positive: {}".format(false_positive(pred, target, 2)[1].item()))
    print("False Negative: {}".format(false_negative(pred, target, 2)[1].item()))
    print("Precision: {:.2f}%".format(100 * precision(pred, target, 2)[1].item()))
    print("Recall: {:.2f}%".format(100 * recall(pred, target, 2)[1].item()))
    print("F1 score: {:.2f}%".format(100 * f1_score(pred, target, 2)[1].item()))
Code Example #2
import torch
from torch_geometric.utils import (accuracy, true_positive, true_negative,
                                   false_positive, false_negative,
                                   precision, recall, f1_score)


def test_metric():
    pred = torch.tensor([0, 0, 1, 1])
    target = torch.tensor([0, 1, 0, 1])

    assert accuracy(pred, target) == 0.5
    assert true_positive(pred, target, num_classes=2).tolist() == [1, 1]
    assert true_negative(pred, target, num_classes=2).tolist() == [1, 1]
    assert false_positive(pred, target, num_classes=2).tolist() == [1, 1]
    assert false_negative(pred, target, num_classes=2).tolist() == [1, 1]
    assert precision(pred, target, num_classes=2).tolist() == [0.5, 0.5]
    assert recall(pred, target, num_classes=2).tolist() == [0.5, 0.5]
    assert f1_score(pred, target, num_classes=2).tolist() == [0.5, 0.5]
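The asserted values follow from the confusion matrix: with these four samples, each class collects exactly one true positive, one false positive, and one false negative, so precision, recall, and F1 all come out to 0.5. A plain-PyTorch check of that arithmetic:

import torch

pred = torch.tensor([0, 0, 1, 1])
target = torch.tensor([0, 1, 0, 1])

for c in (0, 1):
    tp = ((pred == c) & (target == c)).sum().item()  # 1 for each class
    fp = ((pred == c) & (target != c)).sum().item()  # 1 for each class
    fn = ((pred != c) & (target == c)).sum().item()  # 1 for each class
    prec = tp / (tp + fp)               # 1 / 2 = 0.5
    rec = tp / (tp + fn)                # 1 / 2 = 0.5
    f1 = 2 * prec * rec / (prec + rec)  # 0.5
    assert (prec, rec, f1) == (0.5, 0.5, 0.5)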
Code Example #3
import torch
import torch.nn.functional as F
from torch_geometric.utils import accuracy, precision, recall, f1_score


def test_classifier(model, loader, device):
    model.eval()

    y = torch.tensor([]).long().to(device)   # accumulated targets
    yp = torch.tensor([]).long().to(device)  # accumulated predictions

    loss_all = 0
    with torch.no_grad():  # inference only
        for data in loader:
            data = data.to(device)
            pred, _ = model(data.x, data.edge_index, batch=data.batch)
            loss = F.nll_loss(F.log_softmax(pred, dim=-1), data.y)
            pred = pred.max(dim=1)[1]  # logits -> predicted class indices

            y = torch.cat([y, data.y])
            yp = torch.cat([yp, pred])

            loss_all += data.num_graphs * loss.item()

    # The metrics take (pred, target), so pass the predictions first;
    # .mean() macro-averages the per-class scores.
    return (accuracy(yp, y), precision(yp, y, model.num_output).mean().item(),
            recall(yp, y, model.num_output).mean().item(),
            f1_score(yp, y, model.num_output).mean().item(), loss_all)
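A brief usage sketch; model, test_loader, and device are assumed to already exist:

acc, prec, rec, f1, total_loss = test_classifier(model, test_loader, device)
print('acc={:.3f} precision={:.3f} recall={:.3f} f1={:.3f} loss={:.3f}'.format(
    acc, prec, rec, f1, total_loss))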
Code Example #4
def evaluate(self, data, nodes, targets):
    loss, y = self.forward(data.adj, data.x, nodes, targets)
    # Macro F1: the mean of the per-class F1 scores.
    f1 = torch.mean(f1_score(torch.argmax(y, dim=1), targets, num_classes=3))
    return loss.item(), f1.item()
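Averaging the per-class vector returned by f1_score gives the macro F1 used here. A quick self-contained check with toy values, assuming the older torch_geometric.utils API:

import torch
from torch_geometric.utils import f1_score  # older PyG releases

pred = torch.tensor([0, 1, 2, 2])
target = torch.tensor([0, 1, 1, 2])
per_class = f1_score(pred, target, num_classes=3)  # [1.0000, 0.6667, 0.6667]
macro_f1 = per_class.mean()                        # ~0.7778, as returned above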
Code Example #5
File: c_beta.py  Project: dcoukos/masif-tools
    # This excerpt assumes module-level imports of torch, torch.nn.functional
    # as F, tqdm, statistics.mean, sklearn.metrics.roc_auc_score, and the
    # precision/recall/f1_score helpers from torch_geometric.utils.
    loss = []                              # per-batch training losses
    cum_pred = torch.Tensor().to(device)   # accumulated output logits
    cum_labels = torch.tensor([], dtype=torch.long, device=device)
    for batch in tqdm(train_loader, desc='Training.'):
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch)
        labels = batch.y.to(device)
        tr_loss = F.cross_entropy(out, target=labels)
        loss.append(tr_loss.detach().item())
        tr_loss.backward()
        optimizer.step()
        cum_labels = torch.cat((cum_labels, labels.clone().detach()), dim=0)
        cum_pred = torch.cat((cum_pred, out.clone().detach()), dim=0)

    # The PyG metrics expect class indices, so reduce the logits with argmax;
    # [1] selects the positive class of this binary task.
    cum_classes = cum_pred.argmax(dim=1)
    train_precision = precision(cum_classes, cum_labels, 2)[1].item()
    train_recall = recall(cum_classes, cum_labels, 2)[1].item()
    train_f1 = f1_score(cum_classes, cum_labels, 2)[1].item()

    # roc_auc_score expects a score for the positive class, not raw logits.
    roc_auc = roc_auc_score(cum_labels.cpu(),
                            torch.softmax(cum_pred, dim=1)[:, 1].cpu())
    loss = mean(loss)

    #  --------------  REPORTING ------------------------------------
    model.eval()
    cum_pred = torch.Tensor().to(device)
    cum_labels = torch.tensor([], dtype=torch.long, device=device)
    te_weights = torch.Tensor().to(device)
    for batch in tqdm(val_loader, desc='Evaluating.'):
        batch = batch.to(device)
        out = model(batch)
        labels = batch.y.to(device)
        te_loss = F.cross_entropy(out, target=labels)
        pred = out.detach().round().to(device)
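The excerpt breaks off here. A minimal sketch, assuming the same names and not the original file's continuation, of accumulating and reducing the validation outputs in the same style as the training metrics above:

        cum_labels = torch.cat((cum_labels, labels.detach()), dim=0)
        cum_pred = torch.cat((cum_pred, out.detach()), dim=0)

    val_classes = cum_pred.argmax(dim=1)  # logits -> predicted class indices
    val_precision = precision(val_classes, cum_labels, 2)[1].item()
    val_recall = recall(val_classes, cum_labels, 2)[1].item()
    val_f1 = f1_score(val_classes, cum_labels, 2)[1].item()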
Code Example #6
File: main_sparse.py  Project: jarvis08/gpackage-gtn
import torch
import torch.nn as nn
from torch_geometric.utils import f1_score  # older PyG releases

loss = nn.CrossEntropyLoss()  # rebound below: the model returns its own loss
Ws = []
for i in range(50):
    print('Epoch: ', i + 1)
    # Decay the learning rate by 10% per epoch until it reaches 0.005.
    for param_group in optimizer.param_groups:
        if param_group['lr'] > 0.005:
            param_group['lr'] = param_group['lr'] * 0.9
    model.train()
    model.zero_grad()
    loss, y_train, _ = model(A, node_features, train_node,
                             train_target)
    loss.backward()
    optimizer.step()
    # Macro F1 on the training split: mean of the per-class F1 scores.
    train_f1 = torch.mean(
        f1_score(torch.argmax(y_train, dim=1),
                 train_target,
                 num_classes=3)).cpu().numpy()
    print('Train - Loss: {}, Macro_F1: {}'.format(
        loss.detach().cpu().numpy(), train_f1))
    model.eval()
    # Valid
    with torch.no_grad():
        val_loss, y_valid, _ = model.forward(A, node_features,
                                             valid_node, valid_target)
        val_f1 = torch.mean(
            f1_score(torch.argmax(y_valid, dim=1),
                     valid_target,
                     num_classes=3)).cpu().numpy()
        print('Valid - Loss: {}, Macro_F1: {}'.format(
            val_loss.detach().cpu().numpy(), val_f1))
        test_loss, y_test, W = model.forward(A, node_features,
                                             test_node, test_target)