import torch
import torch.nn.functional as F
from torch_geometric.data import DataLoader  # torch_geometric.loader in newer releases
# NOTE: these metric helpers shipped with older PyTorch Geometric releases
# (later removed in favour of torchmetrics).
from torch_geometric.utils import (accuracy, true_positive, true_negative,
                                   false_positive, false_negative,
                                   precision, recall, f1_score)


@torch.no_grad()  # no gradients needed during evaluation
def evaluate(model):
    model.eval()
    loader = DataLoader(TEST, batch_size=BATCH_SIZE)
    pred, target = [], []
    for data in loader:
        data = data.to(DEVICE)
        predicted = torch.argmax(model(data.x, data.edge_index, data.batch), dim=1)
        pred.extend(p.item() for p in predicted)
        target.extend(y.item() for y in data.y)
    pred = torch.tensor(pred)
    target = torch.tensor(target)
    # The per-class helpers take num_classes and return one value per class.
    # The original passed num_classes=1, which reports statistics for class 0
    # only; for a binary problem we request both classes and report class 1.
    print("Accuracy: {:.2f}%".format(100 * accuracy(pred, target)))
    print("True Positive: {}".format(true_positive(pred, target, 2)[1].item()))
    print("True Negative: {}".format(true_negative(pred, target, 2)[1].item()))
    print("False Positive: {}".format(false_positive(pred, target, 2)[1].item()))
    print("False Negative: {}".format(false_negative(pred, target, 2)[1].item()))
    print("Precision: {:.2f}%".format(100 * precision(pred, target, 2)[1].item()))
    print("Recall: {:.2f}%".format(100 * recall(pred, target, 2)[1].item()))
    print("F1 score: {:.2f}%".format(100 * f1_score(pred, target, 2)[1].item()))
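# Usage sketch: evaluate() reads the module-level names TEST, BATCH_SIZE and
# DEVICE, which are defined elsewhere in the original script. A hypothetical
# setup (illustrative values only, not the original configuration) could be:
#
#     DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     BATCH_SIZE = 64
#     TEST = dataset[int(0.8 * len(dataset)):]  # hold-out split of a PyG dataset
#     evaluate(model)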
def test_metric():
    pred = torch.tensor([0, 0, 1, 1])
    target = torch.tensor([0, 1, 0, 1])
    assert accuracy(pred, target) == 0.5
    assert true_positive(pred, target, num_classes=2).tolist() == [1, 1]
    assert true_negative(pred, target, num_classes=2).tolist() == [1, 1]
    assert false_positive(pred, target, num_classes=2).tolist() == [1, 1]
    assert false_negative(pred, target, num_classes=2).tolist() == [1, 1]
    assert precision(pred, target, num_classes=2).tolist() == [0.5, 0.5]
    assert recall(pred, target, num_classes=2).tolist() == [0.5, 0.5]
    assert f1_score(pred, target, num_classes=2).tolist() == [0.5, 0.5]
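# The metric helpers tested above were dropped from torch_geometric.utils in
# recent releases. If they are unavailable, the sketch below reproduces the
# old per-class semantics (one count/score per class; precision, recall and
# F1 defined as 0 where the denominator is empty). It is an assumption-based
# drop-in, not the library's current API; test_metric() passes against it.

def true_positive(pred, target, num_classes):
    # per class i: predictions == i where target == i
    return torch.tensor([((pred == i) & (target == i)).sum()
                         for i in range(num_classes)])

def true_negative(pred, target, num_classes):
    return torch.tensor([((pred != i) & (target != i)).sum()
                         for i in range(num_classes)])

def false_positive(pred, target, num_classes):
    return torch.tensor([((pred == i) & (target != i)).sum()
                         for i in range(num_classes)])

def false_negative(pred, target, num_classes):
    return torch.tensor([((pred != i) & (target == i)).sum()
                         for i in range(num_classes)])

def accuracy(pred, target):
    # fraction of exact matches
    return int((pred == target).sum()) / target.numel()

def precision(pred, target, num_classes):
    tp = true_positive(pred, target, num_classes).to(torch.float)
    fp = false_positive(pred, target, num_classes).to(torch.float)
    out = tp / (tp + fp)
    out[torch.isnan(out)] = 0  # no positive predictions for this class
    return out

def recall(pred, target, num_classes):
    tp = true_positive(pred, target, num_classes).to(torch.float)
    fn = false_negative(pred, target, num_classes).to(torch.float)
    out = tp / (tp + fn)
    out[torch.isnan(out)] = 0  # no positive targets for this class
    return out

def f1_score(pred, target, num_classes):
    p = precision(pred, target, num_classes)
    r = recall(pred, target, num_classes)
    out = 2 * p * r / (p + r)
    out[torch.isnan(out)] = 0
    return out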
def test_classifier(model, loader, device):
    model.eval()
    y = torch.tensor([]).long().to(device)
    yp = torch.tensor([]).long().to(device)
    loss_all = 0
    for data in loader:
        data = data.to(device)
        pred, _ = model(data.x, data.edge_index, batch=data.batch)
        loss = F.nll_loss(F.log_softmax(pred, dim=-1), data.y)
        pred = pred.max(dim=1)[1]
        y = torch.cat([y, data.y])
        yp = torch.cat([yp, pred])
        loss_all += data.num_graphs * loss.item()
    # The helpers take (pred, target, num_classes); yp holds the predictions.
    # The original passed (y, yp), which silently swaps precision and recall.
    return (accuracy(yp, y),
            precision(yp, y, model.num_output).mean().item(),
            recall(yp, y, model.num_output).mean().item(),
            f1_score(yp, y, model.num_output).mean().item(),
            loss_all)
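# Usage sketch (names illustrative): test_classifier returns a 5-tuple, and
# loss_all is summed per graph, so dividing by len(loader.dataset) gives the
# mean per-graph loss.
#
#     acc, prec, rec, f1, total_loss = test_classifier(model, val_loader, device)
#     print(f'val: acc={acc:.3f} prec={prec:.3f} rec={rec:.3f} '
#           f'f1={f1:.3f} loss={total_loss / len(val_loader.dataset):.4f}')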
from statistics import mean
from sklearn.metrics import roc_auc_score
from tqdm import tqdm

loss = []  # per-batch training losses (the fragment used this list without initialising it)
cum_pred = torch.tensor([]).long().to(device)  # hard class predictions
cum_scores = torch.Tensor().to(device)         # P(class 1), for ROC AUC
cum_labels = torch.tensor([]).long().to(device)
for batch in tqdm(train_loader, desc='Training.'):
    batch = batch.to(device)
    optimizer.zero_grad()
    out = model(batch)
    labels = batch.y.to(device)
    tr_loss = F.cross_entropy(out, target=labels)
    loss.append(tr_loss.detach().item())
    tr_loss.backward()
    optimizer.step()
    cum_labels = torch.cat((cum_labels, labels.clone().detach()), dim=0)
    # The metric helpers expect class labels and roc_auc_score expects scores,
    # so accumulate both rather than the raw 2-column logits.
    cum_pred = torch.cat((cum_pred, out.argmax(dim=1).detach()), dim=0)
    cum_scores = torch.cat((cum_scores, F.softmax(out, dim=1)[:, 1].detach()), dim=0)
train_precision = precision(cum_pred, cum_labels, 2)[1].item()
train_recall = recall(cum_pred, cum_labels, 2)[1].item()
train_f1 = f1_score(cum_pred, cum_labels, 2)[1].item()
roc_auc = roc_auc_score(cum_labels.cpu(), cum_scores.cpu())
loss = mean(loss)

# -------------- REPORTING ------------------------------------
model.eval()
cum_pred = torch.tensor([]).long().to(device)
cum_labels = torch.tensor([]).long().to(device)
te_weights = torch.Tensor().to(device)
for batch in tqdm(val_loader, desc='Evaluating.'):
    batch = batch.to(device)
    out = model(batch)
    labels = batch.y.to(device)
    te_loss = F.cross_entropy(out, target=labels)
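# NOTE (assumption about intent): the fragment above is truncated mid-way
# through the validation loop. One detail worth keeping in mind: for binary
# labels, sklearn's roc_auc_score expects a 1-D array of positive-class
# scores, not a 2-column logit matrix, e.g.
#
#     probs = F.softmax(out, dim=1)[:, 1]  # P(class 1)
#     auc = roc_auc_score(labels.cpu().numpy(), probs.detach().cpu().numpy())
#
# which is why the training loop accumulates cum_scores separately from the
# hard predictions consumed by precision/recall/f1_score.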