def metrics(self, y_sig, y):
     y_pred = y_sig > 0.5
     return {
         'acc': metricsF.accuracy(y_pred, y),
         'roc': metricsF.auroc(y_sig, y),
         'iou': metricsF.iou(y_pred, y),
     }
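These functional metrics can also be exercised standalone on a toy batch. A minimal sketch, assuming metricsF is pytorch_lightning.metrics.functional (the module that later became torchmetrics.functional):

import torch
from pytorch_lightning.metrics import functional as metricsF

y = torch.tensor([0, 0, 1, 1])                  # binary targets
y_sig = torch.tensor([0.10, 0.40, 0.35, 0.80])  # sigmoid outputs
y_pred = (y_sig > 0.5).int()                    # hard predictions at a 0.5 threshold

print(metricsF.accuracy(y_pred, y))  # 0.75
print(metricsF.auroc(y_sig, y))      # 0.75 -- ranks the raw probabilities, ignores the threshold
print(metricsF.iou(y_pred, y))       # IoU between the thresholded predictions and the targets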
Example #2
def test_v1_5_metric_auc_auroc():
    AUC.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        AUC()

    ROC.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        ROC()

    AUROC.__init__._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        AUROC()

    x = torch.tensor([0, 1, 2, 3])
    y = torch.tensor([0, 1, 2, 2])
    auc._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert auc(x, y) == torch.tensor(4.)

    preds = torch.tensor([0, 1, 2, 3])
    target = torch.tensor([0, 1, 1, 1])
    roc._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        fpr, tpr, thrs = roc(preds, target, pos_label=1)
    assert torch.equal(fpr, torch.tensor([0., 0., 0., 0., 1.]))
    assert torch.allclose(tpr, torch.tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000]), atol=1e-4)
    assert torch.equal(thrs, torch.tensor([4, 3, 2, 1, 0]))

    preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
    target = torch.tensor([0, 0, 1, 1, 1])
    auroc._warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert auroc(preds, target) == torch.tensor(0.5)
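For reference, the values asserted above can be cross-checked against scikit-learn (a quick sketch, not part of the test suite):

import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

preds = np.array([0.13, 0.26, 0.08, 0.19, 0.34])
target = np.array([0, 0, 1, 1, 1])
print(roc_auc_score(target, preds))  # 0.5, matching the auroc assertion

fpr, tpr, _ = roc_curve([0, 1, 1, 1], [0, 1, 2, 3], drop_intermediate=False)
print(fpr)  # [0. 0. 0. 0. 1.]
print(tpr)  # [0.     0.3333 0.6667 1.     1.    ] (approximately)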
Example #3
 def compute_metrics(self, preds, tensors, metrics):
     nelbo, nll, kl = metrics
     B, X, A, M, Y, CE = tensors
     if self.hparams['eval_type'] == 'mse':
         mse, r2, ci = calc_stats(preds, tensors)
         return {
             'val_loss': nelbo,
             'nll': nll,
             'kl': kl,
             'mse': mse,
             'r2': r2,
             'ci': ci
         }
     elif self.hparams['eval_type'] == 'f1':
         f1 = self.f1(preds.argmax(dim=1), Y)
         p = self.precision(preds.argmax(dim=1), Y)
         r = self.recall(preds.argmax(dim=1), Y)
         return {
             'val_loss': nelbo,
             'nll': nll,
             'kl': kl,
             'F1': f1,
             'precision': p,
             'recall': r
         }
     elif self.hparams['eval_type'] == 'auc':
         auc = auroc(preds.argmax(dim=1), Y)
         return {'val_loss': nelbo, 'nll': nll, 'kl': kl, 'auc': auc}
     else:
         raise ValueError('bad metric specified...')
Example #4
    def validation_epoch_end(self, outputs):

        logits = torch.cat([o["logits"] for o in outputs], dim=0)
        labels = torch.cat([o["labels"] for o in outputs], dim=0)
        if self.hparams["multilabel"]:
            self.log(
                "val_auroc",
                np.mean(
                    [
                        auroc(
                            torch.sigmoid(logits[:, i]),
                            labels[:, i],
                            pos_label=1,
                        )
                        .detach()
                        .cpu()
                        .item()
                        for i in range(logits.shape[1])
                    ]
                ),
                prog_bar=True,
            )
        else:
            self.log(
                "val_f1",
                f1_score(
                    torch.argmax(logits, dim=1),
                    labels,
                    num_classes=self.hparams["output_size"],
                    class_reduction="macro",
                )
                .detach()
                .cpu()
                .item(),
                prog_bar=True,
            )
            self.log(
                "val_acc",
                accuracy(torch.argmax(logits, dim=1), labels)
                .detach()
                .cpu()
                .item(),
                prog_bar=True,
            )
        self.log(
            "val_loss",
            self.loss(
                logits,
                labels.type(
                    logits.dtype if self.hparams["multilabel"] else labels.dtype
                ),
            )
            .detach()
            .cpu()
            .item(),
            prog_bar=True,
        )
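The multilabel branch above computes one binary AUROC per label column and then averages them (a macro average). A self-contained sketch of that pattern on toy tensors, using the same functional auroc assumed by the snippet:

import numpy as np
import torch
from pytorch_lightning.metrics.functional import auroc

logits = torch.tensor([[ 1.2, -0.3],
                       [-0.8,  0.5],
                       [ 0.4,  1.9],
                       [-1.5, -0.7]])
labels = torch.tensor([[1, 0],
                       [0, 1],
                       [1, 1],
                       [0, 0]])
per_label = [
    auroc(torch.sigmoid(logits[:, i]), labels[:, i], pos_label=1).item()
    for i in range(logits.shape[1])
]
print(np.mean(per_label))  # macro-averaged AUROC across labels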
Example #5
 def cal_auc(self):
     auc_list = []
     for pred, label in zip(self.predict, self.label):
         try:
             auc = auroc(torch.Tensor(pred), torch.Tensor(label))
         except Exception:
             # auroc is undefined (and can raise) when label contains a single class
             auc = torch.tensor(0)
         auc_list.append(auc)
     return auc_list
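The try/except guards against ids whose label vector contains only one class, for which AUROC is undefined (the functional auroc either raises or returns a meaningless value, hence the guard); the zero fallback keeps auc_list aligned with self.predict. A minimal illustration of that failure mode, as a sketch:

import torch
from pytorch_lightning.metrics.functional import auroc

pred = torch.Tensor([0.2, 0.7, 0.4])
label = torch.Tensor([1, 1, 1])   # a single class -> AUROC is undefined
try:
    auc = auroc(pred, label)
except Exception:
    auc = torch.tensor(0)         # same fallback as cal_auc above
print(auc)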
Example #6
def test(model, loader):
    model.eval()
    pred = []
    y = []
    with torch.no_grad():
        for data in loader:
            data = data.to(device)
            y.append(data.y.detach().cpu())
            pred.append(model(data).detach().cpu())
    return auroc(torch.cat(pred), torch.cat(y))
Example #7
 def prediction_metrics(X_bin, X_cts, mean_cts, proba_bin):
     mse = 0.
     auc = 0.
     if X_cts is not None:
         which_cts = ~X_cts.isnan()
         mse = mean_squared_error(mean_cts[which_cts],
                                  X_cts[which_cts]).item()
     if X_bin is not None:
         which_bin = ~X_bin.isnan()
         auc = auroc(proba_bin[which_bin], X_bin[which_bin]).item()
     return auc, mse
Example #8
    def training_epoch_end(self, outputs: List):
        train_loss = torch.cat(
            [out["loss"].unsqueeze(dim=0) for out in outputs]).mean()
        y_pred = torch.cat([out["y_pred"] for out in outputs], dim=0)
        y_true = torch.cat([out["y_true"] for out in outputs], dim=0)
        train_auc = auroc(y_pred, y_true)

        logs = {"train_loss": train_loss, "train_auc": train_auc}
        return {
            "log": logs,
        }
Example #9
    def validation_step(self, batch: MINDBatch, batch_idx):
        loss, y_score = self.model.forward(batch)
        y_true = batch['targets']
        n_processed = batch['batch_cand'].max() + 1

        for n in range(n_processed):
            mask = batch['batch_cand'] == n
            s, t = y_score[mask], y_true[mask]
            s = torch.softmax(s, dim=0)
            self.am.val_roc.update(auroc(s, t))
            self.am.val_ndcg10.update(ndcg_score(s, t))
        self.am.val_loss.update(loss, n_processed)
Example #10
 def training_step(self, batch, batch_idx):
     """训练过程"""
     input, label = batch
     y_hat = self.forward(input)
     loss = self.loss_fn(y_hat, label)
     acc = self.compute_metric(y_hat, label)
     auc = auroc(y_hat.view(-1), label.view(-1))
     tensorboard_logs = {
         'train_loss': loss,
         'train_acc': acc,
         "train_auc": auc,
         "lr": self.trainer.optimizers[0].param_groups[0]['lr']
     }
     return {'loss': loss, 'log': tensorboard_logs}
Example #11
    def validation_epoch_end(self, outputs: List):
        val_loss = torch.cat(
            [out["val_loss"].unsqueeze(dim=0) for out in outputs]).mean()
        y_pred = torch.cat([out["y_pred"] for out in outputs], dim=0)
        y_true = torch.cat([out["y_true"] for out in outputs], dim=0)
        val_auc = auroc(y_pred, y_true)

        logs = {"val_loss": val_loss, "val_auc": val_auc}
        print(
            f"Epoch {self.current_epoch} // val loss: {val_loss:.4f}, val auc: {val_auc:.4f}, pos: {y_true.sum()}, neg: {len(y_true) - y_true.sum()}"
        )
        return {
            "log": logs,
        }
Example #12
 def calc_categorical_metrics(self, categorical_feature_idcs, thr_idx):
     # Define helper functions:
     select_func = lambda x: x[:, categorical_feature_idcs].flatten()
     # Get relevant timeframe for targets and preds:
     thresholds_flat = torch.cat([self.categorical_thresholds[thr_idx][:len(target)] for target in self.all_targets]).cpu()
     targets_flat = torch.cat([select_func(target) for target in self.all_targets]).cpu()
     preds_flat = torch.cat([torch.sigmoid(select_func(pred)) for pred in self.all_preds]).cpu()
     pred_tar_thr = [[_p, _t, _thr] for _p, _t, _thr in zip(preds_flat, targets_flat, thresholds_flat) if not torch.isnan(_t)]
     preds_flat = torch.stack([_d[0] for _d in pred_tar_thr])
     targets_flat = torch.stack([_d[1] for _d in pred_tar_thr]).int()
     thresholds_flat = torch.stack([_d[2] for _d in pred_tar_thr])
     preds_bool = (preds_flat > thresholds_flat).float()
     auc = auroc(preds_flat, targets_flat)
     acc = accuracy(preds_bool.int(), targets_flat)
     return auc, acc
Example #13
    def validation_epoch_end(self, outputs: List):
        val_loss = torch.cat(
            [out["val_loss"].unsqueeze(dim=0) for out in outputs]).mean()
        y_pred = torch.cat([out["y_pred"] for out in outputs], dim=0)
        y_true = torch.cat([out["y_true"] for out in outputs], dim=0)
        val_auc = auroc(y_pred, y_true)

        logs = {
            "val_loss": val_loss,
            "val_auc": val_auc,
            "model_name": self.model_name,
            "fold": self.fold,
            "sz": self.sz,
        }
        return {
            "log": logs,
        }
Example #14
    def calculate_metrics(self, pred, target, prvs_metrics=None):
        #          output.cpu().detach().numpy()
        metrics = {
            'accuracy': pl_metrics.accuracy(torch.round(pred),
                                            torch.round(target)),
            'aucroc': pl_metrics.auroc(pred, target),
            'f1_score': pl_metrics.f1_score(torch.round(pred),
                                            torch.round(target))
        }

        if prvs_metrics is not None:
            for key in metrics.keys():
                metrics[key] += prvs_metrics[key]
            metrics['counter'] = prvs_metrics['counter'] + 1
        else:
            metrics['counter'] = 1

        return metrics
Example #15
 def prediction_metrics(self, X_bin, X_cts, mean_cts, proba_bin):
     n_sample = self.model.n_samples
     mse = 0.
     auc = 0.
     if X_cts is not None:
         X_cts = X_cts.unsqueeze(1)
         which_cts = ~X_cts.isnan()
         for i in range(n_sample):
             mean_cts_tmp = mean_cts[:, [i], :]
             mse += mean_squared_error(mean_cts_tmp[which_cts], X_cts[which_cts]).item()
         mse = mse / n_sample
     if X_bin is not None:
         X_bin = X_bin.unsqueeze(1)
         which_bin = ~X_bin.isnan()
         for i in range(n_sample):
             proba_bin_tmp = proba_bin[:, [i], :]
             auc += auroc(proba_bin_tmp[which_bin], X_bin[which_bin]).item()
         auc = auc / n_sample
     return auc, mse
Example #16
    def validation_epoch_end(self, outputs: List):
        val_loss = torch.cat(
            [out["val_loss"].unsqueeze(dim=0) for out in outputs]).mean()
        y_pred = torch.cat([out["y_pred"] for out in outputs], dim=0)
        y_true = torch.cat([out["y_true"] for out in outputs], dim=0)

        try:
            val_auc = auroc(y_pred, y_true)
        except ValueError as err:
            print(f"ValueError: {err}")
            val_auc = torch.Tensor([0])

        logs = {
            "val_loss": val_loss,
            "val_auc": val_auc,
            "model_name": self.model_name,
            "fold": self.fold,
            "sz": self.sz,
        }
        return {
            "log": logs,
        }
Example #17
def eval_epoch(args, logger, g, dataloader, encoder, decoder, msg2mail,
               loss_fcn, device, num_samples):

    m_ap, m_auc, m_acc = [[], [], []] if 'LP' in args.tasks else [0, 0, 0]

    labels_all = torch.zeros((num_samples)).long()
    logits_all = torch.zeros((num_samples))

    attn_weight_all = torch.zeros((num_samples, args.n_mail))

    m_loss = []
    m_infer_time = []
    with torch.no_grad():
        encoder.eval()
        decoder.eval()
        loss = torch.tensor(0)
        for batch_idx, (input_nodes, pos_graph, neg_graph, blocks, frontier,
                        current_ts) in enumerate(dataloader):
            n_sample = pos_graph.num_edges()
            start_idx = batch_idx * n_sample
            end_idx = min(num_samples, start_idx + n_sample)

            pos_graph = pos_graph.to(device)
            neg_graph = neg_graph.to(device) if neg_graph is not None else None
            if not args.no_time or not args.no_pos:
                current_ts, pos_ts, num_pos_nodes = get_current_ts(
                    args, pos_graph, neg_graph)
                pos_graph.ndata['ts'] = current_ts
            else:
                current_ts, pos_ts, num_pos_nodes = None, None, None

            _ = dgl.add_reverse_edges(
                neg_graph) if neg_graph is not None else None

            start = time.time()
            emb, attn_weight = encoder(dgl.add_reverse_edges(pos_graph), _,
                                       num_pos_nodes)
            #attn_weight_all[start_idx:end_idx] = attn_weight[:n_sample]

            logits, labels = decoder(emb, pos_graph, neg_graph)
            end = time.time() - start
            m_infer_time.append(end)

            loss = loss_fcn(logits, labels)
            m_loss.append(loss.item())
            mail = msg2mail.gen_mail(args, emb, input_nodes, pos_graph,
                                     frontier, 'val')
            if not args.no_time:
                g.ndata['last_update'][pos_graph.ndata[dgl.NID]
                                       [:num_pos_nodes]] = pos_ts.to('cpu')
            g.ndata['feat'][pos_graph.ndata[dgl.NID]] = emb.to('cpu')
            g.ndata['mail'][input_nodes] = mail

            labels = labels.long()
            logits = logits.sigmoid()
            if 'LP' in args.tasks:
                pred = logits > 0.5
                m_ap.append(average_precision(logits, labels).cpu().numpy())
                m_auc.append(auroc(logits, labels).cpu().numpy())
                m_acc.append(accuracy(pred, labels).cpu().numpy())
            else:
                labels_all[start_idx:end_idx] = labels
                logits_all[start_idx:end_idx] = logits

    if 'LP' in args.tasks:
        ap, auc, acc = np.mean(m_ap), np.mean(m_auc), np.mean(m_acc)
    else:
        pred_all = logits_all > 0.5
        ap = average_precision(logits_all, labels_all).cpu().item()
        auc = auroc(logits_all, labels_all).cpu().item()
        acc = accuracy(pred_all, labels_all).cpu().item()

        fprs, tprs, thresholds = roc(logits_all, labels_all)
        fpr_l, tpr_l, thres_l = get_TPR_FPR_metrics(fprs, tprs, thresholds)
        print_tp_fp_thres(args.tasks, logger, fpr_l, tpr_l, thres_l)

    print('total inference time', np.sum(m_infer_time))
    logger.info(attn_weight_all.mean(0))
    encoder.train()
    decoder.train()
    return ap, auc, acc, np.mean(m_loss)
Example #18
 def validation_epoch_end(self, outputs):
     avg_loss = torch.stack([x['valid_loss'] for x in outputs]).mean()
     yb = torch.cat([x['yb'] for x in outputs], 0)
     predictions = torch.cat([x['predictions'] for x in outputs], 0)  # note: the 'predictions' key is assumed; adjust to match validation_step's output
     score = auroc(predictions, yb)
     return {'val_loss': avg_loss, 'score': score}
Example #19
 def compute(self) -> torch.Tensor:
     preds, targets = self._get_preds_and_targets()
     if torch.unique(targets).numel() == 1:
         return torch.tensor(np.nan)
     return auroc(preds, targets)
Example #20
def eval_epoch(args, g, dataloader, attn, decoder, bandi_sampler, loss_fcn,
               device, num_samples):
    m_ap, m_auc, m_acc = [[], [], []] if 'LP' in args.tasks else [0, 0, 0]
    labels_all = torch.zeros((num_samples))
    logits_all = torch.zeros((num_samples))
    m_loss = []
    m_infer_time = []
    with torch.no_grad():
        attn.eval()
        decoder.eval()
        #loss = torch.tensor(0)
        for batch_idx, (input_nodes, pos_graph, neg_graph, blocks, frontier,
                        current_ts) in enumerate(dataloader):

            pos_graph = pos_graph.to(device)
            neg_graph = neg_graph.to(device)
            n_sample = pos_graph.num_edges()
            start_idx = batch_idx * n_sample
            end_idx = min(num_samples, start_idx + n_sample)
            for j in range(args.n_layer):
                blocks[j] = blocks[j].to(device)

            current_ts, pos_ts, num_pos_nodes = get_current_ts(
                pos_graph, neg_graph)
            pos_graph.ndata['ts'] = current_ts

            start = time.time()
            blocks, att_map = attn.forward(blocks)
            emb = blocks[-1].dstdata['h']

            blocks = bandi_sampler.weight_update(blocks, att_map)
            blocks = bandi_sampler.prob_update(blocks)

            logits, labels = decoder(emb, pos_graph, neg_graph)
            end = time.time() - start
            m_infer_time.append(end)

            loss = loss_fcn(logits, labels)
            m_loss.append(loss.item())

            g.ndata['last_update'][pos_graph.ndata[dgl.NID]
                                   [:num_pos_nodes]] = pos_ts.to('cpu')
            g.edata['q_ij'][
                blocks[-1].edata['eid']] = blocks[-1].edata['q_ij'].cpu()
            g.edata['weight'][
                blocks[-1].edata['eid']] = blocks[-1].edata['weight'].cpu()
            #g.ndata['h'][pos_graph.ndata[dgl.NID]] = emb.to('cpu')
            if 'LP' in args.tasks:
                pred = logits.sigmoid() > 0.5
                m_ap.append(average_precision(logits, labels).cpu().numpy())
                m_auc.append(auroc(logits, labels).cpu().numpy())
                m_acc.append(accuracy(pred, labels).cpu().numpy())
            else:
                labels_all[start_idx:end_idx] = labels
                logits_all[start_idx:end_idx] = logits
    if 'LP' in args.tasks:
        ap, auc, acc = np.mean(m_ap), np.mean(m_auc), np.mean(m_acc)
    else:
        pred_all = logits_all.sigmoid() > 0.5
        ap = average_precision(logits_all, labels_all).cpu().item()
        auc = auroc(logits_all, labels_all).cpu().item()
        acc = accuracy(pred_all, labels_all).cpu().item()

    attn.train()
    decoder.train()
    return ap, auc, acc, np.mean(m_loss), np.sum(m_infer_time)
Example #21
 def compute(self):
     return auroc(self.pred, self.true, pos_label=self.pos_label)
Example #22
    def compute(self):
        if self.binary_target.sum() == self.binary_target.numel():
            return torch.tensor(float("nan"), device=self.pred_score.device)

        return auroc(self.pred_score, self.binary_target)
Example #23
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    path = osp.join(osp.dirname(osp.realpath(__file__)), dataset_directory_path, args.fingerprint_type)
    dataset = TUDataset(path, name=args.dataset, pre_transform=PreTransform(args))
    perm = torch.randperm(len(dataset), dtype=torch.long)
    dataset = dataset[perm]
    tenpercent = int(len(dataset) * 0.1)
    test_dataset = dataset[:2 * tenpercent]
    train_dataset = dataset[int(2 * tenpercent):]
    train_loader = DataLoader(train_dataset)
    test_loader = DataLoader(test_dataset)

    X_train = []
    X_test = []
    Y_train = []
    Y_test = []
    for data in train_loader:
        X_train.append(data.emb.tolist())
        Y_train.append(data.y.tolist())
    for data in test_loader:
        X_test.append(data.emb.tolist())
        Y_test.append(data.y.tolist())
    X_train = np.array(X_train).reshape(-1, embedding_size[args.fingerprint_type])
    X_test = np.array(X_test).reshape(-1, embedding_size[args.fingerprint_type])
    Y_train = np.array(Y_train).reshape(-1)
    Y_test = np.array(Y_test).reshape(-1)
    clf = LogisticRegression(random_state=seed).fit(X_train, Y_train)
    Y_pred = clf.decision_function(X_test)
    Y_pred = torch.from_numpy(Y_pred)
    Y_test = torch.from_numpy(Y_test)
    print(auroc(Y_pred, Y_test))
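Because AUROC depends only on how the scores rank the samples, the uncalibrated decision_function margins can be passed to it directly. A quick cross-check sketch against scikit-learn's implementation, assuming binary labels in Y_test:

from sklearn.metrics import roc_auc_score
print(roc_auc_score(Y_test.numpy(), Y_pred.numpy()))  # should agree with the auroc value above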