Example No. 1
def test_wrong_params(average, mdmc_average, num_classes, inputs, ignore_index,
                      top_k, threshold):
    preds, target = inputs.preds, inputs.target

    with pytest.raises(ValueError):
        acc = Accuracy(
            average=average,
            mdmc_average=mdmc_average,
            num_classes=num_classes,
            ignore_index=ignore_index,
            threshold=threshold,
            top_k=top_k,
        )
        acc(preds[0], target[0])
        acc.compute()

    with pytest.raises(ValueError):
        accuracy(
            preds[0],
            target[0],
            average=average,
            mdmc_average=mdmc_average,
            num_classes=num_classes,
            ignore_index=ignore_index,
            threshold=threshold,
            top_k=top_k,
        )
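The test above is driven by a pytest.mark.parametrize decorator that is not shown. A minimal self-contained sketch of the same pattern (the concrete invalid combinations and the _input_mcls_prob fixture are assumptions based on the surrounding test module, not copied from the original suite):

@pytest.mark.parametrize("kwargs", [
    dict(average="wrong_method"),              # unknown reduction method
    dict(num_classes=3, ignore_index=5),       # ignore_index outside the class range
])
def test_invalid_accuracy_params(kwargs):
    with pytest.raises(ValueError):
        acc = Accuracy(**kwargs)
        acc(_input_mcls_prob.preds[0], _input_mcls_prob.target[0])
        acc.compute()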
Example No. 2
def test_topk_accuracy_wrong_input_types(preds, target):
    topk = Accuracy(top_k=1)

    with pytest.raises(ValueError):
        topk(preds[0], target[0])

    with pytest.raises(ValueError):
        accuracy(preds[0], target[0], top_k=1)
Example No. 3
def test_wrong_params(top_k, threshold):
    preds, target = _input_mcls_prob.preds, _input_mcls_prob.target

    with pytest.raises(ValueError):
        acc = Accuracy(threshold=threshold, top_k=top_k)
        acc(preds, target)
        acc.compute()

    with pytest.raises(ValueError):
        accuracy(preds, target, threshold=threshold, top_k=top_k)
Example No. 4
def main(args):
    g, num_rels, num_classes, labels, train_idx, test_idx, target_idx = load_data(
        args.dataset, get_norm=True)

    num_nodes = g.num_nodes()

    # Since the nodes are featureless, learn node embeddings from scratch
    # This requires passing the node IDs to the model.
    feats = th.arange(num_nodes)

    model = RGCN(num_nodes,
                 args.n_hidden,
                 num_classes,
                 num_rels,
                 num_bases=args.n_bases)

    if args.gpu >= 0 and th.cuda.is_available():
        device = th.device(args.gpu)
    else:
        device = th.device('cpu')
    feats = feats.to(device)
    labels = labels.to(device)
    model = model.to(device)
    g = g.to(device)

    optimizer = th.optim.Adam(model.parameters(),
                              lr=1e-2,
                              weight_decay=args.l2norm)

    model.train()
    for epoch in range(50):
        logits = model(g, feats)
        logits = logits[target_idx]
        loss = F.cross_entropy(logits[train_idx], labels[train_idx])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_acc = accuracy(logits[train_idx].argmax(dim=1),
                             labels[train_idx]).item()
        print("Epoch {:05d} | Train Accuracy: {:.4f} | Train Loss: {:.4f}".
              format(epoch, train_acc, loss.item()))
    print()

    model.eval()
    with th.no_grad():
        logits = model(g, feats)
    logits = logits[target_idx]
    test_acc = accuracy(logits[test_idx].argmax(dim=1),
                        labels[test_idx]).item()
    print("Test Accuracy: {:.4f}".format(test_acc))
Example No. 5
    def test_step(self, batch, batch_idx):
        inputs, targets = batch
        outputs = self.forward(inputs)
        test_accuracy = accuracy(outputs, targets)
        loss = self.loss(outputs, targets)
        self.log('test_accuracy', test_accuracy)
        return {"test_loss": loss, "test_accuracy": test_accuracy}
Example No. 6
    def _loss(self, batch, batch_idx):
        x, y = batch
        body = self.body(x)
        y_pred = self.policy_head(body)
        # BCE-with-logits on the policy-head output against the binary targets
        loss = binary_cross_entropy_with_logits(y_pred, y)
        ba = accuracy((y_pred > 0.5).float(), y.bool())
        return loss, ba
Example No. 7
def layerwise_infer(device, graph, nid, model, batch_size):
    model.eval()
    with torch.no_grad():
        pred = model.inference(graph, device, batch_size).to(device)
        pred = pred[nid]
        label = graph.ndata['label'][nid]
        return MF.accuracy(pred, label)
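A hypothetical call site for layerwise_infer (graph, test_idx and model are assumed to come from the surrounding training script). Because model.inference runs layer by layer over the full graph, a much larger batch size than in mini-batch training is usually affordable:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
test_acc = layerwise_infer(device, graph, test_idx, model, batch_size=4096)
print('Test accuracy {:.4f}'.format(test_acc.item()))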
Example No. 8
    def on_validation_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Sequence,
        batch: Sequence,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        x, y = self.to_device(batch, pl_module.device)

        with torch.no_grad():
            representations = self.get_representations(pl_module, x)

        representations = representations.detach()

        # forward pass
        mlp_preds = pl_module.non_linear_evaluator(
            representations)  # type: ignore[operator]
        mlp_loss = F.cross_entropy(mlp_preds, y)

        # log metrics
        val_acc = accuracy(mlp_preds, y)
        pl_module.log('online_val_acc',
                      val_acc,
                      on_step=False,
                      on_epoch=True,
                      sync_dist=True)
        pl_module.log('online_val_loss',
                      mlp_loss,
                      on_step=False,
                      on_epoch=True,
                      sync_dist=True)
Example No. 9
    def validation_step(self, batch: Tuple[Tensor, Tensor],
                        batch_idx: int) -> Dict[str, Tensor]:
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self.linear(x)
        acc = accuracy(F.softmax(y_hat, -1), y)
        return {'val_loss': F.cross_entropy(y_hat, y), 'acc': acc}
Example No. 10
    def test_step(self, batch: Tuple[Tensor, Tensor],
                  batch_idx: int) -> Dict[str, Tensor]:
        x, y = batch
        x = x.view(x.size(0), -1)
        y_hat = self.linear(x)
        acc = accuracy(F.softmax(y_hat, -1), y)
        return {"test_loss": F.cross_entropy(y_hat, y), "acc": acc}
Example No. 11
    def on_train_batch_end(
        self,
        trainer: Trainer,
        pl_module: LightningModule,
        outputs: Sequence,
        batch: Sequence,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        x, y = self.to_device(batch, pl_module.device)

        with torch.no_grad():
            representations = self.get_representations(pl_module, x)

        representations = representations.detach()

        # forward pass
        mlp_logits = pl_module.non_linear_evaluator(representations)  # type: ignore[operator]
        mlp_loss = F.cross_entropy(mlp_logits, y)

        # update finetune weights
        mlp_loss.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()

        # log metrics
        train_acc = accuracy(mlp_logits.softmax(-1), y)
        pl_module.log('online_train_acc', train_acc, on_step=True, on_epoch=False)
        pl_module.log('online_train_loss', mlp_loss, on_step=True, on_epoch=False)
Example No. 12
    def test_step(self, batch_data, batch_index):
        x, y = batch_data
        logits = self(x)
        criterion = nn.CrossEntropyLoss()
        probs = torch.softmax(logits, dim=1)

        # validation metrics
        acc = accuracy(torch.argmax(probs, dim=1), torch.argmax(y, dim=1))
        loss = criterion(logits, torch.argmax(y, dim=1))
        f_score = f1(torch.argmax(probs, dim=1),
                     torch.argmax(y, dim=1),
                     average='weighted',
                     num_classes=4)
        self.log('test/f1', f_score, prog_bar=True)
        self.log('test/loss', loss, prog_bar=True)
        self.log('test/accuracy', acc, prog_bar=True)
        predictions = torch.argmax(probs, dim=1)
        targets = torch.argmax(y, dim=1)
        return {
            "test_loss": loss,
            "test_accuracy": acc,
            "f_score": f_score,
            "predictions": predictions,
            "targets": targets
        }
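The targets y in this test_step are one-hot encoded, which is why every metric call goes through torch.argmax(y, dim=1) first. A minimal illustration of the shapes involved (the values are made up; the actual datamodule is not shown):

import torch

# one-hot targets for a batch of 3 samples over 4 classes
y = torch.tensor([[0, 1, 0, 0],
                  [0, 0, 0, 1],
                  [1, 0, 0, 0]])
torch.argmax(y, dim=1)  # tensor([1, 3, 0]): the class indices the metrics expect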
Example No. 13
def train(rank, world_size, graph, num_classes, split_idx):
    torch.cuda.set_device(rank)
    dist.init_process_group('nccl',
                            'tcp://127.0.0.1:12347',
                            world_size=world_size,
                            rank=rank)

    model = SAGE(graph.ndata['feat'].shape[1], 256, num_classes).cuda()
    model = nn.parallel.DistributedDataParallel(model,
                                                device_ids=[rank],
                                                output_device=rank)
    opt = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4)

    train_idx, valid_idx, test_idx = (split_idx['train'], split_idx['valid'],
                                      split_idx['test'])
    if USE_WRAPPER:
        import dglnew
        graph = dglnew.graph.wrapper.DGLGraphStorage(graph)

    sampler = dgl.dataloading.NeighborSampler([5, 5, 5],
                                              output_device='cpu',
                                              prefetch_node_feats=['feat'],
                                              prefetch_labels=['label'])
    dataloader = dgl.dataloading.NodeDataLoader(
        graph,
        train_idx,
        sampler,
        device='cuda',
        batch_size=1000,
        shuffle=True,
        drop_last=False,
        pin_memory=True,
        num_workers=4,
        persistent_workers=True,
        use_ddp=True,
        use_prefetch_thread=True)  # TBD: could probably remove this argument

    durations = []
    for _ in range(10):
        t0 = time.time()
        for it, (input_nodes, output_nodes, blocks) in enumerate(dataloader):
            x = blocks[0].srcdata['feat']
            y = blocks[-1].dstdata['label'][:, 0]
            y_hat = model(blocks, x)
            loss = F.cross_entropy(y_hat, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            if it % 20 == 0:
                acc = MF.accuracy(y_hat, y)
                mem = torch.cuda.max_memory_allocated() / 1000000
                print('Loss', loss.item(), 'Acc', acc.item(), 'GPU Mem', mem,
                      'MB')
        tt = time.time()
        if rank == 0:
            print(tt - t0)
            durations.append(tt - t0)
    if rank == 0:
        print(np.mean(durations[4:]), np.std(durations[4:]))
Example No. 14
    def update(
        self,
        predicted_y: Tensor,
        true_y: Tensor,
        task_labels: Union[int, Tensor],
    ) -> None:
        """
        Update the running top-k accuracy given the true and predicted labels.
        Parameter `task_labels` is used to decide how to update the inner
        dictionary: if an int, only the dictionary value related to that task
        is updated. If a Tensor, all the dictionary elements belonging to the
        task labels will be updated.

        :param predicted_y: The model prediction. Both labels and logit vectors
            are supported.
        :param true_y: The ground truth. Both labels and one-hot vectors
            are supported.
        :param task_labels: the int task label associated with the current
            experience or the task labels vector showing the task label
            for each pattern.

        :return: None.
        """
        if len(true_y) != len(predicted_y):
            raise ValueError(
                "Size mismatch for true_y and predicted_y tensors")

        if isinstance(task_labels, Tensor) and len(task_labels) != len(true_y):
            raise ValueError(
                "Size mismatch for true_y and task_labels tensors")

        true_y = torch.as_tensor(true_y)
        predicted_y = torch.as_tensor(predicted_y)

        if isinstance(task_labels, int):
            total_patterns = len(true_y)
            self._topk_acc_dict[task_labels].update(
                accuracy(predicted_y, true_y, top_k=self.top_k),
                total_patterns)
        elif isinstance(task_labels, Tensor):
            for pred, true, t in zip(predicted_y, true_y, task_labels):
                self._topk_acc_dict[t.item()].update(
                    accuracy(pred, true, top_k=self.top_k), 1)
        else:
            raise ValueError(f"Task label type: {type(task_labels)}, "
                             f"expected int/float or Tensor")
Example No. 15
    def test_step(self, batch, batch_idx):
        out = self.forward(batch)
        loss = self.criterion(out.float(), batch.label)
        preds = torch.argmax(F.softmax(out, 1), 1)
        self.test_correct += torch.sum(preds == batch.label).item()
        self.log("test_loss", loss)
        self.log("test_acc_step", accuracy(preds, batch.label), prog_bar=True)
        return {'test_loss': loss}
Example No. 16
    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        outputs = self(inputs)
        train_accuracy = accuracy(outputs, targets)
        loss = self.loss(outputs, targets)
        self.log('train_accuracy', train_accuracy, prog_bar=True)
        self.log('train_loss', loss)
        return {"loss": loss, "train_accuracy": train_accuracy}
Example No. 17
    def validation_epoch_end(self, outputs):
        val_loss = sum([x["val_loss"] for x in outputs])
        pred = torch.cat([x["pred"] for x in outputs])
        true = torch.cat([x["true"] for x in outputs])
        f_score = metrics.f1(pred, true, num_classes=2)
        accuracy = metrics.accuracy(pred, true)
        out = {"val_loss": val_loss, "val_f_score": f_score, "val_accuracy": accuracy}
        self.log_dict(out)
        return {**out, "log": out}
Example No. 18
    def _common_step(self, batch):
        input_tensor, target = batch
        output = self(input_tensor)
        _, pred = torch.max(output, dim=1)

        loss = F.cross_entropy(output, target)
        acc = M.accuracy(pred, target)

        return loss, acc
Example No. 19
    def score_epoch_end(self, results: list[dict[str, torch.Tensor]]):
        """
        Score and evaluate the given dataset.
        """
        items = {
            "B-M": ("pm", "ym"),
            "BIRAD": ("pb", "yb"),
            "discrim": ("cy", "cy-GT"),
        }

        for res, caption in zip(results, self.score_caption):
            res = deep_collate(res)

            if (c := ('fi' in res and 'fi') or ('ft' in res and 'ft')):
                self.logger.experiment.add_embedding(
                    res[c],
                    metadata=res["ym"].tolist(),
                    global_step=self.current_epoch,
                    tag=caption,
                )

            if "dice" in res:
                self.log(f"segment/dice/{caption}",
                         res["dice"].mean(),
                         logger=True)
            if 'confidence' in res:
                self.log(f"segment/confidence/{caption}",
                         res["confidence"].mean(),
                         logger=True)

            for k, (p, y) in items.items():
                if p not in res or y not in res: continue
                p, y = res[p], res[y]
                if all(i is None for i in y): continue

                if isinstance(y, list):
                    p = torch.stack(
                        [p[i] for i, v in enumerate(y) if v is not None])
                    y = torch.tensor(
                        [v for i, v in enumerate(y) if v is not None],
                        dtype=torch.long,
                        device=p.device)

                assert len(p) == len(y)
                err = 1 - accuracy(p, y)
                self.log(f"err/{k}/{caption}", err, logger=True)

                if p.dim() == 2 and p.size(1) <= 2:
                    p = p[:, -1]
                if p.dim() == 1:
                    self.logger.experiment.add_pr_curve(
                        f"{k}/{caption}", y, p, self.current_epoch)
                    self.logger.experiment.add_histogram(
                        f"distribution/{k}/{caption}", p, self.current_epoch)
                else:
                    self.logger.experiment.add_histogram(
                        f"distribution/{k}/{caption}", p, self.current_epoch)
Example No. 20
def main(args):
    g, num_rels, num_classes, labels, train_idx, test_idx, target_idx = load_data(
        args.dataset, get_norm=True)

    model = RGCN(g.num_nodes(),
                 args.n_hidden,
                 num_classes,
                 num_rels,
                 num_bases=args.n_bases)

    if args.gpu >= 0 and th.cuda.is_available():
        device = th.device(args.gpu)
    else:
        device = th.device('cpu')
    labels = labels.to(device)
    model = model.to(device)
    g = g.int().to(device)

    optimizer = th.optim.Adam(model.parameters(),
                              lr=1e-2,
                              weight_decay=args.wd)

    model.train()
    for epoch in range(100):
        logits = model(g)
        logits = logits[target_idx]
        loss = F.cross_entropy(logits[train_idx], labels[train_idx])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_acc = accuracy(logits[train_idx].argmax(dim=1),
                             labels[train_idx]).item()
        print("Epoch {:05d} | Train Accuracy: {:.4f} | Train Loss: {:.4f}".
              format(epoch, train_acc, loss.item()))
    print()

    model.eval()
    with th.no_grad():
        logits = model(g)
    logits = logits[target_idx]
    test_acc = accuracy(logits[test_idx].argmax(dim=1),
                        labels[test_idx]).item()
    print("Test Accuracy: {:.4f}".format(test_acc))
Example No. 21
    def validation_step(self, batch, _batch_idx):
        x, y, _idx = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        prediction = torch.argmax(logits, dim=1)
        acc = accuracy(prediction, y)

        self.log('val_loss', loss, prog_bar=True)
        self.log('val_acc', acc, prog_bar=True)
        return loss
Example No. 22
def main(args):
    g, num_rels, num_classes, labels, train_idx, test_idx, target_idx, inv_target = load_data(
        args.dataset, inv_target=True)

    if args.gpu >= 0 and th.cuda.is_available():
        device = th.device(args.gpu)
    else:
        device = th.device('cpu')

    train_loader, val_loader, test_loader = init_dataloaders(
        args, g, train_idx, test_idx, target_idx, args.gpu)

    model = RGCN(g.num_nodes(),
                 args.n_hidden,
                 num_classes,
                 num_rels,
                 num_bases=args.n_bases,
                 dropout=args.dropout,
                 self_loop=args.use_self_loop,
                 ns_mode=True)
    labels = labels.to(device)
    model = model.to(device)

    optimizer = th.optim.Adam(model.parameters(),
                              lr=1e-2,
                              weight_decay=args.wd)

    for epoch in range(args.n_epochs):
        train_acc, loss = train(model, train_loader, inv_target, labels,
                                optimizer)
        print(
            "Epoch {:05d}/{:05d} | Train Accuracy: {:.4f} | Train Loss: {:.4f}"
            .format(epoch, args.n_epochs, train_acc, loss))

        val_logits, val_seeds = evaluate(model, val_loader, inv_target)
        val_acc = accuracy(val_logits.argmax(dim=1),
                           labels[val_seeds].cpu()).item()
        print("Validation Accuracy: {:.4f}".format(val_acc))

    test_logits, test_seeds = evaluate(model, test_loader, inv_target)
    test_acc = accuracy(test_logits.argmax(dim=1),
                        labels[test_seeds].cpu()).item()
    print("Final Test Accuracy: {:.4f}".format(test_acc))
Example No. 23
    def evaluate(self, batch, stage=None):
        outputs = self.forward(**batch)
        logits = outputs.logits
        loss = outputs.loss
        preds = torch.argmax(logits, dim=-1)
        acc = accuracy(preds, batch["labels"])

        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)
Example No. 24
def evaluate(model, graph, dataloader):
    model.eval()
    ys = []
    y_hats = []
    for it, (input_nodes, output_nodes, blocks) in enumerate(dataloader):
        with torch.no_grad():
            x = blocks[0].srcdata['feat']
            ys.append(blocks[-1].dstdata['label'])
            y_hats.append(model(blocks, x))
    return MF.accuracy(torch.cat(y_hats), torch.cat(ys))
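A plausible way to drive evaluate above (assumed; the sampler set-up mirrors Example No. 13 rather than the original script, and valid_idx is taken to be the validation split):

val_sampler = dgl.dataloading.NeighborSampler([5, 5, 5],
                                              prefetch_node_feats=['feat'],
                                              prefetch_labels=['label'])
val_dataloader = dgl.dataloading.NodeDataLoader(graph, valid_idx, val_sampler,
                                                batch_size=1000, shuffle=False,
                                                drop_last=False)
val_acc = evaluate(model, graph, val_dataloader)
print('Validation accuracy {:.4f}'.format(val_acc.item()))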
Example No. 25
def main(args):
    g, num_rels, num_classes, labels, train_idx, test_idx, target_idx, inv_target = load_data(
        args.dataset, inv_target=True)

    if args.gpu >= 0 and th.cuda.is_available():
        device = th.device(args.gpu)
    else:
        device = th.device('cpu')

    train_loader, val_loader, test_loader = init_dataloaders(
        args, g, train_idx, test_idx, target_idx, args.gpu)
    embed_layer, model = init_models(args, device, g.num_nodes(), num_classes,
                                     num_rels)

    labels = labels.to(device)
    model = model.to(device)

    emb_optimizer = th.optim.SparseAdam(embed_layer.parameters(),
                                        lr=args.sparse_lr)
    optimizer = th.optim.Adam(model.parameters(),
                              lr=1e-2,
                              weight_decay=args.l2norm)

    for epoch in range(args.n_epochs):
        train_acc, loss = train(model, embed_layer, train_loader, inv_target,
                                labels, emb_optimizer, optimizer)
        print(
            "Epoch {:05d}/{:05d} | Train Accuracy: {:.4f} | Train Loss: {:.4f}"
            .format(epoch, args.n_epochs, train_acc, loss))

        val_logits, val_seeds = evaluate(model, embed_layer, val_loader,
                                         inv_target)
        val_acc = accuracy(val_logits.argmax(dim=1),
                           labels[val_seeds].cpu()).item()
        print("Validation Accuracy: {:.4f}".format(val_acc))

    test_logits, test_seeds = evaluate(model, embed_layer, test_loader,
                                       inv_target)
    test_acc = accuracy(test_logits.argmax(dim=1),
                        labels[test_seeds].cpu()).item()
    print("Final Test Accuracy: {:.4f}".format(test_acc))
Example No. 26
def collect_eval(n_gpus, queue, labels):
    eval_logits = []
    eval_seeds = []
    for _ in range(n_gpus):
        eval_l, eval_s = queue.get()
        eval_logits.append(eval_l)
        eval_seeds.append(eval_s)
    eval_logits = th.cat(eval_logits)
    eval_seeds = th.cat(eval_seeds)
    eval_acc = accuracy(eval_logits.argmax(dim=1), labels[eval_seeds].cpu()).item()

    return eval_acc
Example No. 27
    def evaluate(self, batch, stage=None):
        x, y = batch
        x = x.permute(0, 2, 3, 1)
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y)

        if stage:
            self.log(f'{stage}_loss', loss, prog_bar=True)
            self.log(f'{stage}_acc', acc, prog_bar=True)
        return {'loss': loss, 'acc': acc, 'preds': preds, 'y': y}
Example No. 28
    def _evaluate(self, batch, batch_idx, stage=None):
        x, y = batch
        out = self.forward(x)
        logits = F.log_softmax(out, dim=-1)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=-1)
        acc = accuracy(preds, y)

        if stage:
            self.log(f'{stage}_loss', loss, prog_bar=True)
            self.log(f'{stage}_acc', acc, prog_bar=True)

        return loss, acc
Example No. 29
def test_ignore_index(preds, target, ignore_index, exp_result,
                      subset_accuracy):
    ignoreindex = Accuracy(ignore_index=ignore_index,
                           subset_accuracy=subset_accuracy)

    for batch in range(preds.shape[0]):
        ignoreindex(preds[batch], target[batch])

    assert ignoreindex.compute() == exp_result

    assert accuracy(preds,
                    target,
                    ignore_index=ignore_index,
                    subset_accuracy=subset_accuracy) == exp_result
Example No. 30
    def test_step(self, test_batch, batch_idx):
        """
        Performs a test step and computes the accuracy of the model.

        :param test_batch: Batch data
        :param batch_idx: Batch index

        :return: Dictionary with the test accuracy
        """
        x, y = test_batch
        output = self.forward(x)
        _, y_hat = torch.max(output, dim=1)
        test_acc = accuracy(y_hat.cpu(), y.cpu())
        return {"test_acc": test_acc}