def test_compute():
    """TopKCategoricalAccuracy(k=2) yields 0.5 when one of two samples has
    its label among its top-2 scores, and 1.0 after reset() when both do."""
    acc = TopKCategoricalAccuracy(2)

    # Sample 0: top-2 classes are {3, 2} -> label 1 missed.
    # Sample 1: top-2 classes are {0, 1} -> label 1 hit.
    y_pred = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
    # .long() replaces the dated .type(torch.LongTensor) spelling used here,
    # matching the sibling tests in this file; result is identical.
    y = torch.ones(2).long()
    acc.update((y_pred, y))
    assert acc.compute() == 0.5

    acc.reset()
    # Both samples now rank label 1 within their top-2 scores.
    y_pred = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
    y = torch.ones(2).long()
    acc.update((y_pred, y))
    assert acc.compute() == 1.0
def test_compute():
    """compute() must return a plain Python float with the expected value,
    both on first use and again after a reset()."""
    metric = TopKCategoricalAccuracy(2)

    # First pass: only sample 1 ranks label 1 in its top-2 scores -> 0.5.
    scores = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
    targets = torch.ones(2).long()
    metric.update((scores, targets))
    assert isinstance(metric.compute(), float)
    assert metric.compute() == 0.5

    # Second pass after reset: both samples hit -> 1.0.
    metric.reset()
    scores = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
    targets = torch.ones(2).long()
    metric.update((scores, targets))
    assert isinstance(metric.compute(), float)
    assert metric.compute() == 1.0
def _test_distrib_accumulator_device(device):
    """The metric's `_num_correct` accumulator must sit on the configured
    device, both right after construction and after the first update()."""

    def _mismatch(acc, metric_device):
        # Built lazily: assert-message expressions only run on failure.
        return "{}:{} vs {}:{}".format(
            type(acc._num_correct.device), acc._num_correct.device,
            type(metric_device), metric_device,
        )

    devices_to_check = [torch.device("cpu")]
    # The distributed device is only added when not running on XLA.
    if device.type != "xla":
        devices_to_check.append(idist.device())

    for metric_device in devices_to_check:
        acc = TopKCategoricalAccuracy(2, device=metric_device)
        assert acc._device == metric_device
        assert acc._num_correct.device == metric_device, _mismatch(acc, metric_device)

        y_pred = torch.tensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
        y = torch.ones(2).long()
        acc.update((y_pred, y))
        assert acc._num_correct.device == metric_device, _mismatch(acc, metric_device)
def evaluate(net, test_dataloader):
    """Run `net` over `test_dataloader`, print top-1/5/10 accuracy, and
    return the collected softmax outputs.

    Returns a (len(test_dataloader), 256) tensor holding one softmax row
    per loader iteration. The network is switched to eval mode for the
    pass and restored to train mode before the summary is printed.
    """
    with torch.no_grad():
        net.eval()
        preds_all = torch.empty((len(test_dataloader), 256))
        top_1 = TopKCategoricalAccuracy(k=1)
        top_5 = TopKCategoricalAccuracy()  # default k — presumably 5, per the name; verify
        top_10 = TopKCategoricalAccuracy(k=10)

        for i, data in enumerate(test_dataloader):
            lidar, beams = data
            lidar = lidar.cuda()
            beams = beams.cuda()
            preds = net(lidar)
            preds = F.softmax(preds, dim=1)
            # NOTE(review): one row per loader step only captures the whole
            # batch if batch_size == 1 — TODO confirm with the caller.
            preds_all[i, :] = preds

            # Hoisted: the same argmax was recomputed for all three metrics.
            # NOTE(review): argmax without dim= reduces over the flattened
            # tensor; that is a per-sample label only for a 1-sample batch —
            # verify, otherwise torch.argmax(beams, dim=1) is intended.
            target = torch.argmax(beams)
            top_1.update((preds, target))
            top_5.update((preds, target))
            top_10.update((preds, target))

        net.train()
        print("Top-1: {:.4f} Top-5: {:.4f} Top-10: {:.4f}".format(
            top_1.compute(), top_5.compute(), top_10.compute()))
    return preds_all
def test_topk_accuracy(self, k: int, y_pred: Tensor, y_true: Tensor, score: float):
    """A single update with (y_pred, y_true) must compute exactly `score`."""
    metric = TopKCategoricalAccuracy(k=k)
    metric.update((y_pred, y_true))
    result = metric.compute()
    self.assertEqual(score, result)