def test_mean_error():
    """MeanError must track the running mean of (ground_truth - prediction).

    Feeds four independent random batches; after each ``update`` the metric's
    ``compute`` result is compared against a numpy reference accumulator.
    """
    ground_truth = np.random.randn(4)
    m = MeanError()

    # Reference accumulator mirroring what the metric is expected to hold:
    # total signed error and total element count over all batches seen so far.
    np_sum = 0.0
    np_len = 0

    # The original spelled this out four times verbatim (arrays a, b, c, d);
    # a loop expresses the same accumulate-then-check sequence without the
    # copy-paste duplication.
    for _ in range(4):
        batch = np.random.randn(4)
        m.update((torch.from_numpy(batch), torch.from_numpy(ground_truth)))
        np_sum += (ground_truth - batch).sum()
        np_len += len(batch)
        assert m.compute() == pytest.approx(np_sum / np_len)
def test_zero_sample():
    """Calling compute() before any update must raise NotComputableError."""
    metric = MeanError()
    expected_msg = r"MeanError must have at least one example before it can be computed"
    with pytest.raises(NotComputableError, match=expected_msg):
        metric.compute()
def _test(metric_device):
    """Distributed check: metric over all-gathered tensors matches numpy.

    NOTE(review): ``rank``, ``device``, and ``idist`` are taken from the
    enclosing scope (presumably a distributed test harness) — confirm
    against the caller.
    """
    metric_device = torch.device(metric_device)
    m = MeanError(device=metric_device)

    # Per-rank reproducible input; each process contributes 100 elements.
    torch.manual_seed(10 + rank)
    y_pred = torch.rand(size=(100,), device=device)
    y = torch.rand(size=(100,), device=device)
    m.update((y_pred, y))

    # Gather contributions from every process, then compute the reference
    # mean error with numpy on the host.
    np_y_pred = idist.all_gather(y_pred).cpu().numpy()
    np_y = idist.all_gather(y).cpu().numpy()
    expected = (np_y - np_y_pred).sum() / len(np_y_pred)

    assert m.compute() == pytest.approx(expected)
def test_zero_div():
    """compute() on a fresh MeanError must raise NotComputableError."""
    m = MeanError()
    # Also assert the error text, consistent with test_zero_sample — the
    # bare raises-check here was weaker than the sibling test's for the
    # exact same failure mode.
    with pytest.raises(
        NotComputableError,
        match=r"MeanError must have at least one example before it can be computed",
    ):
        m.compute()