import numpy as np
import pytest
import torch

import ignite.distributed as idist
from ignite.contrib.metrics.regression import MeanNormalizedBias
from ignite.engine import Engine
from ignite.exceptions import NotComputableError


def test_wrong_input_shapes():
    m = MeanNormalizedBias()

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1, 2), torch.rand(4)))

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4), torch.rand(4, 1, 2)))

def test_zero_sample():
    m = MeanNormalizedBias()
    with pytest.raises(
        NotComputableError, match=r"MeanNormalizedBias must have at least one example before it can be computed"
    ):
        m.compute()

def test_zero_gt():
    a = np.random.randn(4)
    ground_truth = np.zeros(4)
    m = MeanNormalizedBias()

    with pytest.raises(NotComputableError, match=r"The ground truth has 0."):
        m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))

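# The rejection above follows from the metric's definition: every term divides
# by a ground-truth value, so a zero target makes the normalized bias
# undefined. Minimal illustration (assumed snippet, not ignite code):
#
#   >>> y, y_pred = torch.tensor([0.0]), torch.tensor([1.0])
#   >>> (y - y_pred) / y
#   tensor([-inf])
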
# In the ignite test suite this helper is nested inside a distributed
# integration test; ``rank`` and ``device`` come from that enclosing scope
# (see the serial stand-in after this function). It is named here to avoid
# clashing with the other helpers in this file.
def _test_distrib_integration(n_epochs, metric_device):
    metric_device = torch.device(metric_device)
    n_iters = 80
    s = 16
    offset = n_iters * s

    y_true = torch.rand(size=(offset * idist.get_world_size(),)).to(device)
    y_preds = torch.rand(size=(offset * idist.get_world_size(),)).to(device)

    def update(engine, i):
        return (
            y_preds[i * s + rank * offset : (i + 1) * s + rank * offset],
            y_true[i * s + rank * offset : (i + 1) * s + rank * offset],
        )

    engine = Engine(update)

    m = MeanNormalizedBias(device=metric_device)
    m.attach(engine, "mnb")

    data = list(range(n_iters))
    engine.run(data=data, max_epochs=n_epochs)

    assert "mnb" in engine.state.metrics

    res = engine.state.metrics["mnb"]

    np_y_true = y_true.cpu().numpy()
    np_y_preds = y_preds.cpu().numpy()

    np_sum = ((np_y_true - np_y_preds) / np_y_true).sum()
    np_len = len(np_y_preds)
    np_ans = np_sum / np_len

    assert pytest.approx(res) == np_ans

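# ``rank`` and ``device`` are free variables in the distributed helpers in this
# file. A serial stand-in (an assumption, not ignite's gloo/nccl test harness)
# that also works outside a distributed launch, since ignite.distributed falls
# back to rank 0 / world size 1 in that case:
rank = idist.get_rank()
device = idist.device()


# Hedged driver sketch for the helper above; the epoch counts and the CPU-only
# metric device are assumptions for illustration.
def test_distrib_integration_serial():
    for n_epochs in [1, 2]:
        _test_distrib_integration(n_epochs, "cpu")
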
# In the ignite test suite this helper is nested inside an engine-integration
# test; it is named here to avoid clashing with the other helpers in this file.
def _test_integration(y_pred, y, batch_size):
    def update_fn(engine, batch):
        idx = (engine.state.iteration - 1) * batch_size
        y_true_batch = np_y[idx : idx + batch_size]
        y_pred_batch = np_y_pred[idx : idx + batch_size]
        return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)

    engine = Engine(update_fn)

    m = MeanNormalizedBias()
    m.attach(engine, "mnb")

    np_y = y.numpy()
    np_y_pred = y_pred.numpy()

    data = list(range(y_pred.shape[0] // batch_size))
    mnb = engine.run(data, max_epochs=1).metrics["mnb"]

    np_sum = ((np_y - np_y_pred) / np_y).sum()
    np_len = len(np_y_pred)
    np_ans = np_sum / np_len

    assert np_ans == pytest.approx(mnb)

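# Hedged usage sketch for the helper above; the shapes, seed, and the +0.5
# shift (to keep the ground truth away from zero) are assumptions for
# illustration, not part of the original suite.
def test_integration():
    torch.manual_seed(12)
    y_pred = torch.rand(100)
    y = torch.rand(100) + 0.5
    _test_integration(y_pred, y, batch_size=10)
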
# Like the integration helper above, this is nested inside a distributed test
# in the ignite suite and relies on ``rank`` and ``device`` from that scope.
def _test_distrib_compute(metric_device):
    metric_device = torch.device(metric_device)
    m = MeanNormalizedBias(device=metric_device)

    torch.manual_seed(10 + rank)

    y_pred = torch.randint(1, 11, size=(10,), device=device).float()
    y = torch.randint(1, 11, size=(10,), device=device).float()

    m.update((y_pred, y))

    # gather y_pred, y
    y_pred = idist.all_gather(y_pred)
    y = idist.all_gather(y)

    np_y_pred = y_pred.cpu().numpy()
    np_y = y.cpu().numpy()

    res = m.compute()

    np_sum = ((np_y - np_y_pred) / np_y).sum()
    np_len = len(np_y_pred)
    np_ans = np_sum / np_len

    assert np_ans == pytest.approx(res)

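# Serial driver sketch for the compute helper above; the single CPU metric
# device is an assumption (the ignite suite loops over CPU and the current
# accelerator under a distributed context).
def test_distrib_compute_serial():
    _test_distrib_compute("cpu")
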
def test_mean_normalized_bias():
    a = np.random.randn(4)
    b = np.random.randn(4)
    c = np.random.randn(4)
    d = np.random.randn(4)
    ground_truth = np.random.randn(4)

    m = MeanNormalizedBias()

    m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
    np_sum = ((ground_truth - a) / ground_truth).sum()
    np_len = len(a)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
    np_sum += ((ground_truth - b) / ground_truth).sum()
    np_len += len(b)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
    np_sum += ((ground_truth - c) / ground_truth).sum()
    np_len += len(c)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

    m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
    np_sum += ((ground_truth - d) / ground_truth).sum()
    np_len += len(d)
    np_ans = np_sum / np_len
    assert m.compute() == pytest.approx(np_ans)

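# For reference, the quantity the tests above accumulate by hand is the mean
# normalized bias, MNB = (1/n) * sum_i (y_i - y_pred_i) / y_i. A one-batch
# numpy sketch (illustrative, not ignite's implementation):
def _mnb_reference(y_pred, y):
    return ((y - y_pred) / y).mean()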