def test_variable_accumulation_mean_variable():
    """VariableAccumulation with an additive op sums scalars, vectors and batches."""
    # Case 1: 100 scalar updates — accumulator equals the total sum.
    acc = VariableAccumulation(lambda a, x: a + x)
    samples = torch.rand(100)
    for sample in samples:
        acc.update(sample)
    total, count = acc.compute()
    assert total.item() == pytest.approx(samples.sum().item())
    assert count == len(samples)

    # Case 2: 100 vector updates — accumulator equals the per-dimension sum.
    acc = VariableAccumulation(lambda a, x: a + x)
    samples = torch.rand(100, 10)
    for sample in samples:
        acc.update(sample)
    total, count = acc.compute()
    assert total.numpy() == pytest.approx(samples.sum(dim=0).numpy())
    assert count == len(samples)

    # Case 3: batched updates — the op reduces each 16-sample batch before adding.
    acc = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
    # iterate by batch of 16 samples
    samples = torch.rand(8, 16, 10)
    for batch in samples:
        acc.update(batch)
    total, count = acc.compute()
    assert total.numpy() == pytest.approx(samples.reshape(-1, 10).sum(dim=0).numpy())
    assert count == samples.shape[0] * samples.shape[1]
def _test_distrib_variable_accumulation(device):
    """Distributed check: per-rank accumulation matches the all-reduced data sum."""
    # Scalar stream of 100 float64 values on the given device.
    acc = VariableAccumulation(lambda a, x: a + x, device=device)
    data = torch.rand(100, device=device, dtype=torch.float64)
    for value in data:
        acc.update(value)

    data = idist.all_reduce(data)
    total, count = acc.compute()
    assert total.item() == pytest.approx(data.sum().item())
    assert count == len(data) * idist.get_world_size()

    # check if call compute twice — result must be identical (compute is idempotent)
    total, count = acc.compute()
    assert total.item() == pytest.approx(data.sum().item())
    assert count == len(data) * idist.get_world_size()

    # Vector stream of 50 rows of width 10.
    acc = VariableAccumulation(lambda a, x: a + x, device=device)
    data = torch.rand(50, 10, device=device, dtype=torch.float64)
    for row in data:
        acc.update(row)

    data = idist.all_reduce(data)
    total, count = acc.compute()
    assert count == len(data) * idist.get_world_size()
    np.testing.assert_almost_equal(total.cpu().numpy(), data.sum(dim=0).cpu().numpy(), decimal=4)

    # Second compute() call must reproduce the same result.
    total, count = acc.compute()
    assert count == len(data) * idist.get_world_size()
    np.testing.assert_almost_equal(total.cpu().numpy(), data.sum(dim=0).cpu().numpy(), decimal=4)
def _test_apex_average(device, amp_mode, opt_level):
    """Check VariableAccumulation still works after a model is wrapped by apex amp."""
    # This helper is only meaningful for apex on CUDA.
    assert amp_mode == "apex"
    assert device == "cuda"

    model = Linear(1, 1)
    if device:
        model.to(device)
    model.weight.data.zero_()
    model.bias.data.zero_()
    optimizer = SGD(model.parameters(), 0.1)

    # apex is an optional dependency — imported lazily inside the helper.
    from apex import amp

    model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)

    # Accumulate 100 scalars on the device and compare against the plain sum.
    acc = VariableAccumulation(lambda a, x: a + x)
    data = torch.rand(100).float().to(device)
    for value in data:
        acc.update(value)
    total, count = acc.compute()
    assert total.item() == pytest.approx(data.sum().item())
    assert count == len(data)
def test_variable_accumulation_mean_variable():
    # NOTE(review): this redefines test_variable_accumulation_mean_variable from
    # earlier in the file; pytest collects only this shorter definition, so the
    # earlier, more thorough version (with the batched case) never runs.
    # One of the two should be renamed — flagged rather than changed here.
    """VariableAccumulation with an additive op sums scalar and vector streams."""
    # Scalar stream: accumulator equals the total sum of 100 values.
    acc = VariableAccumulation(lambda a, x: a + x)
    samples = torch.rand(100)
    for sample in samples:
        acc.update(sample)
    total, count = acc.compute()
    assert total.item() == pytest.approx(samples.sum().item())
    assert count == len(samples)

    # Vector stream: accumulator equals the per-dimension sum over 100 rows.
    acc = VariableAccumulation(lambda a, x: a + x)
    samples = torch.rand(100, 10)
    for sample in samples:
        acc.update(sample)
    total, count = acc.compute()
    assert total.numpy() == pytest.approx(samples.sum(dim=0).numpy())
    assert count == len(samples)