Example #1
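The snippets below appear to be pytest tests exercising ignite's VariableAccumulation metric; the module-level imports they rely on were not carried over with the extracted code. A plausible reconstruction (an assumption, not part of the original examples) is:

# Presumed imports for the examples below (reconstructed, not from the source).
import numpy as np
import pytest
import torch
from torch.nn import Linear
from torch.optim import SGD

import ignite.distributed as idist
from ignite.metrics.accumulation import VariableAccumulation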
def _test_distrib_variable_accumulation(device):

    mean_var = VariableAccumulation(lambda a, x: a + x, device=device)
    y_true = torch.rand(100, device=device, dtype=torch.float64)

    for y in y_true:
        mean_var.update(y)

    y_true = idist.all_reduce(y_true)
    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true) * idist.get_world_size()
    # check that compute() can be called a second time with the same result
    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true) * idist.get_world_size()

    mean_var = VariableAccumulation(lambda a, x: a + x, device=device)
    y_true = torch.rand(50, 10, device=device, dtype=torch.float64)

    for y in y_true:
        mean_var.update(y)

    y_true = idist.all_reduce(y_true)
    a, n = mean_var.compute()
    assert n == len(y_true) * idist.get_world_size()
    np.testing.assert_almost_equal(a.cpu().numpy(),
                                   y_true.sum(dim=0).cpu().numpy(),
                                   decimal=4)
    a, n = mean_var.compute()
    assert n == len(y_true) * idist.get_world_size()
    np.testing.assert_almost_equal(a.cpu().numpy(),
                                   y_true.sum(dim=0).cpu().numpy(),
                                   decimal=4)
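For orientation, here is a minimal standalone sketch (an addition, not one of the original tests) showing how the (accumulator, num_examples) pair returned by compute() turns into a mean:

# Sketch only: illustrates the compute() output, not taken from the source tests.
import pytest
import torch
from ignite.metrics.accumulation import VariableAccumulation

acc = VariableAccumulation(lambda a, x: a + x)
for v in torch.arange(1.0, 11.0):              # scalar updates 1.0 .. 10.0
    acc.update(v)

total, count = acc.compute()                   # accumulated sum and number of updates
assert float(total) / float(count) == pytest.approx(5.5)   # mean of 1..10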
Example #2
def _test_apex_average(device, amp_mode, opt_level):
    assert amp_mode == "apex"
    assert device == "cuda"

    model = Linear(1, 1)

    if device:
        model.to(device)

    model.weight.data.zero_()
    model.bias.data.zero_()
    optimizer = SGD(model.parameters(), 0.1)

    from apex import amp

    model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100).float().to(device)

    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true)
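A hypothetical driver for the helper above (the test name and skip guards are assumptions, not from the original snippet); tests of this kind can only run with CUDA and apex available:

# Hypothetical wrapper, assuming pytest collects it alongside the helper above.
import pytest
import torch

@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_apex_average_o1():
    pytest.importorskip("apex")                # apex must be installed
    _test_apex_average("cuda", amp_mode="apex", opt_level="O1")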
Example #3
def test_variable_accumulation_wrong_inputs():

    with pytest.raises(TypeError, match=r"Argument op should be a callable"):
        VariableAccumulation(1)

    with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
        mean_acc = VariableAccumulation(lambda a, x: a + x)
        mean_acc.update((1, 2))

    with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
        mean_acc = VariableAccumulation(lambda a, x: a + x)
        mean_acc.update("a")
Example #4
def _test_distrib_accumulator_device(device):

    metric_devices = [torch.device("cpu")]
    if device.type != "xla":
        metric_devices.append(idist.device())
    for metric_device in metric_devices:

        m = VariableAccumulation(lambda a, x: x, device=metric_device)
        assert m._device == metric_device
        assert (
            m.accumulator.device == metric_device
        ), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"

        m.update(torch.tensor(1, device=device))
        assert (
            m.accumulator.device == metric_device
        ), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
Example #5
def test_variable_accumulation_mean_variable():

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100)

    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true)

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100, 10)
    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.numpy() == pytest.approx(y_true.sum(dim=0).numpy())
    assert n == len(y_true)
Example #6
def test_variable_accumulation_mean_variable():

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100)

    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true)

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100, 10)
    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.numpy() == pytest.approx(y_true.sum(dim=0).numpy())
    assert n == len(y_true)

    mean_var = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
    # iterate over batches of 16 samples
    y_true = torch.rand(8, 16, 10)
    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.numpy() == pytest.approx(
        y_true.reshape(-1, 10).sum(dim=0).numpy())
    assert n == y_true.shape[0] * y_true.shape[1]
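As a follow-up sketch (an addition, not part of the original test), the same batch-wise op gives a per-feature mean over all samples once the accumulator is divided by the reported count; float64 data is used here just to keep the comparison tight:

# Sketch only: per-feature mean from batch-wise accumulation (not from the source tests).
import pytest
import torch
from ignite.metrics.accumulation import VariableAccumulation

batch_mean = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
data = torch.rand(8, 16, 10, dtype=torch.float64)   # 8 batches of 16 samples, 10 features
for batch in data:
    batch_mean.update(batch)

total, count = batch_mean.compute()                 # per-feature sums and 8 * 16 samples
assert (total / count).numpy() == pytest.approx(data.reshape(-1, 10).mean(dim=0).numpy())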