Example #1
0
def test_output_is_tensor():
    """compute() must return a plain (detached) tensor that does not track gradients,
    and this invariant must hold across repeated updates."""
    m = RunningAverage(output_transform=lambda x: x)

    # The original repeated this update/compute/assert sequence three times
    # verbatim; a loop expresses the same check without duplication.
    for _ in range(3):
        m.update(torch.rand(10, requires_grad=True).mean())
        v = m.compute()
        assert isinstance(v, torch.Tensor)
        # Even though the input requires grad, the accumulated value must be detached.
        assert not v.requires_grad
Example #2
0
def _test_distrib_accumulator_device(device):
    """Verify that RunningAverage keeps its accumulated value on the requested
    metric device, for CPU and (when supported) the current distributed device."""
    devices_to_check = [torch.device("cpu")]
    # XLA backends cannot host the metric on a second device here.
    if device.type != "xla":
        devices_to_check.append(idist.device())

    for target_device in devices_to_check:
        # The src=Metric variant is not exercised: its compute() returns a
        # scalar, so the metric would not accumulate on the chosen device.
        avg = RunningAverage(output_transform=lambda x: x, device=target_device)
        assert avg._device == target_device
        # NOTE: _value remains None until the first update() + compute() pair.

        for _ in range(3):
            avg.update(torch.tensor(1.0, device=device))
            avg.compute()

            assert (
                avg._value.device == target_device
            ), f"{type(avg._value.device)}:{avg._value.device} vs {type(target_device)}:{target_device}"