def test_reset_compute():
    """Ensure that resetting a metric clears the accumulated state before compute."""
    metric = DummyMetricSum()
    assert metric.x == 0
    metric.update(tensor(5))
    assert metric.compute() == 5
    metric.reset()
    # after reset the internal state is back to its default, so compute yields 0
    assert metric.compute() == 0
def test_reset_compute():
    """Check compute after reset; the expected value depends on the lightning version."""
    metric = DummyMetricSum()
    assert metric.x == 0
    metric.update(tensor(5))
    assert metric.compute() == 5
    metric.reset()
    if _LIGHTNING_AVAILABLE and not _LIGHTNING_GREATER_EQUAL_1_3:
        # older lightning versions kept the cached result after a reset
        assert metric.compute() == 5
    else:
        assert metric.compute() == 0
def test_load_state_dict(tmpdir):
    """Test that metric states can be loaded with state dict.

    Bug fix: the original assertion checked ``metric`` (the source object) instead
    of ``loaded_metric``, so the state-dict round trip was never actually verified.
    """
    metric = DummyMetricSum()
    metric.persistent(True)
    metric.update(5)
    loaded_metric = DummyMetricSum()
    loaded_metric.load_state_dict(metric.state_dict())
    # the restored metric must reproduce the source's computed value
    assert loaded_metric.compute() == 5
def _test_ddp_compositional_tensor(rank, worldsize):
    """A composition of two clones should reduce correctly across DDP processes."""
    setup_ddp(rank, worldsize)
    base = DummyMetricSum()
    base._reductions = {"x": torch.sum}
    # each clone contributes 1 per process, so the composed metric sums to 2 * worldsize
    composed = base.clone() + base.clone()
    composed.update(tensor(1))
    result = composed.compute()
    assert result == 2 * worldsize
def test_warning_on_compute_before_update():
    """Test that a warning is raised if the user calls ``compute`` before ``update``.

    Fix: ``pytest.warns(None)`` was deprecated in pytest 6.2 and removed in pytest
    7.0; use ``warnings.catch_warnings(record=True)`` to assert that no warning is
    emitted instead.
    """
    import warnings

    metric = DummyMetricSum()
    # make sure everything is fine with forward — no warning expected
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        val = metric(1)
    assert not record
    metric.reset()
    with pytest.warns(UserWarning, match=r'The ``compute`` method of metric .*'):
        val = metric.compute()
    assert val == 0.0
    # after update things should be fine — no warning expected
    metric.update(2.0)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        val = metric.compute()
    assert not record
    assert val == 2.0
def test_warning_on_compute_before_update():
    """test that an warning is raised if user tries to call compute before update.

    Fix: ``pytest.warns(None)`` was deprecated in pytest 6.2 and removed in pytest
    7.0; record warnings with the stdlib ``warnings`` module to assert silence.
    """
    import warnings

    metric = DummyMetricSum()
    # make sure everything is fine with forward — no warning expected
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        val = metric(1)
    assert not record
    metric.reset()
    with pytest.warns(UserWarning, match=r"The ``compute`` method of metric .*"):
        val = metric.compute()
    assert val == 0.0
    # after update things should be fine — no warning expected
    metric.update(2.0)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        val = metric.compute()
    assert not record
    assert val == 2.0