def test_lag_k_estimation_for_positive_k():
    """Lag-k estimates of a period-4 signal match the known autocorrelations."""
    samples = [-1, 0, 1, 0] * 10
    stat = Statistic(samples)
    # Period-4 waveform: in phase at k=0, anti-phase at k=2, orthogonal at k=1,3.
    expected = {0: 1, 1: 0, 2: -1, 3: 0}
    for k, value in expected.items():
        np.testing.assert_almost_equal(stat.lag(k), value)
def test_lag_0_is_always_1():
    """lag(0) is identically 1, whatever data the statistic holds."""
    statistics = (
        Statistic([10]),
        Statistic([-1, 1] * 5),
        Statistic(np.random.exponential(1, 7)),
    )
    for stat in statistics:
        assert stat.lag(0) == 1
def test_lag_k_raises_error_when_passed_negative_or_float():
    """lag(k) rejects negative and non-integer k with a clear ValueError."""
    stat = Statistic([1, 2, 3])
    for invalid_k in (-1, 2.5):
        with pytest.raises(ValueError) as excinfo:
            stat.lag(invalid_k)
        assert 'non-negative integer expected' in str(excinfo.value).lower()
def test_lag_k_raises_error_when_k_greater_then_length():
    """lag(k) raises ValueError when k is at or beyond the sample count."""
    stat = Statistic([1, 2])
    expected_message = 'statistic has too few samples'
    # Two samples: both k == len and k > len must be rejected.
    for k in (2, 3):
        with pytest.raises(ValueError) as excinfo:
            stat.lag(k)
        assert expected_message in str(excinfo.value).lower()
def test_lag_k_raises_error_when_called_for_empty_statistic():
    """lag(k) on an empty statistic raises ValueError mentioning 'no data'."""
    empty_stat = Statistic()
    with pytest.raises(ValueError) as err:
        empty_stat.lag(1)
    assert 'no data' in str(err.value).lower()