Code example #1
def test_innovations_mle_integrated():
    endog = np.r_[0, np.cumsum(lake.copy())]

    start_params = [0, np.var(lake.copy())]
    with assert_warns(UserWarning):
        p, mleres = innovations_mle(endog, order=(1, 1, 0),
                                    demean=False, start_params=start_params)

    mod = sarimax.SARIMAX(endog, order=(1, 1, 0),
                          simple_differencing=True)

    # Test that the maximized log-likelihood found via applications of the
    # innovations algorithm matches the log-likelihood found by the Kalman
    # filter at the same parameters
    res = mod.filter(p.params)
    assert_allclose(-mleres.minimize_results.fun, res.llf)

    # Test MLE fitting
    # To avoid small numerical differences with MLE fitting, start at the
    # parameters found from innovations_mle
    res2 = mod.fit(start_params=p.params, disp=0)

    # Test that the state space approach confirms the MLE values found by
    # innovations_mle
    # Note: atol is required only due to precision issues on Windows
    assert_allclose(p.params, res2.params, atol=1e-6)

    # Test that the result is equivalent to order=(1, 0, 0) on the differenced
    # data
    p2, _ = innovations_mle(lake.copy(), order=(1, 0, 0), demean=False,
                            start_params=start_params)
    # (doesn't need to be a high-precision test, since it's okay if different
    # starting parameters give slightly different MLEs)
    assert_allclose(p.params, p2.params, atol=1e-5)
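For context on the final assertion: the order=(1, 1, 0) fit on the cumulated series should agree with an order=(1, 0, 0) fit on lake itself because the integration step is inverted exactly by differencing. A minimal sketch of that identity (hypothetical data, not the lake series):

import numpy as np

# Integrating via np.r_[0, np.cumsum(...)] and then differencing via np.diff
# round-trips to the original series exactly.
x = np.array([1.0, -2.0, 0.5, 3.0])
integrated = np.r_[0, np.cumsum(x)]
assert np.allclose(np.diff(integrated), x)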
Code example #2
def test_itsmr_with_fixed_params(fixed_params):
    # This test is a variation of test_itsmr where we fix one or more
    # parameters for Example 5.1.7 in Brockwell and Davis (2016) and check
    # that the free parameters are still estimated correctly.

    endog = lake.copy()
    hr, _ = hannan_rissanen(
        endog, ar_order=1, ma_order=1, demean=True,
        initial_ar_order=22, unbiased=False,
        fixed_params=fixed_params
    )

    assert_allclose(hr.ar_params, [0.69607715], atol=1e-4)
    assert_allclose(hr.ma_params, [0.3787969217], atol=1e-4)

    # Because our fast implementation of the innovations algorithm does not
    # allow for non-stationary processes, the estimate of the variance returned
    # by `hannan_rissanen` is based on the residuals from the least-squares
    # regression, rather than (as reported by BD) based on the innovations
    # algorithm output. Since the estimates here do correspond to a stationary
    # series, we can compute the innovations variance manually to check
    # against BD.
    u, v = arma_innovations(endog - endog.mean(), hr.ar_params, hr.ma_params,
                            sigma2=1)
    tmp = u / v**0.5
    assert_allclose(np.inner(tmp, tmp) / len(u), 0.4773580109, atol=1e-4)
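For reference, the last two lines above compute the innovations-based variance estimate sigma2_hat = (1/n) * sum(u_t**2 / v_t), the quantity BD report. A minimal standalone check of the algebraic identity being used (hypothetical values, not model output):

import numpy as np

# (u / v**0.5) dotted with itself equals sum(u**2 / v), so the inner-product
# form matches the direct form of the estimator.
u = np.array([0.5, -1.0, 0.25])
v = np.array([1.5, 1.2, 1.1])
tmp = u / v**0.5
assert np.isclose(np.inner(tmp, tmp) / len(u), np.sum(u**2 / v) / len(u))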
Code example #3
def test_start_params():
    endog = lake.copy()

    # Test for valid use of starting parameters
    p, _ = statespace(endog, order=(1, 0, 0), start_params=[0, 0, 1.])
    p, _ = statespace(endog,
                      order=(1, 0, 0),
                      start_params=[0, 1., 1.],
                      enforce_stationarity=False)
    p, _ = statespace(endog,
                      order=(0, 0, 1),
                      start_params=[0, 1., 1.],
                      enforce_invertibility=False)

    # Test for invalid use of starting parameters
    assert_raises(ValueError,
                  statespace,
                  endog,
                  order=(1, 0, 0),
                  start_params=[0, 1., 1.])
    assert_raises(ValueError,
                  statespace,
                  endog,
                  order=(0, 0, 1),
                  start_params=[0, 1., 1.])
Code example #4
def test_itsmr():
    # This is essentially a high-precision version of
    # test_brockwell_davis_example_517, where the desired values were computed
    # from R itsmr::hannan; see results/results_hr.R
    endog = lake.copy()
    hr, _ = hannan_rissanen(endog,
                            ar_order=1,
                            ma_order=1,
                            demean=True,
                            initial_ar_order=22,
                            unbiased=False)

    assert_allclose(hr.ar_params, [0.69607715], atol=1e-4)
    assert_allclose(hr.ma_params, [0.3787969217], atol=1e-4)

    # Because our fast implementation of the innovations algorithm does not
    # allow for non-stationary processes, the estimate of the variance returned
    # by `hannan_rissanen` is based on the residuals from the least-squares
    # regression, rather than (as reported by BD) based on the innovations
    # algorithm output. Since the estimates here do correspond to a stationary
    # series, we can compute the innovations variance manually to check
    # against BD.
    u, v = arma_innovations(endog - endog.mean(),
                            hr.ar_params,
                            hr.ma_params,
                            sigma2=1)
    tmp = u / v**0.5
    assert_allclose(np.inner(tmp, tmp) / len(u), 0.4773580109, atol=1e-4)
Code example #5
def test_innovations_mle_statespace_nonconsecutive():
    # Test innovations output against state-space output.
    endog = lake.copy()
    endog = endog - endog.mean()

    start_params = [0, 0, np.var(endog)]
    p, mleres = innovations_mle(endog,
                                order=([0, 1], 0, [0, 1]),
                                demean=False,
                                start_params=start_params)

    mod = sarimax.SARIMAX(endog, order=([0, 1], 0, [0, 1]))

    # Test that the maximized log-likelihood found via applications of the
    # innovations algorithm matches the log-likelihood found by the Kalman
    # filter at the same parameters
    res = mod.filter(p.params)
    assert_allclose(-mleres.minimize_results.fun, res.llf)

    # Test MLE fitting
    # To avoid small numerical differences with MLE fitting, start at the
    # parameters found from innovations_mle
    res2 = mod.fit(start_params=p.params, disp=0)

    # Test that the state space approach confirms the MLE values found by
    # innovations_mle
    assert_allclose(p.params, res2.params)

    # Test that starting parameter estimation succeeds and isn't terrible
    # (i.e. leads to the same MLE)
    p2, _ = innovations_mle(endog, order=([0, 1], 0, [0, 1]), demean=False)
    # (does not need to be a high-precision test, since it's okay if different
    # starting parameters give slightly different MLEs)
    assert_allclose(p.params, p2.params, atol=1e-5)
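A note on the order=([0, 1], 0, [0, 1]) specification: passing lists instead of integers gives 0/1 inclusion flags for lags 1, 2, ..., so this model has only a lag-2 AR term and a lag-2 MA term (hence "nonconsecutive"). A minimal sketch of the implied lag polynomial, under that reading of the convention:

import numpy as np

# The AR flag list [0, 1] excludes lag 1 and includes lag 2, so the implied
# AR lag-polynomial mask is [1., 0., 1.] (and analogously on the MA side).
ar_flags = [0, 1]
polynomial_ar = np.r_[1.0, ar_flags]
assert list(polynomial_ar) == [1.0, 0.0, 1.0]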
Code example #6
def test_brockwell_davis_example_517():
    # Get the lake data
    endog = lake.copy()

    # BD do not implement the "bias correction" third step that they describe,
    # so we can't use their results to test that. Thus here `unbiased=False`.
    # Note: it's not clear why BD use initial_ar_order=22 (and they don't
    # mention that they do this), but it is the value that allows the test to
    # pass.
    hr, _ = hannan_rissanen(endog,
                            ar_order=1,
                            ma_order=1,
                            demean=True,
                            initial_ar_order=22,
                            unbiased=False)
    assert_allclose(hr.ar_params, [0.6961], atol=1e-4)
    assert_allclose(hr.ma_params, [0.3788], atol=1e-4)

    # Because our fast implementation of the innovations algorithm does not
    # allow for non-stationary processes, the estimate of the variance returned
    # by `hannan_rissanen` is based on the residuals from the least-squares
    # regression, rather than (as reported by BD) based on the innovations
    # algorithm output. Since the estimates here do correspond to a stationary
    # series, we can compute the innovations variance manually to check
    # against BD.
    u, v = arma_innovations(endog - endog.mean(),
                            hr.ar_params,
                            hr.ma_params,
                            sigma2=1)
    tmp = u / v**0.5
    assert_allclose(np.inner(tmp, tmp) / len(u), 0.4774, atol=1e-4)
Code example #7
def test_iterations():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test for n_iter usage
    _, res = gls(endog, exog, order=(1, 0, 0), n_iter=1)
    assert_equal(res.iterations, 1)
    assert_equal(res.converged, None)
Code example #8
def test_innovations_ma_itsmr():
    # Note: apparently itsmr automatically demeans (there is no option to
    # control this)
    endog = lake.copy()

    check_innovations_ma_itsmr(endog)  # Pandas series
    check_innovations_ma_itsmr(endog.values)  # Numpy array
    check_innovations_ma_itsmr(endog.tolist())  # Python list
Code example #9
def test_integrated_invalid():
    # Test for invalid versions of integrated model
    # - include_constant=True is invalid if integration is present
    endog = lake.copy()
    exog = np.arange(1, len(endog) + 1) * 1.0
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(1, 1, 0),
                  include_constant=True)
Code example #10
File: test_burg.py Project: arnab0000/Internships
def test_misc():
    # Test defaults (order = 0, demean=True)
    endog = lake.copy()
    res, _ = burg(endog)
    assert_allclose(res.params, np.var(endog))

    # Test that integer input gives the same result as float-coerced input.
    endog = np.array([1, 2, 5, 3, -2, 1, -3, 5, 2, 3, -1], dtype=int)
    res_int, _ = burg(endog, 2)
    res_float, _ = burg(endog * 1.0, 2)
    assert_allclose(res_int.params, res_float.params)
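A note on the first assertion: np.var uses the population normalization (ddof=0), so the order-0 fit is effectively checked against the mean-squared deviation from the sample mean. A trivial sketch of that equivalence:

import numpy as np

# np.var(y) == sum((y - mean(y))**2) / n with the default ddof=0.
y = np.array([1.0, 4.0, 2.0, 7.0])
assert np.isclose(np.var(y), np.sum((y - y.mean()) ** 2) / len(y))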
Code example #11
def test_innovations_mle_invalid():
    endog = np.arange(2) * 1.0
    assert_raises(ValueError, innovations_mle, endog, order=(0, 0, 2))
    assert_raises(ValueError, innovations_mle, endog, order=(0, 0, -1))
    assert_raises(ValueError, innovations_mle, endog, order=(0, 0, 1.5))

    endog = lake.copy()
    assert_raises(ValueError, innovations_mle, endog, order=(1, 0, 0),
                  start_params=[1., 1.])
    assert_raises(ValueError, innovations_mle, endog, order=(0, 0, 1),
                  start_params=[1., 1.])
Code example #12
def test_brockwell_davis_example_662():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    res, _ = gls(endog, exog, order=(2, 0, 0))

    # Parameter values taken from Table 6.3, row 2, except for sigma2 and the
    # last digit of exog_params[0], which were given in the text
    assert_allclose(res.exog_params, [10.091, -.0216], atol=1e-3)
    assert_allclose(res.ar_params, [1.005, -.291], atol=1e-3)
    assert_allclose(res.sigma2, .4571, atol=1e-3)
Code example #13
def test_brockwell_davis_example_514():
    # Note: this example is primarily tested in
    # test_burg::test_brockwell_davis_example_514.

    # Get the lake data (demeaning is handled via demean=True below)
    endog = lake.copy()

    # Yule-Walker
    res, _ = yule_walker(endog, ar_order=2, demean=True)
    assert_allclose(res.ar_params, [1.0538, -0.2668], atol=1e-4)
    assert_allclose(res.sigma2, 0.4920, atol=1e-4)
Code example #14
def test_misc():
    # Test defaults (order = 0, demean=True)
    endog = lake.copy()
    res, _ = durbin_levinson(endog)
    assert_allclose(res[0].params, np.var(endog))

    # Test that integer input gives the same result as float-coerced input.
    endog = np.array([1, 2, 5, 3, -2, 1, -3, 5, 2, 3, -1], dtype=int)
    res_int, _ = durbin_levinson(endog, 2, demean=False)
    res_float, _ = durbin_levinson(endog * 1.0, 2, demean=False)
    assert_allclose(res_int[0].params, res_float[0].params)
    assert_allclose(res_int[1].params, res_float[1].params)
    assert_allclose(res_int[2].params, res_float[2].params)
Code example #15
def test_alternate_arma_estimators_valid():
    # Test that we can use (valid) alternate ARMA estimators
    # Note that this does not test the results of the alternative estimators,
    # and so it is labeled as a smoke test / TODO. However, assuming those
    # estimators are tested elsewhere, the main testable concern from their
    # inclusion in the feasible GLS step is that they produce results at all.
    # Thus, for example, we specify n_iter=1, and ignore the actual results.
    # Nonetheless, it would be good to test against another package.

    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    _, res_yw = gls(endog,
                    exog=exog,
                    order=(1, 0, 0),
                    arma_estimator='yule_walker',
                    n_iter=1)
    assert_equal(res_yw.arma_estimator, 'yule_walker')

    _, res_b = gls(endog,
                   exog=exog,
                   order=(1, 0, 0),
                   arma_estimator='burg',
                   n_iter=1)
    assert_equal(res_b.arma_estimator, 'burg')

    _, res_i = gls(endog,
                   exog=exog,
                   order=(0, 0, 1),
                   arma_estimator='innovations',
                   n_iter=1)
    assert_equal(res_i.arma_estimator, 'innovations')

    _, res_hr = gls(endog,
                    exog=exog,
                    order=(1, 0, 1),
                    arma_estimator='hannan_rissanen',
                    n_iter=1)
    assert_equal(res_hr.arma_estimator, 'hannan_rissanen')

    _, res_ss = gls(endog,
                    exog=exog,
                    order=(1, 0, 1),
                    arma_estimator='statespace',
                    n_iter=1)
    assert_equal(res_ss.arma_estimator, 'statespace')

    # Finally, the default estimator is innovations_mle
    _, res_imle = gls(endog, exog=exog, order=(1, 0, 1), n_iter=1)
    assert_equal(res_imle.arma_estimator, 'innovations_mle')
Code example #16
def test_integrated():
    # Get the lake data
    endog1 = lake.copy()
    exog1 = np.c_[np.ones_like(endog1), np.arange(1, len(endog1) + 1) * 1.0]

    endog2 = np.r_[0, np.cumsum(endog1)]
    exog2 = np.c_[[0, 0], np.cumsum(exog1, axis=0).T].T

    # Estimate without integration
    p1, _ = gls(endog1, exog1, order=(1, 0, 0))

    # Estimate with integration
    with assert_warns(UserWarning):
        p2, _ = gls(endog2, exog2, order=(1, 1, 0))

    assert_allclose(p1.params, p2.params)
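The exog2 construction above is dense: it prepends a row of zeros to the column-wise cumulative sum, integrating each exog column the same way np.r_[0, np.cumsum(endog1)] integrates endog. A minimal sketch with a hypothetical 2-column exog:

import numpy as np

# Prepending a zero row to the column-wise cumulative sum means differencing
# along axis 0 recovers the original exog exactly.
exog1 = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])
exog2 = np.c_[[0, 0], np.cumsum(exog1, axis=0).T].T
assert np.allclose(exog2[0], 0)
assert np.allclose(np.diff(exog2, axis=0), exog1)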
Code example #17
File: test_burg.py Project: arnab0000/Internships
def test_brockwell_davis_example_514():
    # Test against Example 5.1.4 in Brockwell and Davis (2016)
    # (low-precision test, since we are testing against values printed in the
    # textbook)

    # Get the lake data
    endog = lake.copy()

    # Should have 98 observations
    assert_equal(len(endog), 98)
    desired = 9.0041
    assert_allclose(endog.mean(), desired, atol=1e-4)

    # Burg
    res, _ = burg(endog, ar_order=2, demean=True)
    assert_allclose(res.ar_params, [1.0449, -0.2456], atol=1e-4)
    assert_allclose(res.sigma2, 0.4706, atol=1e-4)
Code example #18
def test_brockwell_davis_example_525():
    # Get the lake data (demeaning is handled via demean=True below)
    endog = lake.copy()

    # Use HR method to get initial coefficients for MLE
    initial, _ = hannan_rissanen(endog, ar_order=1, ma_order=1, demean=True)

    # Fit MLE via innovations algorithm
    p, _ = innovations_mle(endog, order=(1, 0, 1), demean=True,
                           start_params=initial.params)

    assert_allclose(p.params, [0.7446, 0.3213, 0.4750], atol=1e-4)

    # Fit MLE via innovations algorithm, with default starting parameters
    p, _ = innovations_mle(endog, order=(1, 0, 1), demean=True)

    assert_allclose(p.params, [0.7446, 0.3213, 0.4750], atol=1e-4)
Code example #19
def test_basic():
    endog = lake.copy()
    exog = np.arange(1, len(endog) + 1) * 1.0

    # Test default options (include_constant=True, concentrate_scale=False)
    p, res = statespace(endog,
                        exog=exog,
                        order=(1, 0, 0),
                        include_constant=True,
                        concentrate_scale=False)

    mod_ss = sarimax.SARIMAX(endog, exog=add_constant(exog), order=(1, 0, 0))
    res_ss = mod_ss.filter(p.params)

    assert_allclose(res.statespace_results.llf, res_ss.llf)

    # Test include_constant=False
    p, res = statespace(endog,
                        exog=exog,
                        order=(1, 0, 0),
                        include_constant=False,
                        concentrate_scale=False)

    mod_ss = sarimax.SARIMAX(endog, exog=exog, order=(1, 0, 0))
    res_ss = mod_ss.filter(p.params)

    assert_allclose(res.statespace_results.llf, res_ss.llf)

    # Test concentrate_scale=True
    p, res = statespace(endog,
                        exog=exog,
                        order=(1, 0, 0),
                        include_constant=True,
                        concentrate_scale=True)

    mod_ss = sarimax.SARIMAX(endog,
                             exog=add_constant(exog),
                             order=(1, 0, 0),
                             concentrate_scale=True)
    res_ss = mod_ss.filter(p.params)

    assert_allclose(res.statespace_results.llf, res_ss.llf)
Code example #20
def test_arma_kwargs():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test with the default method for scipy.optimize.minimize (BFGS)
    _, res1_imle = gls(endog, exog=exog, order=(1, 0, 1), n_iter=1)
    assert_equal(res1_imle.arma_estimator_kwargs, {})
    assert_equal(res1_imle.arma_results[1].minimize_results.message,
                 'Optimization terminated successfully.')

    # Now specify a different method (L-BFGS-B)
    arma_estimator_kwargs = {'minimize_kwargs': {'method': 'L-BFGS-B'}}
    _, res2_imle = gls(endog,
                       exog=exog,
                       order=(1, 0, 1),
                       n_iter=1,
                       arma_estimator_kwargs=arma_estimator_kwargs)
    assert_equal(res2_imle.arma_estimator_kwargs, arma_estimator_kwargs)
    assert_equal(res2_imle.arma_results[1].minimize_results.message,
                 b'CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH')
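Because minimize_kwargs is forwarded to scipy.optimize.minimize, other minimize arguments should pass through the same way. A hedged sketch (the 'options' dict here is an assumption based on scipy's signature, not something this test exercises):

# Hypothetical: forward solver options in addition to the method name.
arma_estimator_kwargs = {
    'minimize_kwargs': {'method': 'L-BFGS-B', 'options': {'maxiter': 50}}
}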
Code example #21
def test_results():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test for results output
    p, res = gls(endog, exog, order=(1, 0, 0))

    assert_('params' in res)
    assert_('converged' in res)
    assert_('differences' in res)
    assert_('iterations' in res)
    assert_('arma_estimator' in res)
    assert_('arma_results' in res)

    assert_(res.converged)
    assert_(res.iterations > 0)
    assert_equal(res.arma_estimator, 'innovations_mle')
    assert_equal(len(res.params), res.iterations + 1)
    assert_equal(len(res.differences), res.iterations + 1)
    assert_equal(len(res.arma_results), res.iterations + 1)
    assert_equal(res.params[-1], p)
Code example #22
def test_set_default_unbiased():
    # setting unbiased=None with stationary and invertible parameters should
    # yield the exact same results as setting unbiased=True
    endog = lake.copy()

    # unbiased=None
    p_1, other_results_1 = hannan_rissanen(
        endog, ar_order=1, ma_order=1, unbiased=None
    )

    # unbiased=True
    p_2, other_results_2 = hannan_rissanen(
        endog, ar_order=1, ma_order=1, unbiased=True
    )

    np.testing.assert_array_equal(p_1.ar_params, p_2.ar_params)
    np.testing.assert_array_equal(p_1.ma_params, p_2.ma_params)
    assert p_1.sigma2 == p_2.sigma2
    np.testing.assert_array_equal(other_results_1.resid, other_results_2.resid)

    # unbiased=False
    p_3, _ = hannan_rissanen(
        endog, ar_order=1, ma_order=1, unbiased=False
    )
    assert not np.array_equal(p_1.ar_params, p_3.ar_params)
Code example #23
def test_misc():
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test for warning if iterations fail to converge
    assert_warns(UserWarning, gls, endog, exog, order=(2, 0, 0), max_iter=0)
Code example #24
def test_alternate_arma_estimators_invalid():
    # Test that specifying an invalid ARMA estimator raises an error
    endog = lake.copy()
    exog = np.c_[np.ones_like(endog), np.arange(1, len(endog) + 1) * 1.0]

    # Test for invalid estimator
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 1),
                  arma_estimator='invalid_estimator')

    # Yule-Walker and Burg can only handle consecutive AR
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 1),
                  arma_estimator='yule_walker')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 0),
                  seasonal_order=(1, 0, 0, 4),
                  arma_estimator='yule_walker')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=([0, 1], 0, 0),
                  arma_estimator='yule_walker')

    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 1),
                  arma_estimator='burg')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 0),
                  seasonal_order=(1, 0, 0, 4),
                  arma_estimator='burg')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=([0, 1], 0, 0),
                  arma_estimator='burg')

    # Innovations (MA) can only handle consecutive MA
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(1, 0, 0),
                  arma_estimator='innovations')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 0),
                  seasonal_order=(0, 0, 1, 4),
                  arma_estimator='innovations')
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, [0, 1]),
                  arma_estimator='innovations')

    # Hannan-Rissanen can't handle seasonal components
    assert_raises(ValueError,
                  gls,
                  endog,
                  exog,
                  order=(0, 0, 0),
                  seasonal_order=(0, 0, 1, 4),
                  arma_estimator='hannan_rissanen')
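Taken together, the restrictions exercised by this test are:

yule_walker      consecutive AR only (no MA terms, no seasonal, no lag gaps)
burg             consecutive AR only (no MA terms, no seasonal, no lag gaps)
innovations      consecutive MA only (no AR terms, no seasonal, no lag gaps)
hannan_rissanen  no seasonal components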