import numpy as np
import pytest
from numpy.testing import assert_allclose

from statsmodels.tsa.innovations import arma_innovations
from statsmodels.tsa.statespace.sarimax import SARIMAX


def test_integrated_process(ar_params, diff, ma_params, sigma2):
    # Test loglikelihood computation when the model includes integration

    nobs = 100

    endog = np.cumsum(np.random.normal(size=nobs))

    # Innovations algorithm approach
    llf_obs = arma_innovations.arma_loglikeobs(
        np.diff(endog, n=diff), ar_params, ma_params, sigma2)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), diff, len(ma_params)),
                  simple_differencing=True)
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    assert_allclose(llf_obs, res.llf_obs)
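

# The parametrization for these tests is not shown in this excerpt; below is
# a minimal sketch of how it might look with pytest. The parameter cases are
# illustrative assumptions, not the original test grid.
@pytest.mark.parametrize("ar_params,diff,ma_params,sigma2", [
    (np.array([0.5]), 1, np.array([]), 1.0),     # ARIMA(1, 1, 0)
    (np.array([]), 1, np.array([0.2]), 1.0),     # ARIMA(0, 1, 1)
    (np.array([0.5]), 2, np.array([0.2]), 1.0),  # ARIMA(1, 2, 1)
])
def test_integrated_process_sketch(ar_params, diff, ma_params, sigma2):
    # Delegate to the test above with explicit (assumed) parameter values
    test_integrated_process(ar_params, diff, ma_params, sigma2)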
def test_innovations_algo_filter_kalman_filter(ar_params, ma_params, sigma2):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process
    endog = np.random.normal(size=100)

    # Innovations algorithm approach
    llf = arma_innovations.arma_loglike(endog, ar_params, ma_params, sigma2)
    llf_obs = arma_innovations.arma_loglikeobs(endog, ar_params, ma_params,
                                               sigma2)
    score = arma_innovations.arma_score(endog, ar_params, ma_params, sigma2)
    score_obs = arma_innovations.arma_scoreobs(endog, ar_params, ma_params,
                                               sigma2)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    params = np.r_[ar_params, ma_params, sigma2]

    # Test that the two approaches are the same
    assert_allclose(llf, mod.loglike(params))
    assert_allclose(llf_obs, mod.loglikeobs(params))
    # Note: the agreement between the two score computations degrades as
    # nobs increases, hence the loose tolerance
    assert_allclose(score, mod.score(params), atol=1e-5)
    assert_allclose(score_obs, mod.score_obs(params), atol=1e-5)
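

# Hedged usage sketch (added for illustration; not part of the original
# suite): the joint quantities should equal the sums of their
# per-observation counterparts, i.e. llf == llf_obs.sum() and
# score == score_obs.sum(axis=0). Parameter values here are assumptions.
def example_per_observation_decomposition():
    endog = np.random.normal(size=100)
    ar_params, ma_params, sigma2 = np.array([0.5]), np.array([0.2]), 1.0
    llf = arma_innovations.arma_loglike(endog, ar_params, ma_params, sigma2)
    llf_obs = arma_innovations.arma_loglikeobs(endog, ar_params, ma_params,
                                               sigma2)
    score = arma_innovations.arma_score(endog, ar_params, ma_params, sigma2)
    score_obs = arma_innovations.arma_scoreobs(endog, ar_params, ma_params,
                                               sigma2)
    assert_allclose(llf, llf_obs.sum())
    assert_allclose(score, score_obs.sum(axis=0))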
def test_regression_with_arma_errors(ar_params, ma_params, sigma2):
    # Test loglikelihood computation when the model has regressors
    nobs = 100

    eps = np.random.normal(size=nobs)
    exog = np.c_[np.ones(nobs), np.random.uniform(size=nobs)]
    beta = [5, -0.2]
    endog = np.dot(exog, beta) + eps

    # Innovations algorithm approach
    beta_hat = np.squeeze(np.linalg.pinv(exog).dot(endog))
    resid = endog - np.dot(exog, beta_hat)  # OLS residuals
    llf_obs = arma_innovations.arma_loglikeobs(
        resid, ar_params, ma_params, sigma2)

    # Kalman filter approach
    # (this works because we fix the regression coefficients at beta_hat
    # here; in practice, the MLE estimates will not necessarily match the
    # OLS estimates beta_hat)
    mod = SARIMAX(endog, exog=exog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[beta_hat, ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    assert_allclose(llf_obs, res.llf_obs)
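

# Hedged follow-up sketch expanding on the comment above: under full MLE the
# regression coefficients are estimated jointly with the ARMA parameters and
# need not equal the OLS values beta_hat. Everything below (sample size,
# coefficients, model order) is an illustrative assumption.
def example_mle_vs_ols_coefficients():
    nobs = 200
    exog = np.c_[np.ones(nobs), np.random.uniform(size=nobs)]
    endog = exog.dot([5, -0.2]) + np.random.normal(size=nobs)

    beta_hat = np.linalg.pinv(exog).dot(endog)  # OLS estimates

    mod = SARIMAX(endog, exog=exog, order=(1, 0, 1))
    res = mod.fit(disp=0)  # MLE estimates the exog coefficients jointly

    # With default settings the exog coefficients come first in res.params
    print('OLS:', beta_hat)
    print('MLE:', res.params[:2])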