Example #1
def test_arma_acov_compare_theoretical_arma_acov():
    # Test against the older version of this function, which used a different
    # approach that nicely shows the theoretical relationship.
    # See GH#5324, where the old version was removed, for the full function,
    # including documentation and inline comments.

    def arma_acovf_historical(ar, ma, nobs=10):
        if np.abs(np.sum(ar) - 1) > 0.9:
            nobs_ir = max(1000, 2 * nobs)
        else:
            nobs_ir = max(100, 2 * nobs)
        ir = arma_impulse_response(ar, ma, leads=nobs_ir)
        while ir[-1] > 5 * 1e-5:
            nobs_ir *= 10
            ir = arma_impulse_response(ar, ma, leads=nobs_ir)
        if nobs_ir > 50000 and nobs < 1001:
            end = len(ir)
            acovf = np.array([np.dot(ir[:end-nobs-t], ir[t:end-nobs])
                              for t in range(nobs)])
        else:
            acovf = np.correlate(ir, ir, 'full')[len(ir) - 1:]
        return acovf[:nobs]

    assert_allclose(arma_acovf([1, -0.5], [1, 0.2]),
                    arma_acovf_historical([1, -0.5], [1, 0.2]))
    assert_allclose(arma_acovf([1, -0.99], [1, 0.2]),
                    arma_acovf_historical([1, -0.99], [1, 0.2]))
Example #2
def test_arma_acov_compare_theoretical_arma_acov():
    # Test against the older version of this function, which used a different
    # approach that nicely shows the theoretical relationship.
    # See GH#5324, where the old version was removed, for the full function,
    # including documentation and inline comments.

    def arma_acovf_historical(ar, ma, nobs=10):
        if np.abs(np.sum(ar) - 1) > 0.9:
            nobs_ir = max(1000, 2 * nobs)
        else:
            nobs_ir = max(100, 2 * nobs)
        ir = arma_impulse_response(ar, ma, leads=nobs_ir)
        while ir[-1] > 5 * 1e-5:
            nobs_ir *= 10
            ir = arma_impulse_response(ar, ma, leads=nobs_ir)
        if nobs_ir > 50000 and nobs < 1001:
            end = len(ir)
            acovf = np.array(
                [
                    np.dot(ir[: end - nobs - t], ir[t : end - nobs])
                    for t in range(nobs)
                ]
            )
        else:
            acovf = np.correlate(ir, ir, "full")[len(ir) - 1 :]
        return acovf[:nobs]

    assert_allclose(
        arma_acovf([1, -0.5], [1, 0.2]),
        arma_acovf_historical([1, -0.5], [1, 0.2]),
    )
    assert_allclose(
        arma_acovf([1, -0.99], [1, 0.2]),
        arma_acovf_historical([1, -0.99], [1, 0.2]),
    )
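The relationship both versions of this test lean on is the MA(inf) identity: if the process has impulse-response coefficients psi_0, psi_1, ..., then gamma(k) = sigma2 * sum_j psi_j * psi_{j+k}. A minimal sketch of that identity (not part of the test suite), assuming sigma2 = 1 and a truncation long enough for the coefficients to have decayed:

import numpy as np
from statsmodels.tsa.arima_process import arma_acovf, arma_impulse_response

ar, ma = [1, -0.5], [1, 0.2]
psi = arma_impulse_response(ar, ma, leads=1000)  # MA(inf) coefficients
gamma0 = np.dot(psi, psi)            # variance (k = 0), sigma2 assumed 1
gamma1 = np.dot(psi[:-1], psi[1:])   # first autocovariance (k = 1)
np.testing.assert_allclose([gamma0, gamma1], arma_acovf(ar, ma, nobs=2))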
Example #3
def test_innovations_algo_filter_kalman_filter(reset_randomstate):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process
    ar_params = np.array([0.5])
    ma_params = np.array([0.2])
    # TODO could generalize to sigma2 != 1, if desired, after #5324 is merged
    # and there is a sigma2 argument to arma_acovf
    # (but maybe this is not really necessary for the point of this test)
    sigma2 = 1

    endog = np.random.normal(size=10)

    # Innovations algorithm approach
    acovf = arma_acovf(np.r_[1, -ar_params], np.r_[1, ma_params],
                       nobs=len(endog))

    theta, v = innovations_algo(acovf)
    u = innovations_filter(endog, theta)
    llf_obs = -0.5 * u ** 2 / (sigma2 * v) - 0.5 * np.log(2 * np.pi * v)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    atol = 1e-6 if PLATFORM_WIN else 0.0
    assert_allclose(u, res.forecasts_error[0], rtol=1e-6, atol=atol)
    assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1],
                    atol=atol)
    assert_allclose(llf_obs, res.llf_obs, atol=atol)
Example #4
def test_innovations_algo_filter_kalman_filter(reset_randomstate):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process
    ar_params = np.array([0.5])
    ma_params = np.array([0.2])
    # TODO could generalize to sigma2 != 1, if desired, after #5324 is merged
    # and there is a sigma2 argument to arma_acovf
    # (but maybe this is not really necessary for the point of this test)
    sigma2 = 1

    endog = np.random.normal(size=10)

    # Innovations algorithm approach
    acovf = arma_acovf(np.r_[1, -ar_params], np.r_[1, ma_params],
                       nobs=len(endog))

    theta, v = innovations_algo(acovf)
    u = innovations_filter(endog, theta)
    llf_obs = -0.5 * u**2 / (sigma2 * v) - 0.5 * np.log(2 * np.pi * v)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    atol = 1e-6 if PLATFORM_WIN else 0.0
    assert_allclose(u, res.forecasts_error[0], atol=atol)
    assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1],
                    atol=atol)
    assert_allclose(llf_obs, res.llf_obs, atol=atol)
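The llf_obs line in these tests is the Gaussian prediction-error decomposition: each innovation u_t with mean square error sigma2 * v_t contributes one term to the exact log-likelihood. A small self-contained sketch of that decomposition (the function name here is illustrative, not from the test file):

import numpy as np

def innovations_loglikeobs(u, v, sigma2=1.0):
    # Per-observation Gaussian log-likelihood from innovations u and
    # (relative) innovation mean square errors v.
    u, v = np.asarray(u), sigma2 * np.asarray(v)
    return -0.5 * np.log(2 * np.pi * v) - 0.5 * u**2 / v

# The total exact log-likelihood is the sum of the per-observation terms,
# which is what the Kalman filter reports as res.llf on the other side.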
Example #5
def test_arma_acovf():
    # Check for specific AR(1)
    N = 20
    phi = 0.9
    sigma = 1
    # rep 1: from module function
    rep1 = arma_acovf([1, -phi], [1], N)
    # rep 2: manually
    rep2 = [1.0 * sigma * phi ** i / (1 - phi ** 2) for i in range(N)]
    assert_allclose(rep1, rep2)
Example #6
def test_arma_acovf():
    # Check for specific AR(1)
    N = 20
    phi = 0.9
    sigma = 1
    # rep 1: from module function
    rep1 = arma_acovf([1, -phi], [1], N)
    # rep 2: manually
    rep2 = [1. * sigma * phi ** i / (1 - phi ** 2) for i in range(N)]
    assert_almost_equal(rep1, rep2, 7)  # 7 is max precision here
Example #7
def test_arma_acovf():
    # Check for specific AR(1)
    N = 20
    phi = 0.9
    sigma = 1
    # rep 1: from module function
    rep1 = arma_acovf([1, -phi], [1], N)
    # rep 2: manually
    rep2 = [1. * sigma * phi ** i / (1 - phi ** 2) for i in range(N)]
    assert_almost_equal(rep1, rep2, 7)  # 7 is max precision here
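The manual benchmark in these tests is the closed form gamma(k) = sigma2 * phi**k / (1 - phi**2) for an AR(1). A quick sketch (not from the test file) of why it holds, via the Yule-Walker recursion gamma(k) = phi * gamma(k - 1):

import numpy as np

phi, sigma2, N = 0.9, 1.0, 20
gamma = np.empty(N)
gamma[0] = sigma2 / (1 - phi**2)     # solve gamma(0) = phi**2 gamma(0) + sigma2
for k in range(1, N):
    gamma[k] = phi * gamma[k - 1]    # Yule-Walker: gamma(k) = phi gamma(k-1)

closed_form = sigma2 * phi ** np.arange(N) / (1 - phi**2)
np.testing.assert_allclose(gamma, closed_form)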
Example #8
    def _params2cov(self, params, nobs):
        '''Get the autocovariance matrix implied by ARMA regression parameters.

        AR parameters are assumed to have the right-hand-side (rhs)
        parameterization.
        '''
        ar = np.r_[[1], -params[:self.nar]]
        ma = np.r_[[1], params[-self.nma:]]
        # print('ar', ar)
        # print('ma', ma)
        # print('nobs', nobs)
        autocov = arma_acovf(ar, ma, nobs=nobs)
        # print('arma_acovf(%r, %r, nobs=%d)' % (ar, ma, nobs))
        # print(autocov.shape)
        # something was strange here; fixed in arma_acovf
        autocov = autocov[:nobs]
        sigma = toeplitz(autocov)
        return sigma
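A standalone sketch of what _params2cov does, outside the class: the stationary autocovariance matrix of an ARMA process is the Toeplitz matrix of its autocovariance function (the parameter values here are illustrative):

import numpy as np
from scipy.linalg import toeplitz
from statsmodels.tsa.arima_process import arma_acovf

ar = np.r_[1, -0.5]        # AR polynomial [1, -phi_1, ...]
ma = np.r_[1, 0.2]         # MA polynomial [1, theta_1, ...]
autocov = arma_acovf(ar, ma, nobs=5)
sigma = toeplitz(autocov)  # 5 x 5 symmetric Toeplitz covariance matrix
assert sigma.shape == (5, 5)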
Example #9
    def _params2cov(self, params, nobs):
        '''Get the autocovariance matrix implied by ARMA regression parameters.

        AR parameters are assumed to have the right-hand-side (rhs)
        parameterization.
        '''
        ar = np.r_[[1], -params[:self.nar]]
        ma = np.r_[[1], params[-self.nma:]]
        # print('ar', ar)
        # print('ma', ma)
        # print('nobs', nobs)
        autocov = arma_acovf(ar, ma, nobs=nobs)
        # print('arma_acovf(%r, %r, nobs=%d)' % (ar, ma, nobs))
        # print(autocov.shape)
        # something was strange here; fixed in arma_acovf
        autocov = autocov[:nobs]
        sigma = toeplitz(autocov)
        return sigma
Example #10
def test_innovations_algo_filter_kalman_filter(ar_params, ma_params, sigma2):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    endog = np.random.normal(size=10)
    nobs = len(endog)

    # Innovations algorithm approach
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    transformed_acov = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf / sigma2)
    acovf, acovf2 = (np.array(mv) for mv in transformed_acov)
    theta, r = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    v = np.array(r) * sigma2
    u = np.array(u)

    llf_obs = -0.5 * u**2 / v - 0.5 * np.log(2 * np.pi * v)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    assert_allclose(u, res.forecasts_error[0])
    # assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1])
    assert_allclose(llf_obs, res.llf_obs)

    # Get llf_obs directly
    llf_obs2 = _arma_innovations.darma_loglikeobs_fast(
        endog, ar_params, ma_params, sigma2)

    assert_allclose(llf_obs2, res.llf_obs)
Example #11
def test_innovations_algo_filter_kalman_filter(ar_params, ma_params, sigma2):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    endog = np.random.normal(size=10)
    nobs = len(endog)

    # Innovations algorithm approach
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    transformed_acov = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf / sigma2)
    acovf, acovf2 = (np.array(mv) for mv in transformed_acov)
    theta, r = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    v = np.array(r) * sigma2
    u = np.array(u)

    llf_obs = -0.5 * u**2 / v - 0.5 * np.log(2 * np.pi * v)

    # Kalman filter approach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    assert_allclose(u, res.forecasts_error[0])
    # assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1])
    assert_allclose(llf_obs, res.llf_obs)

    # Get llf_obs directly
    llf_obs2 = _arma_innovations.darma_loglikeobs_fast(
        endog, ar_params, ma_params, sigma2)

    assert_allclose(llf_obs2, res.llf_obs)
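The darma_* calls above are the double-precision Cython internals (the `d` is a BLAS-style prefix). The same computation can be sketched through the pure-Python wrapper defined in Examples #18 and #20 below; this assumes arma_innovations is in scope and uses sigma2 = 1 so that the returned v is the innovation mean square error:

import numpy as np

np.random.seed(0)
endog = np.random.normal(size=10)
u, v = arma_innovations(endog, ar_params=[0.5], ma_params=[0.2], sigma2=1)
llf_obs = -0.5 * u**2 / v - 0.5 * np.log(2 * np.pi * v)  # as in the test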
Example #12
comparefn = dict([('ma1', acovf_ma1),
                  ('ma2', acovf_ma2),
                  ('arma11', acovf_arma11),
                  ('ar1', acovf_arma11)])

cases = [('ma1', (ar0, ma1)),
        ('ma2', (ar0, ma2)),
        ('arma11', (ar1, ma1)),
        ('ar1', (ar1, ma0))]

for c, args in cases:

    ar, ma = args
    print('')
    print(c, ar, ma)
    myacovf = arma_acovf(ar, ma, nobs=10)
    myacf = arma_acf(ar, ma, nobs=10)
    if c[:2] == 'ma':
        othacovf = comparefn[c](ma)
    else:
        othacovf = comparefn[c](ar, ma)
    print(myacovf[:5])
    print(othacovf[:5])
    # something broke again: for the high-persistence case (e.g. ar=0.99),
    # the number of impulse-response leads has to be large;
    # made changes to arma_acovf
    assert_array_almost_equal(myacovf, othacovf, 10)
    assert_array_almost_equal(myacf, othacovf / othacovf[0], 10)


#from nitime.utils
Example #13
def test_brockwell_davis_ex533():
    # See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.3: ARMA(1, 1) process, p. 177
    nobs = 10

    ar_params = np.array([0.2])
    ma_params = np.array([0.4])
    sigma2 = 8.92
    p = len(ar_params)
    q = len(ma_params)
    m = max(p, q)

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # First, get the autocovariance of the process
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    unconditional_variance = (
        sigma2 * (1 + 2 * ar_params[0] * ma_params[0] + ma_params[0]**2) /
        (1 - ar_params[0]**2))
    assert_allclose(arma_process_acovf[0], unconditional_variance)

    # Next, get the autocovariance of the transformed process
    # Note: as required by {{prefix}}arma_transformed_acovf, we first divide
    # through by sigma^2
    arma_process_acovf /= sigma2
    unconditional_variance /= sigma2
    transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf)
    acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)

    # `acovf` is a 2m x 2m matrix, where m = max(p, q),
    # but it is only valid for the autocovariances of the first m observations
    # (this means in particular that the block `acovf[m:, m:]` should *not* be
    # used)
    # `acovf2` then contains the (time invariant) autocovariance terms for
    # the observations m + 1, ..., nobs - since the autocovariance is the same
    # for these terms, to save space we do not construct the autocovariance
    # matrix as we did for the first m terms. Thus `acovf2[0]` is the variance,
    # `acovf2[1]` is the first autocovariance, etc.

    # Test the autocovariance function for observations m + 1, ..., nobs
    # (it is time invariant here)
    assert_equal(acovf2.shape, (nobs - m,))
    assert_allclose(acovf2[0], 1 + ma_params[0]**2)
    assert_allclose(acovf2[1], ma_params[0])
    assert_allclose(acovf2[2:], 0)

    # Test the autocovariance function for observations 1, ..., m
    # (it is time varying here)
    assert_equal(acovf.shape, (m * 2, m * 2))

    # (we need to check `acovf[:m * 2, :m]`, i.e. `acovf[:2, :1]`)
    ix = np.diag_indices_from(acovf)
    ix_lower = (ix[0][:-1] + 1, ix[1][:-1])

    # acovf[ix] is the diagonal, and we want to check the first m
    # elements of the diagonal
    assert_allclose(acovf[ix][:m], unconditional_variance)

    # acovf[ix_lower] is the first lower off-diagonal
    assert_allclose(acovf[ix_lower][:m], ma_params[0])

    # Now, check that we compute the moving average coefficients and the
    # associated variances correctly
    out = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    theta = np.array(out[0])
    v = np.array(out[1])

    # Test v (see eq. 5.3.13)
    desired_v = np.zeros(nobs)
    desired_v[0] = unconditional_variance
    for i in range(1, nobs):
        desired_v[i] = 1 + (1 - 1 / desired_v[i - 1]) * ma_params[0]**2
    assert_allclose(v, desired_v)

    # Test theta (see eq. 5.3.13)
    # Note that they will have shape (nobs, m + 1) here, not (nobs, nobs - 1)
    # as in the original (non-fast) version
    assert_equal(theta.shape, (nobs, m + 1))
    desired_theta = np.zeros(nobs)
    for i in range(1, nobs):
        desired_theta[i] = ma_params[0] / desired_v[i - 1]
    assert_allclose(theta[:, 0], desired_theta)
    assert_allclose(theta[:, 1:], 0)

    # Test against Table 5.3.1
    endog = np.array([
        -1.1, 0.514, 0.116, -0.845, 0.872, -0.467, -0.977, -1.699, -1.228,
        -1.093])
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    # Note: Table 5.3.1 has \hat X_n+1 = -0.5340 for n = 1, but this seems to
    # be a typo, since equation 5.3.12 gives the form of the prediction
    # equation as \hat X_n+1 = \phi X_n + \theta_n1 (X_n - \hat X_n)
    # Then for n = 1 we have:
    # \hat X_n+1 = 0.2 (-1.1) + (0.2909) (-1.1 - 0) = -0.5399
    # And for n = 2 if we use what we have computed, then we get:
    # \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.54)) = 0.5068
    # as desired, whereas if we used the book's number for n=1 we would get:
    # \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.534)) = 0.5045
    # which is not what Table 5.3.1 shows.
    desired_hat = np.array([
        0, -0.540, 0.5068, -0.1321, -0.4539, 0.7046, -0.5620, -0.3614,
        -0.8748, -0.3869])
    desired_u = endog - desired_hat
    assert_allclose(u, desired_u, atol=1e-4)
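A compact sketch of the scalar recursions from eq. 5.3.13 that the loops above verify, using the same ARMA(1, 1) values (phi = 0.2, theta = 0.4; everything scaled by sigma2):

phi, theta, nobs = 0.2, 0.4, 10
v = [(1 + 2 * phi * theta + theta**2) / (1 - phi**2)]  # v_0: scaled variance
thetas = [0.0]
for i in range(1, nobs):
    thetas.append(theta / v[i - 1])                # theta_i1 = theta / v_{i-1}
    v.append(1 + (1 - 1 / v[i - 1]) * theta**2)    # v_i
# `thetas` and `v` reproduce desired_theta and desired_v from the test above.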
Example #14
def test_brockwell_davis_ex534():
    # See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.4: ARMA(2, 3) process, p. 178
    nobs = 10

    ar_params = np.array([1, -0.24])
    ma_params = np.array([0.4, 0.2, 0.1])
    sigma2 = 1
    p = len(ar_params)
    q = len(ma_params)
    m = max(p, q)

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # First, get the autocovariance of the process
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    assert_allclose(arma_process_acovf[:3],
                    [7.17133, 6.44139, 5.06027], atol=1e-5)

    # Next, get the autocovariance of the transformed process
    transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf)
    acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)
    # See test_brockwell_davis_ex533 for details on acovf vs acovf2

    # Test acovf
    assert_equal(acovf.shape, (m * 2, m * 2))

    ix = np.diag_indices_from(acovf)
    ix_lower1 = (ix[0][:-1] + 1, ix[1][:-1])
    ix_lower2 = (ix[0][:-2] + 2, ix[1][:-2])
    ix_lower3 = (ix[0][:-3] + 3, ix[1][:-3])
    ix_lower4 = (ix[0][:-4] + 4, ix[1][:-4])

    assert_allclose(acovf[ix][:m], 7.17133, atol=1e-5)
    desired = [6.44139, 6.44139, 0.816]
    assert_allclose(acovf[ix_lower1][:m], desired, atol=1e-5)
    assert_allclose(acovf[ix_lower2][0], 5.06027, atol=1e-5)
    assert_allclose(acovf[ix_lower2][1:m], 0.34, atol=1e-5)
    assert_allclose(acovf[ix_lower3][:m], 0.1, atol=1e-5)
    assert_allclose(acovf[ix_lower4][:m], 0, atol=1e-5)

    # Test acovf2
    assert_equal(acovf2.shape, (nobs - m,))
    assert_allclose(acovf2[:4], [1.21, 0.5, 0.24, 0.1])
    assert_allclose(acovf2[4:], 0)

    # Test innovations algorithm output
    out = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    theta = np.array(out[0])
    v = np.array(out[1])

    # Test v (see Table 5.3.2)
    desired_v = [7.1713, 1.3856, 1.0057, 1.0019, 1.0016, 1.0005, 1.0000,
                 1.0000, 1.0000, 1.0000]
    assert_allclose(v, desired_v, atol=1e-4)

    # Test theta (see Table 5.3.2)
    assert_equal(theta.shape, (nobs, m + 1))
    desired_theta = np.array([
        [0, 0.8982, 1.3685, 0.4008, 0.3998, 0.3992, 0.4000, 0.4000, 0.4000,
         0.4000],
        [0, 0, 0.7056, 0.1806, 0.2020, 0.1995, 0.1997, 0.2000, 0.2000, 0.2000],
        [0, 0, 0, 0.0139, 0.0722, 0.0994, 0.0998, 0.0998, 0.0999, 0.1]]).T
    assert_allclose(theta[:, :m], desired_theta, atol=1e-4)
    assert_allclose(theta[:, m:], 0)

    # Test innovations filter output
    endog = np.array([1.704, 0.527, 1.041, 0.942, 0.555, -1.002, -0.585, 0.010,
                      -0.638, 0.525])
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    desired_hat = np.array([
        0, 1.5305, -0.1710, 1.2428, 0.7443, 0.3138, -1.7293, -0.1688,
        0.3193, -0.8731])
    desired_u = endog - desired_hat
    assert_allclose(u, desired_u, atol=1e-4)
Example #15
def test_brockwell_davis_ex534():
    # See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.4: ARMA(2, 3) process, p. 178
    nobs = 10

    ar_params = np.array([1, -0.24])
    ma_params = np.array([0.4, 0.2, 0.1])
    sigma2 = 1
    p = len(ar_params)
    q = len(ma_params)
    m = max(p, q)

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # First, get the autocovariance of the process
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    assert_allclose(arma_process_acovf[:3],
                    [7.17133, 6.44139, 5.06027], atol=1e-5)

    # Next, get the autocovariance of the transformed process
    transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf)
    acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)
    # See test_brockwell_davis_ex533 for details on acovf vs acovf2

    # Test acovf
    assert_equal(acovf.shape, (m * 2, m * 2))

    ix = np.diag_indices_from(acovf)
    ix_lower1 = (ix[0][:-1] + 1, ix[1][:-1])
    ix_lower2 = (ix[0][:-2] + 2, ix[1][:-2])
    ix_lower3 = (ix[0][:-3] + 3, ix[1][:-3])
    ix_lower4 = (ix[0][:-4] + 4, ix[1][:-4])

    assert_allclose(acovf[ix][:m], 7.17133, atol=1e-5)
    desired = [6.44139, 6.44139, 0.816]
    assert_allclose(acovf[ix_lower1][:m], desired, atol=1e-5)
    assert_allclose(acovf[ix_lower2][0], 5.06027, atol=1e-5)
    assert_allclose(acovf[ix_lower2][1:m], 0.34, atol=1e-5)
    assert_allclose(acovf[ix_lower3][:m], 0.1, atol=1e-5)
    assert_allclose(acovf[ix_lower4][:m], 0, atol=1e-5)

    # Test acovf2
    assert_equal(acovf2.shape, (nobs - m,))
    assert_allclose(acovf2[:4], [1.21, 0.5, 0.24, 0.1])
    assert_allclose(acovf2[4:], 0)

    # Test innovations algorithm output
    out = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    theta = np.array(out[0])
    v = np.array(out[1])

    # Test v (see Table 5.3.2)
    desired_v = [7.1713, 1.3856, 1.0057, 1.0019, 1.0016, 1.0005, 1.0000,
                 1.0000, 1.0000, 1.0000]
    assert_allclose(v, desired_v, atol=1e-4)

    # Test theta (see Table 5.3.2)
    assert_equal(theta.shape, (nobs, m + 1))
    desired_theta = np.array([
        [0, 0.8982, 1.3685, 0.4008, 0.3998, 0.3992, 0.4000, 0.4000, 0.4000,
         0.4000],
        [0, 0, 0.7056, 0.1806, 0.2020, 0.1995, 0.1997, 0.2000, 0.2000, 0.2000],
        [0, 0, 0, 0.0139, 0.0722, 0.0994, 0.0998, 0.0998, 0.0999, 0.1]]).T
    assert_allclose(theta[:, :m], desired_theta, atol=1e-4)
    assert_allclose(theta[:, m:], 0)

    # Test innovations filter output
    endog = np.array([1.704, 0.527, 1.041, 0.942, 0.555, -1.002, -0.585, 0.010,
                      -0.638, 0.525])
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    desired_hat = np.array([
        0, 1.5305, -0.1710, 1.2428, 0.7443, 0.3138, -1.7293, -0.1688,
        0.3193, -0.8731])
    desired_u = endog - desired_hat
    assert_allclose(u, desired_u, atol=1e-4)
Example #16
    '''
    print('\n*********************\n    chapter2: 1.5\n*********************')
    p, q, acovf = 0, 2, (12.4168, -4.7520, 5.2)
    maPara = armaME(p, q, acovf)

    print('\n*********************\n    chapter2: 2.4\n*********************')
    p, q, acovf = 2, 2, (5.61, -1.1, 0.23, 0.43, -0.1)
    armaPara = armaME(p, q, acovf)

    '''
    ### chapter 3: 2.5
    use the function arima_process.arma_acovf() from statsmodels.tsa
    to compute the theoretical autocovariance function of an ARMA process
    '''
    ar, ma, sigma2 = (1, -0.0894, 0.6265), (1, -0.3334, 0.8158), 4.0119
    armaAcovf = arima_process.arma_acovf(ar, ma, nobs=11, sigma2=sigma2)

    print('\n*********************\n    chapter2: 2.5\n*********************')
    for i, value in enumerate(armaAcovf):
        if i > 4:
            print('    γ%d = %.4f' % (i, value))
    print('\n')

    '''
    ### chapter 4: 3.4
    use the custom function _rejRate()
    to compute the rejection rate of the white-noise hypothesis test
    with different c2 for fixed c1, d1 and d2
    '''
    print('\n*********************\n    chapter4: 3.4\n*********************')
    print('\nCannot get all available (c1,c2,d1,d2)')
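A side note on the sigma2 argument used above (a sketch, not from the script): sigma2 just scales the autocovariance function linearly, i.e. gamma(k; sigma2) = sigma2 * gamma(k; 1).

import numpy as np
from statsmodels.tsa import arima_process

ar, ma, sigma2 = (1, -0.0894, 0.6265), (1, -0.3334, 0.8158), 4.0119
np.testing.assert_allclose(
    arima_process.arma_acovf(ar, ma, nobs=11, sigma2=sigma2),
    sigma2 * arima_process.arma_acovf(ar, ma, nobs=11))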
Example #17
ma1 = [1., 0.4]
ma2 = [1., 0.4, 0.6]
ma0 = [1., 0.]

comparefn = dict([('ma1', acovf_ma1), ('ma2', acovf_ma2),
                  ('arma11', acovf_arma11), ('ar1', acovf_arma11)])

cases = [('ma1', (ar0, ma1)), ('ma2', (ar0, ma2)), ('arma11', (ar1, ma1)),
         ('ar1', (ar1, ma0))]

for c, args in cases:

    ar, ma = args
    print('')
    print(c, ar, ma)
    myacovf = arma_acovf(ar, ma, nobs=10)
    myacf = arma_acf(ar, ma, nobs=10)
    if c[:2] == 'ma':
        othacovf = comparefn[c](ma)
    else:
        othacovf = comparefn[c](ar, ma)
    print(myacovf[:5])
    print(othacovf[:5])
    # something broke again: for the high-persistence case (e.g. ar=0.99),
    # the number of impulse-response leads has to be large;
    # made changes to arma_acovf
    assert_array_almost_equal(myacovf, othacovf, 10)
    assert_array_almost_equal(myacf, othacovf / othacovf[0], 10)


#from nitime.utils
Example #18
def arma_innovations(endog,
                     ar_params=None,
                     ma_params=None,
                     sigma2=1,
                     normalize=False,
                     prefix=None):
    """
    Compute innovations using a given ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process; it may be univariate or multivariate.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    normalize : boolean, optional
        Whether or not to normalize the returned innovations. Default is False.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    innovations : ndarray
        Innovations (one-step-ahead prediction errors) for the given `endog`
        series with predictions based on the given ARMA process. If
        `normalize=True`, then the returned innovations have been "whitened" by
        dividing through by the square root of the mean square error.
    innovations_mse : ndarray
        Mean square error for the innovations.

    """
    # Parameters
    endog = np.array(endog)
    squeezed = endog.ndim == 1
    if squeezed:
        endog = endog[:, None]

    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    nobs, k_endog = endog.shape
    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # Get BLAS prefix
    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params,
             np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    # Make arrays contiguous for BLAS calls
    endog = np.asfortranarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    # Get the appropriate functions
    arma_transformed_acovf_fast = getattr(
        _arma_innovations, prefix + 'arma_transformed_acovf_fast')
    arma_innovations_algo_fast = getattr(_arma_innovations,
                                         prefix + 'arma_innovations_algo_fast')
    arma_innovations_filter = getattr(_arma_innovations,
                                      prefix + 'arma_innovations_filter')

    # Run the innovations algorithm for ARMA coefficients
    arma_acovf = arima_process.arma_acovf(ar, ma, sigma2=sigma2,
                                          nobs=nobs) / sigma2
    acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
    theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params, acovf,
                                          acovf2)
    v = np.array(v)
    if normalize:
        v05 = v**0.5

    # Run the innovations filter across each series
    u = []
    for i in range(k_endog):
        u_i = np.array(
            arma_innovations_filter(endog[:, i], ar_params, ma_params, theta))
        u.append(u_i / v05 if normalize else u_i)
    u = np.vstack(u).T

    # Post-processing
    if squeezed:
        u = u.squeeze()

    return u, v
Example #19
def test_brockwell_davis_ex533():
    # See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.3: ARMA(1, 1) process, p. 177
    nobs = 10

    ar_params = np.array([0.2])
    ma_params = np.array([0.4])
    sigma2 = 8.92
    p = len(ar_params)
    q = len(ma_params)
    m = max(p, q)

    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # First, get the autocovariance of the process
    arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
    unconditional_variance = (
        sigma2 * (1 + 2 * ar_params[0] * ma_params[0] + ma_params[0]**2) /
        (1 - ar_params[0]**2))
    assert_allclose(arma_process_acovf[0], unconditional_variance)

    # Next, get the autocovariance of the transformed process
    # Note: as required by {{prefix}}arma_transformed_acovf, we first divide
    # through by sigma^2
    arma_process_acovf /= sigma2
    unconditional_variance /= sigma2
    transformed_acovf = _arma_innovations.darma_transformed_acovf_fast(
        ar, ma, arma_process_acovf)
    acovf, acovf2 = (np.array(arr) for arr in transformed_acovf)

    # `acovf` is a 2m x 2m matrix, where m = max(p, q),
    # but it is only valid for the autocovariances of the first m observations
    # (this means in particular that the block `acovf[m:, m:]` should *not* be
    # used)
    # `acovf2` then contains the (time invariant) autocovariance terms for
    # the observations m + 1, ..., nobs - since the autocovariance is the same
    # for these terms, to save space we do not construct the autocovariance
    # matrix as we did for the first m terms. Thus `acovf2[0]` is the variance,
    # `acovf2[1]` is the first autocovariance, etc.

    # Test the autocovariance function for observations m + 1, ..., nobs
    # (it is time invariant here)
    assert_equal(acovf2.shape, (nobs - m,))
    assert_allclose(acovf2[0], 1 + ma_params[0]**2)
    assert_allclose(acovf2[1], ma_params[0])
    assert_allclose(acovf2[2:], 0)

    # Test the autocovariance function for observations 1, ..., m
    # (it is time varying here)
    assert_equal(acovf.shape, (m * 2, m * 2))

    # (we need to check `acovf[:m * 2, :m]`, i.e. `acovf[:2, :1]`)
    ix = np.diag_indices_from(acovf)
    ix_lower = (ix[0][:-1] + 1, ix[1][:-1])

    # acovf[ix] is the diagonal, and we want to check the first m
    # elements of the diagonal
    assert_allclose(acovf[ix][:m], unconditional_variance)

    # acovf[ix_lower] is the first lower off-diagonal
    assert_allclose(acovf[ix_lower][:m], ma_params[0])

    # Now, check that we compute the moving average coefficients and the
    # associated variances correctly
    out = _arma_innovations.darma_innovations_algo_fast(
        nobs, ar_params, ma_params, acovf, acovf2)
    theta = np.array(out[0])
    v = np.array(out[1])

    # Test v (see eq. 5.3.13)
    desired_v = np.zeros(nobs)
    desired_v[0] = unconditional_variance
    for i in range(1, nobs):
        desired_v[i] = 1 + (1 - 1 / desired_v[i - 1]) * ma_params[0]**2
    assert_allclose(v, desired_v)

    # Test theta (see eq. 5.3.13)
    # Note that they will have shape (nobs, m + 1) here, not (nobs, nobs - 1)
    # as in the original (non-fast) version
    assert_equal(theta.shape, (nobs, m + 1))
    desired_theta = np.zeros(nobs)
    for i in range(1, nobs):
        desired_theta[i] = ma_params[0] / desired_v[i - 1]
    assert_allclose(theta[:, 0], desired_theta)
    assert_allclose(theta[:, 1:], 0)

    # Test against Table 5.3.1
    endog = np.array([
        -1.1, 0.514, 0.116, -0.845, 0.872, -0.467, -0.977, -1.699, -1.228,
        -1.093])
    u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
                                                   theta)

    # Note: Table 5.3.1 has \hat X_n+1 = -0.5340 for n = 1, but this seems to
    # be a typo, since equation 5.3.12 gives the form of the prediction
    # equation as \hat X_n+1 = \phi X_n + \theta_n1 (X_n - \hat X_n)
    # Then for n = 1 we have:
    # \hat X_n+1 = 0.2 (-1.1) + (0.2909) (-1.1 - 0) = -0.5399
    # And for n = 2 if we use what we have computed, then we get:
    # \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.54)) = 0.5068
    # as desired, whereas if we used the book's number for n=1 we would get:
    # \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.534)) = 0.5045
    # which is not what Table 5.3.1 shows.
    desired_hat = np.array([
        0, -0.540, 0.5068, -0.1321, -0.4539, 0.7046, -0.5620, -0.3614,
        -0.8748, -0.3869])
    desired_u = endog - desired_hat
    assert_allclose(u, desired_u, atol=1e-4)
Example #20
def arma_innovations(endog, ar_params=None, ma_params=None, sigma2=1,
                     normalize=False, prefix=None):
    """
    Compute innovations using a given ARMA process

    Parameters
    ----------
    endog : ndarray
        The observed time-series process; it may be univariate or multivariate.
    ar_params : ndarray, optional
        Autoregressive parameters.
    ma_params : ndarray, optional
        Moving average parameters.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    normalize : boolean, optional
        Whether or not to normalize the returned innovations. Default is False.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    innovations : ndarray
        Innovations (one-step-ahead prediction errors) for the given `endog`
        series with predictions based on the given ARMA process. If
        `normalize=True`, then the returned innovations have been "whitened" by
        dividing through by the square root of the mean square error.
    innovations_mse : ndarray
        Mean square error for the innovations.

    """
    # Parameters
    endog = np.array(endog)
    squeezed = endog.ndim == 1
    if squeezed:
        endog = endog[:, None]

    ar_params = np.atleast_1d([] if ar_params is None else ar_params)
    ma_params = np.atleast_1d([] if ma_params is None else ma_params)

    nobs, k_endog = endog.shape
    ar = np.r_[1, -ar_params]
    ma = np.r_[1, ma_params]

    # Get BLAS prefix
    if prefix is None:
        prefix, dtype, _ = find_best_blas_type(
            [endog, ar_params, ma_params, np.array(sigma2)])
    dtype = prefix_dtype_map[prefix]

    # Make arrays contiguous for BLAS calls
    endog = np.asfortranarray(endog, dtype=dtype)
    ar_params = np.asfortranarray(ar_params, dtype=dtype)
    ma_params = np.asfortranarray(ma_params, dtype=dtype)
    sigma2 = dtype(sigma2).item()

    # Get the appropriate functions
    arma_transformed_acovf_fast = getattr(
        _arma_innovations, prefix + 'arma_transformed_acovf_fast')
    arma_innovations_algo_fast = getattr(
        _arma_innovations, prefix + 'arma_innovations_algo_fast')
    arma_innovations_filter = getattr(
        _arma_innovations, prefix + 'arma_innovations_filter')

    # Run the innovations algorithm for ARMA coefficients
    arma_acovf = arima_process.arma_acovf(ar, ma,
                                          sigma2=sigma2, nobs=nobs) / sigma2
    acovf, acovf2 = arma_transformed_acovf_fast(ar, ma, arma_acovf)
    theta, v = arma_innovations_algo_fast(nobs, ar_params, ma_params,
                                          acovf, acovf2)
    v = np.array(v)
    if normalize:
        v05 = v**0.5

    # Run the innovations filter across each series
    u = []
    for i in range(k_endog):
        u_i = np.array(arma_innovations_filter(endog[:, i], ar_params,
                                               ma_params, theta))
        u.append(u_i / v05 if normalize else u_i)
    u = np.vstack(u).T

    # Post-processing
    if squeezed:
        u = u.squeeze()

    return u, v
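A usage sketch for the wrapper above (illustrative, assuming the function and numpy are in scope): when the assumed parameters match the data-generating process, the normalized innovations should be roughly white with unit variance.

import numpy as np

np.random.seed(12345)
eps = np.random.normal(size=200)
endog = np.empty(200)
endog[0] = eps[0]
for t in range(1, 200):
    endog[t] = 0.5 * endog[t - 1] + eps[t]    # simulate an AR(1), phi = 0.5

u, v = arma_innovations(endog, ar_params=[0.5], sigma2=1, normalize=True)
print(round(u.mean(), 2), round(u.std(), 2))  # approximately 0 and 1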