# Imports assumed by the example snippets below (statsmodels state space
# tests; the module paths reflect statsmodels' layout):
import warnings

import numpy as np
import pandas as pd
from numpy.testing import (assert_allclose, assert_almost_equal, assert_equal,
                           assert_raises)

from statsmodels.tsa.statespace import sarimax, tools
from statsmodels.tsa.statespace.kalman_filter import (
    FilterResults, KalmanFilter, PredictionResults)
from statsmodels.tsa.statespace.tests.results import results_kalman_filter


def test_slice_notation():
    endog = np.arange(10)*1.0
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod.bind(endog)

    # Test invalid __setitem__
    def set_designs():
        mod['designs'] = 1
    def set_designs2():
        mod['designs',0,0] = 1
    def set_designs3():
        mod[0] = 1
    assert_raises(IndexError, set_designs)
    assert_raises(IndexError, set_designs2)
    assert_raises(IndexError, set_designs3)

    # Test invalid __getitem__
    assert_raises(IndexError, lambda: mod['designs'])
    assert_raises(IndexError, lambda: mod['designs',0,0,0])
    assert_raises(IndexError, lambda: mod[0])

    # Test valid __setitem__, __getitem__
    assert_equal(mod.design[0,0,0], 0)
    mod['design',0,0,0] = 1
    assert_equal(mod['design'].sum(), 1)
    assert_equal(mod.design[0,0,0], 1)
    assert_equal(mod['design',0,0,0], 1)

    # Test valid __setitem__, __getitem__ with unspecified time index
    mod['design'] = np.zeros(mod['design'].shape)
    assert_equal(mod.design[0,0], 0)
    mod['design',0,0] = 1
    assert_equal(mod.design[0,0], 1)
    assert_equal(mod['design',0,0], 1)

# Example #2
class Options(object):
    def __init__(self, *args, **kwargs):

        # Dummy data
        endog = np.arange(10)
        k_states = 1

        self.model = KalmanFilter(k_endog=1, k_states=k_states, *args, **kwargs)
        self.model.bind(endog)
def test_no_endog():
    # Test for RuntimeError when no endog is provided by the time filtering
    # is initialized.

    mod = KalmanFilter(k_endog=1, k_states=1)

    # directly call the _initialize_filter function
    assert_raises(RuntimeError, mod._initialize_filter)
    # indirectly call it through filtering
    mod.initialize_approximate_diffuse()
    assert_raises(RuntimeError, mod.filter)
def test_filter():
    # Tests of invalid calls to the filter function

    endog = np.ones((10,1))
    mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['selection', :] = 1
    mod['state_cov', :] = 1

    # Test default filter results
    res = mod.filter()
    assert_equal(isinstance(res, FilterResults), True)
def test_loglike():
    # Tests of invalid calls to the loglike function

    endog = np.ones((10, 1))
    mod = KalmanFilter(endog, k_states=1, initialization="approximate_diffuse")
    mod["design", :] = 1
    mod["selection", :] = 1
    mod["state_cov", :] = 1

    # Test that self.memory_no_likelihood = True raises an error
    mod.memory_no_likelihood = True
    assert_raises(RuntimeError, mod.loglike)
    assert_raises(RuntimeError, mod.loglikeobs)
def test_missing():
    # Datasets
    endog = np.arange(10).reshape(10,1)
    endog_pre_na = np.ascontiguousarray(np.c_[
        endog.copy() * np.nan, endog.copy() * np.nan, endog, endog])
    endog_post_na = np.ascontiguousarray(np.c_[
        endog, endog, endog.copy() * np.nan, endog.copy() * np.nan])
    endog_inject_na = np.ascontiguousarray(np.c_[
        endog, endog.copy() * np.nan, endog, endog.copy() * np.nan])
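    # The three padded datasets above add columns that are entirely NaN
    # (before, after, and interleaved with the observed pair). A column of
    # all-missing observations contributes nothing to the likelihood, so each
    # padded model should reproduce the per-observation log-likelihoods of the
    # two-column base model, which is what the assertions below verify.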

    # Base model
    mod = KalmanFilter(np.ascontiguousarray(np.c_[endog, endog]), k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog)*0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf = mod.loglikeobs()

    # Model with prepended nans
    mod = KalmanFilter(endog_pre_na, k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog)*0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_pre_na = mod.loglikeobs()

    assert_allclose(llf_pre_na, llf)

    # Model with appended nans
    mod = KalmanFilter(endog_post_na, k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog)*0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_post_na = mod.loglikeobs()

    assert_allclose(llf_post_na, llf)

    # Model with injected nans
    mod = KalmanFilter(endog_inject_na, k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog)*0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_inject_na = mod.loglikeobs()

    assert_allclose(llf_inject_na, llf)
def test_filter():
    # Tests of invalid calls to the filter function

    endog = np.ones((10, 1))
    mod = KalmanFilter(endog, k_states=1, initialization="approximate_diffuse")
    mod["design", :] = 1
    mod["selection", :] = 1
    mod["state_cov", :] = 1

    # Test default filter results
    res = mod.filter()
    assert_equal(isinstance(res, FilterResults), True)

    # Test specified invalid results class
    assert_raises(ValueError, mod.filter, results=object)

    # Test specified valid results class
    res = mod.filter(results=FilterResults)
    assert_equal(isinstance(res, FilterResults), True)
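    # A minimal sketch (not part of the original test): the ValueError check
    # above suggests `results` only needs to be a FilterResults subclass, so a
    # user-defined results class should be accepted in the same way.
    # `CustomResults` is a hypothetical name used only for illustration.
    class CustomResults(FilterResults):
        pass

    res = mod.filter(results=CustomResults)
    assert_equal(isinstance(res, CustomResults), True)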

class Clark1989(object):
    # (Enclosing class assumed from context: this fragment is the __init__ of
    # the bivariate Clark (1989) test case; the full class appears below.)
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_bi
        self.true_states = pd.DataFrame(self.true["states"])

        # GDP and Unemployment, Quarterly, 1948.1 - 1995.3
        data = pd.DataFrame(
            self.true["data"], index=pd.date_range("1947-01-01", "1995-07-01", freq="QS"), columns=["GDP", "UNEMP"]
        )[4:]
        data["GDP"] = np.log(data["GDP"])
        data["UNEMP"] = data["UNEMP"] / 100

        k_states = 6
        self.model = KalmanFilter(k_endog=2, k_states=k_states, **kwargs)
        self.model.bind(np.ascontiguousarray(data.values))

        # Statespace representation
        self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
        self.model.transition[
            ([0, 0, 1, 1, 2, 3, 4, 5],
             [0, 4, 1, 2, 1, 2, 4, 5],
             [0, 0, 0, 0, 0, 0, 0, 0])
        ] = [1, 1, 0, 0, 1, 1, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec, phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
            self.true["parameters"]
        )
        self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [alpha_1, alpha_2, alpha_3]
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.model.obs_cov[1, 1, 0] = sigma_ec ** 2
        self.model.state_cov[np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
            sigma_v ** 2,
            sigma_e ** 2,
            0,
            0,
            sigma_w ** 2,
            sigma_vl ** 2,
        ]

        # Initialization
        initial_state = np.zeros((k_states,))
        initial_state_cov = np.eye(k_states) * 100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov), self.model.transition[:, :, 0].T
        )
        self.model.initialize_known(initial_state, initial_state_cov)
def test_slice_notation():
    # Test setting and getting state space representation matrices using the
    # slice notation.

    endog = np.arange(10) * 1.0
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod.bind(endog)

    # Test invalid __setitem__
    def set_designs():
        mod["designs"] = 1

    def set_designs2():
        mod["designs", 0, 0] = 1

    def set_designs3():
        mod[0] = 1

    assert_raises(IndexError, set_designs)
    assert_raises(IndexError, set_designs2)
    assert_raises(IndexError, set_designs3)

    # Test invalid __getitem__
    assert_raises(IndexError, lambda: mod["designs"])
    assert_raises(IndexError, lambda: mod["designs", 0, 0, 0])
    assert_raises(IndexError, lambda: mod[0])

    # Test valid __setitem__, __getitem__
    assert_equal(mod.design[0, 0, 0], 0)
    mod["design", 0, 0, 0] = 1
    assert_equal(mod["design"].sum(), 1)
    assert_equal(mod.design[0, 0, 0], 1)
    assert_equal(mod["design", 0, 0, 0], 1)

    # Test valid __setitem__, __getitem__ with unspecified time index
    mod["design"] = np.zeros(mod["design"].shape)
    assert_equal(mod.design[0, 0], 0)
    mod["design", 0, 0] = 1
    assert_equal(mod.design[0, 0], 1)
    assert_equal(mod["design", 0, 0], 1)

class Clark1987(object):
    # (Enclosing class assumed from context: this fragment is the __init__ of
    # the univariate Clark (1987) test case.)
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_uni
        self.true_states = pd.DataFrame(self.true['states'])

        # GDP, Quarterly, 1947.1 - 1995.3
        data = pd.DataFrame(
            self.true['data'],
            index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
            columns=['GDP']
        )
        data['lgdp'] = np.log(data['GDP'])

        # Construct the statespace representation
        k_states = 4
        self.model = KalmanFilter(k_endog=1, k_states=k_states, **kwargs)
        self.model.bind(data['lgdp'].values)

        self.model.design[:, :, 0] = [1, 1, 0, 0]
        self.model.transition[([0, 0, 1, 1, 2, 3],
                               [0, 3, 1, 2, 1, 3],
                               [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
            self.true['parameters']
        )
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.model.state_cov[
            np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
            sigma_v**2, sigma_e**2, 0, sigma_w**2
        ]

        # Initialization
        initial_state = np.zeros((k_states,))
        initial_state_cov = np.eye(k_states)*100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov),
            self.model.transition[:, :, 0].T
        )
        self.model.initialize_known(initial_state, initial_state_cov)

# Example #11
def test_impulse_responses():
    # Test for impulse response functions

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Random walk: 2-unit impulse response (i.e. non-orthogonalized irf) is 2
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, impulse=[2])
    desired = np.ones((11, 1)) * 2

    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation response (i.e. orthogonalized irf) is
    # sigma for all periods (here sigma^2 = 2)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, orthogonalized=True)
    desired = np.ones((11, 1)) * 2**0.5

    assert_allclose(actual, desired)
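    # Why 2**0.5: the orthogonalized impulse is one standard deviation of the
    # shock, i.e. the Cholesky factor of state_cov = [[2.]], which is sqrt(2);
    # for a random walk that same sqrt(2) appears at every horizon, and the
    # cumulative variant below is just its running sum.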

    # Random walk: 1-standard-deviation cumulative response (i.e. cumulative
    # orthogonalized irf)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10,
                                   orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    actual = mod.impulse_responses(steps=10,
                                   impulse=[1],
                                   orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    assert_allclose(actual, desired)

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods, even when intercepts are present
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['state_intercept', 0] = 100.
    mod['design', 0, 0] = 1.
    mod['obs_intercept', 0] = -1000.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Univariate model (random walk): test that an error is thrown when
    # a multivariate or empty "impulse" is sent
    mod = KalmanFilter(k_endog=1, k_states=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=[1, 1])
    assert_raises(ValueError, mod.impulse_responses, impulse=[])

    # Univariate model with two uncorrelated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.eye(2)

    desired = np.ones((11, 1))

    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1, 0])
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0, 1])
    assert_allclose(actual, desired)

    # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10,
                                   impulse=[1, 0],
                                   orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10,
                                   impulse=[0, 1],
                                   orthogonalized=True)
    assert_allclose(actual, desired)

    # Univariate model with two correlated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])
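    # For reference: the lower Cholesky factor of [[1, 0.5], [0.5, 1.25]] is
    # [[1, 0], [0.5, 1]]. A one-standard-deviation (orthogonalized) impulse to
    # the first shock therefore moves the states by (1, 0.5), and with
    # design = [1, 1] the observed response is 1.5; an impulse to the second
    # shock moves only the second state, giving a response of 1. That is what
    # the orthogonalized assertions below check.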

    desired = np.ones((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different responses
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired + desired * 0.5)

    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, desired)

    # Multivariate model with two correlated shocks
    mod = KalmanFilter(k_endog=2, k_states=2)
    mod['design', :, :] = np.eye(2)
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    ones = np.ones((11, 1))
    zeros = np.zeros((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's, but
    # only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, np.c_[ones, zeros])

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, np.c_[zeros, ones])

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different
    # responses, and only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, np.c_[ones, ones * 0.5])

    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, np.c_[zeros, ones])

    # AR(1) model generates a geometrically declining series
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 0))
    phi = 0.5
    mod.update([phi, 1])

    desired = np.cumprod(np.r_[1, [phi] * 10])[:, np.newaxis]
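    # For an AR(1), y_t = phi * y_{t-1} + eps_t, the impulse response at
    # horizon h is phi**h, so the expected path is the cumulative product
    # 1, phi, phi**2, ..., phi**10.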

    # Test going through the model directly
    actual = mod.ssm.impulse_responses(steps=10)
    assert_allclose(actual, desired)

    # Test going through the results object
    res = mod.filter([phi, 1.])
    actual = res.impulse_responses(steps=10)
    assert_allclose(actual, desired)

# Example #12
def test_cython():
    # Test the cython _kalman_filter creation, re-creation, calling, etc.

    # Check that datatypes are correct:
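    # (For context: prefix_dtype_map uses the BLAS-style prefixes, mapping
    # roughly 's', 'd', 'c', 'z' to np.float32, np.float64, np.complex64 and
    # np.complex128; the exact contents are assumed here, not asserted.)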
    for prefix, dtype in tools.prefix_dtype_map.items():
        endog = np.array(1.0, ndmin=2, dtype=dtype)
        mod = KalmanFilter(k_endog=1, k_states=1, dtype=dtype)

        # Bind data and initialize the ?KalmanFilter object
        mod.bind(endog)
        mod._initialize_filter()

        # Check that the dtype and prefix are correct
        assert_equal(mod.prefix, prefix)
        assert_equal(mod.dtype, dtype)

        # Test that a ?KalmanFilter instance was created
        assert_equal(prefix in mod._kalman_filters, True)
        kf = mod._kalman_filters[prefix]
        assert_equal(isinstance(kf, tools.prefix_kalman_filter_map[prefix]), True)

        # Test that the default returned _kalman_filter is the above instance
        assert_equal(mod._kalman_filter, kf)

    # Check that upcasting datatypes / ?KalmanFilter works (e.g. d -> z)
    mod = KalmanFilter(k_endog=1, k_states=1)

    # Default dtype is float
    assert_equal(mod.prefix, "d")
    assert_equal(mod.dtype, np.float64)

    # Prior to initialization, no ?KalmanFilter exists
    assert_equal(mod._kalman_filter, None)

    # Bind data and initialize the ?KalmanFilter object
    endog = np.ascontiguousarray(np.array([1.0, 2.0], dtype=np.float64))
    mod.bind(endog)
    mod._initialize_filter()
    kf = mod._kalman_filters["d"]

    # Rebind data, still float, check that we haven't changed
    mod.bind(endog)
    mod._initialize_filter()
    assert_equal(mod._kalman_filter, kf)

    # Force creating new ?Statespace and ?KalmanFilter, by changing the
    # time-varying character of an array
    mod.design = np.zeros((1, 1, 2))
    mod._initialize_filter()
    assert_equal(mod._kalman_filter == kf, False)
    kf = mod._kalman_filters["d"]

    # Rebind data, now complex, check that the ?KalmanFilter instance has
    # changed
    endog = np.ascontiguousarray(np.array([1.0, 2.0], dtype=np.complex128))
    mod.bind(endog)
    assert_equal(mod._kalman_filter == kf, False)

# Example #13
def test_missing():
    # Datasets
    endog = np.arange(10).reshape(10, 1)
    endog_pre_na = np.ascontiguousarray(np.c_[endog.copy() * np.nan,
                                              endog.copy() * np.nan, endog,
                                              endog])
    endog_post_na = np.ascontiguousarray(np.c_[endog, endog,
                                               endog.copy() * np.nan,
                                               endog.copy() * np.nan])
    endog_inject_na = np.ascontiguousarray(np.c_[endog,
                                                 endog.copy() * np.nan, endog,
                                                 endog.copy() * np.nan])

    # Base model
    mod = KalmanFilter(np.ascontiguousarray(np.c_[endog, endog]),
                       k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog) * 0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf = mod.loglikeobs()

    # Model with prepended nans
    mod = KalmanFilter(endog_pre_na,
                       k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog) * 0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_pre_na = mod.loglikeobs()

    assert_allclose(llf_pre_na, llf)

    # Model with appended nans
    mod = KalmanFilter(endog_post_na,
                       k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog) * 0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_post_na = mod.loglikeobs()

    assert_allclose(llf_post_na, llf)

    # Model with injected nans
    mod = KalmanFilter(endog_inject_na,
                       k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :, :] = 1
    mod['obs_cov', :, :] = np.eye(mod.k_endog) * 0.5
    mod['transition', :, :] = 0.5
    mod['selection', :, :] = 1
    mod['state_cov', :, :] = 0.5
    llf_inject_na = mod.loglikeobs()

    assert_allclose(llf_inject_na, llf)

# Example #14
def test_cython():
    # Test the cython _kalman_filter creation, re-creation, calling, etc.

    # Check that datatypes are correct:
    for prefix, dtype in tools.prefix_dtype_map.items():
        endog = np.array(1., ndmin=2, dtype=dtype)
        mod = KalmanFilter(k_endog=1, k_states=1, dtype=dtype)

        # Bind data and initialize the ?KalmanFilter object
        mod.bind(endog)
        mod._initialize_filter()

        # Check that the dtype and prefix are correct
        assert_equal(mod.prefix, prefix)
        assert_equal(mod.dtype, dtype)

        # Test that a ?KalmanFilter instance was created
        assert_equal(prefix in mod._kalman_filters, True)
        kf = mod._kalman_filters[prefix]
        assert_equal(isinstance(kf, tools.prefix_kalman_filter_map[prefix]),
                     True)

        # Test that the default returned _kalman_filter is the above instance
        assert_equal(mod._kalman_filter, kf)

    # Check that upcasting datatypes / ?KalmanFilter works (e.g. d -> z)
    mod = KalmanFilter(k_endog=1, k_states=1)

    # Default dtype is float
    assert_equal(mod.prefix, 'd')
    assert_equal(mod.dtype, np.float64)

    # Prior to initialization, no ?KalmanFilter exists
    assert_equal(mod._kalman_filter, None)

    # Bind data and initialize the ?KalmanFilter object
    endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.float64))
    mod.bind(endog)
    mod._initialize_filter()
    kf = mod._kalman_filters['d']

    # Rebind data, still float, check that we haven't changed
    mod.bind(endog)
    mod._initialize_filter()
    assert_equal(mod._kalman_filter, kf)

    # Force creating new ?Statespace and ?KalmanFilter, by changing the
    # time-varying character of an array
    mod.design = np.zeros((1, 1, 2))
    mod._initialize_filter()
    assert_equal(mod._kalman_filter == kf, False)
    kf = mod._kalman_filters['d']

    # Rebind data, now complex, check that the ?KalmanFilter instance has
    # changed
    endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.complex128))
    mod.bind(endog)
    assert_equal(mod._kalman_filter == kf, False)

# Example #15
def test_predict():
    # Tests of invalid calls to the predict function

    warnings.simplefilter("always")

    endog = np.ones((10, 1))
    mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_intercept'] = np.zeros((1, 10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1

    # Check that we need both forecasts and predicted output for prediction
    mod.memory_no_forecast = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_forecast = False

    mod.memory_no_predicted = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_predicted = False

    # Now get a clean filter object
    res = mod.filter()

    # Check that start < 0 is an error
    assert_raises(ValueError, res.predict, start=-1)

    # Check that end < start is an error
    assert_raises(ValueError, res.predict, start=2, end=1)

    # Check that dynamic < 0 is an error
    assert_raises(ValueError, res.predict, dynamic=-1)

    # Check that dynamic > end is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=1, dynamic=2)
        message = ('Dynamic prediction specified to begin after the end of'
                   ' prediction, and so has no effect.')
        assert_equal(str(w[0].message), message)

    # Check that dynamic > nobs is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1, 1)))
        message = ('Dynamic prediction specified to begin during'
                   ' out-of-sample forecasting period, and so has no'
                   ' effect.')
        assert_equal(str(w[0].message), message)

    # Check for a warning when providing a non-used statespace matrix
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=res.nobs + 1,
                    design=True,
                    obs_intercept=np.zeros((1, 1)))
        message = ('Model has time-invariant design matrix, so the design'
                   ' argument to `predict` has been ignored.')
        assert_equal(str(w[0].message), message)

    # Check that an error is raised when a new time-varying matrix is not
    # provided
    assert_raises(ValueError, res.predict, end=res.nobs + 1)
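    # (Because `obs_intercept` was set above as a time-varying (1, nobs)
    # array, any prediction beyond the sample needs out-of-sample values for
    # it; that is why the call above must fail, and the obs_intercept
    # arguments below are shape-checked.)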

    # Check that an error is raised when a non-two-dimensional obs_intercept
    # is given
    assert_raises(ValueError,
                  res.predict,
                  end=res.nobs + 1,
                  obs_intercept=np.zeros(1))

    # Check that an error is raised when an obs_intercept with incorrect length
    # is given
    assert_raises(ValueError,
                  res.predict,
                  end=res.nobs + 1,
                  obs_intercept=np.zeros(2))

    # Check that start=None gives start=0 and end=None gives end=nobs
    assert_equal(res.predict().forecasts.shape, (1, res.nobs))

    # Check that dynamic=True begins dynamic prediction immediately
    # TODO just a smoke test
    res.predict(dynamic=True)

    # Check that on success, PredictionResults object is returned
    prediction_results = res.predict(start=3, end=5)
    assert_equal(isinstance(prediction_results, PredictionResults), True)

    # Check for correctly subset representation arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.endog.shape, (1, 2))
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_endog, k_states) = (1, 1)
    assert_equal(prediction_results.design.shape, (1, 1))
    # (k_endog, k_endog) = (1, 1)
    assert_equal(prediction_results.obs_cov.shape, (1, 1))
    # (k_state,) = (1,)
    assert_equal(prediction_results.state_intercept.shape, (1, ))
    # (k_state, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_state, k_state) = (1, 1)
    assert_equal(prediction_results.transition.shape, (1, 1))
    # (k_state, k_posdef) = (1, 1)
    assert_equal(prediction_results.selection.shape, (1, 1))
    # (k_posdef, k_posdef) = (1, 1)
    assert_equal(prediction_results.state_cov.shape, (1, 1))

    # Check for correctly subset filter output arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.forecasts.shape, (1, 2))
    assert_equal(prediction_results.forecasts_error.shape, (1, 2))
    # (k_states, npredictions) = (1, 2)
    assert_equal(prediction_results.filtered_state.shape, (1, 2))
    assert_equal(prediction_results.predicted_state.shape, (1, 2))
    # (k_endog, k_endog, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.forecasts_error_cov.shape, (1, 1, 2))
    # (k_states, k_states, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.filtered_state_cov.shape, (1, 1, 2))
    assert_equal(prediction_results.predicted_state_cov.shape, (1, 1, 2))

    # Check for invalid attribute
    assert_raises(AttributeError, getattr, prediction_results, 'test')

    # Check that an error is raised when a non-two-dimensional obs_cov
    # is given
    # ...and...
    # Check that an error is raised when an obs_cov with incorrect length
    # is given
    mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_cov'] = np.zeros((1, 1, 10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1
    res = mod.filter()

    assert_raises(ValueError,
                  res.predict,
                  end=res.nobs + 1,
                  obs_cov=np.zeros((1, 1)))
    assert_raises(ValueError,
                  res.predict,
                  end=res.nobs + 1,
                  obs_cov=np.zeros((1, 1, 2)))
class Clark1989(object):
    """
    Clark's (1989) bivariate unobserved components model of real GDP (as
    presented in Kim and Nelson, 1999)

    Tests two-dimensional observation data.

    Test data produced using GAUSS code described in Kim and Nelson (1999) and
    found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm

    See `results.results_kalman_filter` for more information.
    """
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_bi
        self.true_states = pd.DataFrame(self.true['states'])

        # GDP and Unemployment, Quarterly, 1948.1 - 1995.3
        data = pd.DataFrame(
            self.true['data'],
            index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
            columns=['GDP', 'UNEMP']
        )[4:]
        data['GDP'] = np.log(data['GDP'])
        data['UNEMP'] = (data['UNEMP']/100)

        k_states = 6
        self.model = KalmanFilter(k_endog=2, k_states=k_states, **kwargs)
        self.model.bind(np.ascontiguousarray(data.values))

        # Statespace representation
        self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
        self.model.transition[
            ([0, 0, 1, 1, 2, 3, 4, 5],
             [0, 4, 1, 2, 1, 2, 4, 5],
             [0, 0, 0, 0, 0, 0, 0, 0])
        ] = [1, 1, 0, 0, 1, 1, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
         phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
            self.true['parameters'],
        )
        self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
            alpha_1, alpha_2, alpha_3
        ]
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.model.obs_cov[1, 1, 0] = sigma_ec**2
        self.model.state_cov[
            np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
            sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
        ]

        # Initialization
        initial_state = np.zeros((k_states,))
        initial_state_cov = np.eye(k_states)*100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov),
            self.model.transition[:, :, 0].T
        )
        self.model.initialize_known(initial_state, initial_state_cov)

    def run_filter(self):
        # Filter the data
        self.results = self.model.filter()

    def test_loglike(self):
        assert_almost_equal(
            # self.results.llf_obs[self.true['start']:].sum(),
            self.results.llf_obs[0:].sum(),
            self.true['loglike'], 2
        )

    def test_filtered_state(self):
        assert_almost_equal(
            self.results.filtered_state[0][self.true['start']:],
            self.true_states.iloc[:, 0], 4
        )
        assert_almost_equal(
            self.results.filtered_state[1][self.true['start']:],
            self.true_states.iloc[:, 1], 4
        )
        assert_almost_equal(
            self.results.filtered_state[4][self.true['start']:],
            self.true_states.iloc[:, 2], 4
        )
        assert_almost_equal(
            self.results.filtered_state[5][self.true['start']:],
            self.true_states.iloc[:, 3], 4
        )
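

# A minimal usage sketch for the Clark1989 test case above (not part of the
# original tests; it assumes the imports at the top of this file and that the
# `results_kalman_filter.uc_bi` results are importable): construct the case,
# run the Kalman filter, then execute its checks.
def run_clark1989_checks(**kwargs):
    # `run_clark1989_checks` is a hypothetical helper name used only here.
    case = Clark1989(**kwargs)
    case.run_filter()
    case.test_loglike()
    case.test_filtered_state()
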
def test_impulse_responses():
    # Test for impulse response functions

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Random walk: 2-unit impulse response (i.e. non-orthogonalized irf) is 2
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, impulse=[2])
    desired = np.ones((11, 1)) * 2

    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation response (i.e. orthogonalized irf) is
    # sigma for all periods (here sigma^2 = 2)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, orthogonalized=True)
    desired = np.ones((11, 1)) * 2**0.5

    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation cumulative response (i.e. cumulative
    # orthogonalized irf)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    actual = mod.impulse_responses(steps=10, impulse=[1], orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    assert_allclose(actual, desired)

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods, even when intercepts are present
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['state_intercept', 0] = 100.
    mod['design', 0, 0] = 1.
    mod['obs_intercept', 0] = -1000.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Univariate model (random walk): test that an error is thrown when
    # a multivariate or empty "impulse" is sent
    mod = KalmanFilter(k_endog=1, k_states=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=[1,1])
    assert_raises(ValueError, mod.impulse_responses, impulse=[])

    # Univariate model with two uncorrelated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.eye(2)

    desired = np.ones((11, 1))

    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1,0])
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0,1])
    assert_allclose(actual, desired)

    # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1,0], orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0,1], orthogonalized=True)
    assert_allclose(actual, desired)

    # Univariate model with two correlated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    desired = np.ones((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different responses
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired + desired * 0.5)

    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, desired)

    # Multivariate model with two correlated shocks
    mod = KalmanFilter(k_endog=2, k_states=2)
    mod['design', :, :] = np.eye(2)
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    ones = np.ones((11, 1))
    zeros = np.zeros((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's, but
    # only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, np.c_[ones, zeros])

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, np.c_[zeros, ones])

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different
    # responses, and only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, np.c_[ones, ones * 0.5])

    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, np.c_[zeros, ones])

    # AR(1) model generates a geometrically declining series
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1,0,0))
    phi = 0.5
    mod.update([phi, 1])

    desired = np.cumprod(np.r_[1, [phi]*10])

    # Test going through the model directly
    actual = mod.ssm.impulse_responses(steps=10)
    assert_allclose(actual[:, 0], desired)

    # Test going through the results object
    res = mod.filter([phi, 1.])
    actual = res.impulse_responses(steps=10)
    assert_allclose(actual, desired)

# Example #18
def test_impulse_responses():
    # Test for impulse response functions

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Random walk: 2-unit impulse response (i.e. non-orthogonalized irf) is 2
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, impulse=[2])
    desired = np.ones((11, 1)) * 2

    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation response (i.e. orthogonalized irf) is
    # sigma for all periods (here sigma^2 = 2)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, orthogonalized=True)
    desired = np.ones((11, 1)) * 2**0.5

    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation cumulative response (i.e. cumulative
    # orthogonalized irf)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10, orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    actual = mod.impulse_responses(steps=10, impulse=[1], orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]

    assert_allclose(actual, desired)

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods, even when intercepts are present
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['state_intercept', 0] = 100.
    mod['design', 0, 0] = 1.
    mod['obs_intercept', 0] = -1000.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.

    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))

    assert_allclose(actual, desired)

    # Univariate model (random walk): test that an error is thrown when
    # a multivariate or empty "impulse" is sent
    mod = KalmanFilter(k_endog=1, k_states=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=[1,1])
    assert_raises(ValueError, mod.impulse_responses, impulse=[])

    # Univariate model with two uncorrelated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.eye(2)

    desired = np.ones((11, 1))

    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1,0])
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0,1])
    assert_allclose(actual, desired)

    # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1,0], orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0,1], orthogonalized=True)
    assert_allclose(actual, desired)

# Example #19
    # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[1,0], orthogonalized=True)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=[0,1], orthogonalized=True)
    assert_allclose(actual, desired)
    # Univariate model with two correlated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    desired = np.ones((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)

    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different responses

# Example #20
class Clark1989(object):
    """
    Clark's (1989) bivariate unobserved components model of real GDP (as
    presented in Kim and Nelson, 1999)

    Tests two-dimensional observation data.

    Test data produced using GAUSS code described in Kim and Nelson (1999) and
    found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm

    See `results.results_kalman_filter` for more information.
    """
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_bi
        self.true_states = pd.DataFrame(self.true['states'])

        # GDP and Unemployment, Quarterly, 1948.1 - 1995.3
        data = pd.DataFrame(self.true['data'],
                            index=pd.date_range('1947-01-01',
                                                '1995-07-01',
                                                freq='QS'),
                            columns=['GDP', 'UNEMP'])[4:]
        data['GDP'] = np.log(data['GDP'])
        data['UNEMP'] = (data['UNEMP'] / 100)

        k_states = 6
        self.model = KalmanFilter(k_endog=2, k_states=k_states, **kwargs)
        self.model.bind(np.ascontiguousarray(data.values))

        # Statespace representation
        self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
        self.model.transition[
            ([0, 0, 1, 1, 2, 3, 4, 5],
             [0, 4, 1, 2, 1, 2, 4, 5],
             [0, 0, 0, 0, 0, 0, 0, 0])
        ] = [1, 1, 0, 0, 1, 1, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec, phi_1, phi_2, alpha_1,
         alpha_2, alpha_3) = np.array(self.true['parameters'], )
        self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
            alpha_1, alpha_2, alpha_3
        ]
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.model.obs_cov[1, 1, 0] = sigma_ec**2
        self.model.state_cov[np.diag_indices(k_states) +
                             (np.zeros(k_states, dtype=int), )] = [
                                 sigma_v**2, sigma_e**2, 0, 0, sigma_w**2,
                                 sigma_vl**2
                             ]

        # Initialization
        initial_state = np.zeros((k_states, ))
        initial_state_cov = np.eye(k_states) * 100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov),
            self.model.transition[:, :, 0].T)
        self.model.initialize_known(initial_state, initial_state_cov)

    def run_filter(self):
        # Filter the data
        self.results = self.model.filter()

    def test_loglike(self):
        assert_almost_equal(
            # self.results.llf_obs[self.true['start']:].sum(),
            self.results.llf_obs[0:].sum(),
            self.true['loglike'],
            2)

    def test_filtered_state(self):
        assert_almost_equal(
            self.results.filtered_state[0][self.true['start']:],
            self.true_states.iloc[:, 0], 4)
        assert_almost_equal(
            self.results.filtered_state[1][self.true['start']:],
            self.true_states.iloc[:, 1], 4)
        assert_almost_equal(
            self.results.filtered_state[4][self.true['start']:],
            self.true_states.iloc[:, 2], 4)
        assert_almost_equal(
            self.results.filtered_state[5][self.true['start']:],
            self.true_states.iloc[:, 3], 4)

# Example #21
def test_simulate():
    # Test for simulation of new time-series
    from scipy.signal import lfilter

    # Common parameters
    nsimulations = 10
    sigma2 = 2
    measurement_shocks = np.zeros(nsimulations)
    state_shocks = np.random.normal(scale=sigma2**0.5, size=nsimulations)

    # Random walk model, so simulated series is just the cumulative sum of
    # the shocks
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(nsimulations,
                          measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1]]

    assert_allclose(actual, desired)
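    # Timing note: with this state space timing, the observation at period t
    # reflects the state before the period-t state shock, so the simulated
    # series starts at 0 and each shock only appears from the next period
    # onward (hence the leading 0 and the [:-1] in `desired`).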

    # Local level model, so simulated series is just the cumulative sum of
    # the shocks plus the measurement shock
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(nsimulations,
                          measurement_shocks=np.ones(nsimulations),
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1, np.cumsum(state_shocks)[:-1] + 1]

    assert_allclose(actual, desired)

    # Local level-like model with observation and state intercepts, so
    # simulated series is just the cumulative sum of the shocks minus the state
    # intercept, plus the observation intercept and the measurement shock
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['obs_intercept', 0, 0] = 5.
    mod['design', 0, 0] = 1.
    mod['state_intercept', 0, 0] = -2.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(nsimulations,
                          measurement_shocks=np.ones(nsimulations),
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1 + 5, np.cumsum(state_shocks - 2)[:-1] + 1 + 5]

    assert_allclose(actual, desired)

    # Model with time-varying observation intercept
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10) * 1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(nsimulations,
                          measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1] + np.arange(1, 10)]

    assert_allclose(actual, desired)

    # Model with time-varying observation intercept, check that error is raised
    # if more simulations are requested than are nobs.
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10) * 1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    assert_raises(ValueError, mod.simulate, nsimulations + 1,
                  measurement_shocks, state_shocks)

    # ARMA(1,1): phi = [0.1], theta = [0.5], sigma^2 = 2
    phi = np.r_[0.1]
    theta = np.r_[0.5]
    mod = sarimax.SARIMAX([0], order=(1, 0, 1))
    mod.update(np.r_[phi, theta, sigma2])

    actual = mod.ssm.simulate(nsimulations,
                              measurement_shocks=measurement_shocks,
                              state_shocks=state_shocks)[0].squeeze()
    desired = lfilter([1, theta], [1, -phi], np.r_[0, state_shocks[:-1]])

    assert_allclose(actual, desired)
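    # The lfilter comparison works because an ARMA(1, 1) driven only by the
    # (lagged) state shocks is equivalent to passing that shock series through
    # the transfer function theta(L) / phi(L), i.e.
    # scipy.signal.lfilter([1, theta], [1, -phi], ...).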

    # SARIMAX(1,0,1)x(1,0,1,4), this time using the results object call
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2],
                          order=(1, 0, 1),
                          seasonal_order=(1, 0, 1, 4))
    res = mod.filter([0.1, 0.5, 0.2, -0.3, 1])

    actual = res.simulate(nsimulations,
                          measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks)[0].squeeze()
    desired = lfilter(res.polynomial_reduced_ma, res.polynomial_reduced_ar,
                      np.r_[0, state_shocks[:-1]])

    assert_allclose(actual, desired)

# Example #22
def test_predict():
    # Tests of invalid calls to the predict function

    warnings.simplefilter("always")

    endog = np.ones((10, 1))
    mod = KalmanFilter(endog, k_states=1, initialization="approximate_diffuse")
    mod["design", :] = 1
    mod["obs_intercept"] = np.zeros((1, 10))
    mod["selection", :] = 1
    mod["state_cov", :] = 1

    # Check that we need both forecasts and predicted output for prediction
    mod.memory_no_forecast = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_forecast = False

    mod.memory_no_predicted = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_predicted = False

    # Now get a clean filter object
    res = mod.filter()

    # Check that start < 0 is an error
    assert_raises(ValueError, res.predict, start=-1)

    # Check that end < start is an error
    assert_raises(ValueError, res.predict, start=2, end=1)

    # Check that dynamic < 0 is an error
    assert_raises(ValueError, res.predict, dynamic=-1)

    # Check that dynamic > end is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=1, dynamic=2)
        message = "Dynamic prediction specified to begin after the end of" " prediction, and so has no effect."
        assert_equal(str(w[0].message), message)

    # Check that dynamic > nobs is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1, 1)))
        message = (
            "Dynamic prediction specified to begin during" " out-of-sample forecasting period, and so has no" " effect."
        )
        assert_equal(str(w[0].message), message)

    # Check for a warning when providing a non-used statespace matrix
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=res.nobs + 1, design=True, obs_intercept=np.zeros((1, 1)))
        message = "Model has time-invariant design matrix, so the design" " argument to `predict` has been ignored."
        assert_equal(str(w[0].message), message)

    # Check that an error is raised when a new time-varying matrix is not
    # provided
    assert_raises(ValueError, res.predict, end=res.nobs + 1)

    # Check that an error is raised when a non-two-dimensional obs_intercept
    # is given
    assert_raises(ValueError, res.predict, end=res.nobs + 1, obs_intercept=np.zeros(1))

    # Check that an error is raised when an obs_intercept with incorrect length
    # is given
    assert_raises(ValueError, res.predict, end=res.nobs + 1, obs_intercept=np.zeros(2))

    # Check that start=None gives start=0 and end=None gives end=nobs
    assert_equal(res.predict().shape, (1, res.nobs))

    # Check that dynamic=True begins dynamic prediction immediately
    # TODO just a smoke test
    res.predict(dynamic=True)

    # Check that full_results=True yields a FilterResults object
    assert_equal(isinstance(res.predict(full_results=True), FilterResults), True)

    # Check that an error is raised when a non-two-dimensional obs_cov
    # is given
    # ...and...
    # Check that an error is raised when an obs_cov with incorrect length
    # is given
    mod = KalmanFilter(endog, k_states=1, initialization="approximate_diffuse")
    mod["design", :] = 1
    mod["obs_cov"] = np.zeros((1, 1, 10))
    mod["selection", :] = 1
    mod["state_cov", :] = 1
    res = mod.filter()

    assert_raises(ValueError, res.predict, end=res.nobs + 1, obs_cov=np.zeros((1, 1)))
    assert_raises(ValueError, res.predict, end=res.nobs + 1, obs_cov=np.zeros((1, 1, 2)))
def test_predict():
    # Tests of invalid calls to the predict function

    warnings.simplefilter("always")

    endog = np.ones((10,1))
    mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_intercept'] = np.zeros((1,10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1

    # Check that we need both forecasts and predicted output for prediction
    mod.memory_no_forecast = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_forecast = False

    mod.memory_no_predicted = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_predicted = False

    # Now get a clean filter object
    res = mod.filter()

    # Check that start < 0 is an error
    assert_raises(ValueError, res.predict, start=-1)

    # Check that end < start is an error
    assert_raises(ValueError, res.predict, start=2, end=1)

    # Check that dynamic < 0 is an error
    assert_raises(ValueError, res.predict, dynamic=-1)

    # Check that dynamic > end is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=1, dynamic=2)
        message = ('Dynamic prediction specified to begin after the end of'
                   ' prediction, and so has no effect.')
        assert_equal(str(w[0].message), message)

    # Check that dynamic > nobs is a warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1,1)))
        message = ('Dynamic prediction specified to begin during'
                   ' out-of-sample forecasting period, and so has no'
                   ' effect.')
        assert_equal(str(w[0].message), message)

    # Check for a warning when providing an unused statespace matrix
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=res.nobs+1, design=True, obs_intercept=np.zeros((1,1)))
        message = ('Model has time-invariant design matrix, so the design'
                   ' argument to `predict` has been ignored.')
        assert_equal(str(w[0].message), message)

    # Check that an error is raised when a new time-varying matrix is not
    # provided
    assert_raises(ValueError, res.predict, end=res.nobs+1)

    # Check that an error is raised when a non-two-dimensional obs_intercept
    # is given
    assert_raises(ValueError, res.predict, end=res.nobs+1,
                  obs_intercept=np.zeros(1))

    # Check that an error is raised when an obs_intercept with incorrect length
    # is given
    assert_raises(ValueError, res.predict, end=res.nobs+1,
                  obs_intercept=np.zeros(2))

    # Check that start=None gives start=0 and end=None gives end=nobs
    assert_equal(res.predict().forecasts.shape, (1,res.nobs))

    # Check that dynamic=True begins dynamic prediction immediately
    # TODO just a smoke test
    res.predict(dynamic=True)

    # Check that on success, PredictionResults object is returned
    prediction_results = res.predict(start=3, end=5)
    assert_equal(isinstance(prediction_results, PredictionResults), True)

    # Check for correctly subset representation arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.endog.shape, (1, 2))
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_endog, k_states) = (1, 1)
    assert_equal(prediction_results.design.shape, (1, 1))
    # (k_endog, k_endog) = (1, 1)
    assert_equal(prediction_results.obs_cov.shape, (1, 1))
    # (k_state,) = (1,)
    assert_equal(prediction_results.state_intercept.shape, (1,))
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_state, k_state) = (1, 1)
    assert_equal(prediction_results.transition.shape, (1, 1))
    # (k_state, k_posdef) = (1, 1)
    assert_equal(prediction_results.selection.shape, (1, 1))
    # (k_posdef, k_posdef) = (1, 1)
    assert_equal(prediction_results.state_cov.shape, (1, 1))

    # Check for correctly subset filter output arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.forecasts.shape, (1, 2))
    assert_equal(prediction_results.forecasts_error.shape, (1, 2))
    # (k_states, npredictions) = (1, 2)
    assert_equal(prediction_results.filtered_state.shape, (1, 2))
    assert_equal(prediction_results.predicted_state.shape, (1, 2))
    # (k_endog, k_endog, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.forecasts_error_cov.shape, (1, 1, 2))
    # (k_states, k_states, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.filtered_state_cov.shape, (1, 1, 2))
    assert_equal(prediction_results.predicted_state_cov.shape, (1, 1, 2))

    # Check for invalid attribute
    assert_raises(AttributeError, getattr, prediction_results, 'test')

    # Check that an error is raised when an obs_cov without a time dimension
    # (i.e. not three-dimensional) is given
    # ...and...
    # Check that an error is raised when an obs_cov with an incorrect number
    # of time points is given
    mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_cov'] = np.zeros((1,1,10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1
    res = mod.filter()

    assert_raises(ValueError, res.predict, end=res.nobs+1,
                  obs_cov=np.zeros((1,1)))
    assert_raises(ValueError, res.predict, end=res.nobs+1,
                  obs_cov=np.zeros((1,1,2)))
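
    # Sketch: out-of-sample prediction with a time-varying obs_cov presumably
    # succeeds once the new period's value is supplied with an explicit time
    # dimension, i.e. shape (k_endog, k_endog, n_new_periods) = (1, 1, 1) here.
    res.predict(end=res.nobs+1, obs_cov=np.zeros((1, 1, 1)))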
class Clark1987(object):
    """
    Clark's (1987) univariate unobserved components model of real GDP (as
    presented in Kim and Nelson, 1999)

    Test data produced using GAUSS code described in Kim and Nelson (1999) and
    found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm

    See `results.results_kalman_filter` for more information.
    """
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_uni
        self.true_states = pd.DataFrame(self.true['states'])

        # GDP, Quarterly, 1947.1 - 1995.3
        data = pd.DataFrame(
            self.true['data'],
            index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
            columns=['GDP']
        )
        data['lgdp'] = np.log(data['GDP'])

        # Construct the statespace representation
        k_states = 4
        self.model = KalmanFilter(k_endog=1, k_states=k_states, **kwargs)
        self.model.bind(data['lgdp'].values)

        self.model.design[:, :, 0] = [1, 1, 0, 0]
        self.model.transition[([0, 0, 1, 1, 2, 3],
                               [0, 3, 1, 2, 1, 3],
                               [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
            self.true['parameters']
        )
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        # Set the diagonal of the (time-invariant) t=0 slice of state_cov
        self.model.state_cov[
            np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
            sigma_v**2, sigma_e**2, 0, sigma_w**2
        ]

        # Initialization
        initial_state = np.zeros((k_states,))
        initial_state_cov = np.eye(k_states)*100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov),
            self.model.transition[:, :, 0].T
        )
        self.model.initialize_known(initial_state, initial_state_cov)

    def run_filter(self):
        # Filter the data
        self.results = self.model.filter()

    def test_loglike(self):
        assert_almost_equal(
            self.results.llf_obs[self.true['start']:].sum(),
            self.true['loglike'], 5
        )

    def test_filtered_state(self):
        assert_almost_equal(
            self.results.filtered_state[0][self.true['start']:],
            self.true_states.iloc[:, 0], 4
        )
        assert_almost_equal(
            self.results.filtered_state[1][self.true['start']:],
            self.true_states.iloc[:, 1], 4
        )
        assert_almost_equal(
            self.results.filtered_state[3][self.true['start']:],
            self.true_states.iloc[:, 2], 4
        )
def test_simulate():
    # Test for simulation of new time-series
    from scipy.signal import lfilter

    # Common parameters
    nsimulations = 10
    sigma2 = 2
    measurement_shocks = np.zeros(nsimulations)
    state_shocks = np.random.normal(scale=sigma2**0.5, size=nsimulations)

    # Random walk model, so simulated series is just the cumulative sum of
    # the shocks
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(
        nsimulations, measurement_shocks=measurement_shocks,
        state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1]]

    assert_allclose(actual, desired)
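
    # Sketch of the same result via the state space recursions by hand
    # (assuming a zero initial state): y_t = Z alpha_t + eps_t and
    # alpha_{t+1} = T alpha_t + R eta_t, so the observation at time t only
    # reflects the state shocks up through time t-1 (hence the leading zero).
    state = 0.
    manual = np.zeros(nsimulations)
    for t in range(nsimulations):
        manual[t] = state + measurement_shocks[t]
        state = state + state_shocks[t]
    assert_allclose(manual, desired)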

    # Local level model, so simulated series is just the cumulative sum of
    # the shocks plus the measurement shock
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(
        nsimulations, measurement_shocks=np.ones(nsimulations),
        state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1, np.cumsum(state_shocks)[:-1] + 1]

    assert_allclose(actual, desired)

    # Local level-like model with observation and state intercepts, so
    # simulated series is just the cumulative sum of the shocks minus the state
    # intercept, plus the observation intercept and the measurement shock
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['obs_intercept', 0, 0] = 5.
    mod['design', 0, 0] = 1.
    mod['state_intercept', 0, 0] = -2.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(
        nsimulations, measurement_shocks=np.ones(nsimulations),
        state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1 + 5, np.cumsum(state_shocks - 2)[:-1] + 1 + 5]

    assert_allclose(actual, desired)
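
    # The same check by hand, now with the intercepts d = 5 (observation) and
    # c = -2 (state) entering the recursions, and a unit measurement shock
    # (again assuming a zero initial state):
    # y_t = d + alpha_t + eps_t, alpha_{t+1} = c + alpha_t + eta_t.
    state = 0.
    manual = np.zeros(nsimulations)
    for t in range(nsimulations):
        manual[t] = 5. + state + 1.
        state = -2. + state + state_shocks[t]
    assert_allclose(manual, desired)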

    # Model with time-varying observation intercept
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10)*1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.

    actual = mod.simulate(
        nsimulations, measurement_shocks=measurement_shocks,
        state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1] + np.arange(1, 10)]

    assert_allclose(actual, desired)

    # Model with time-varying observation intercept; check that an error is
    # raised if more simulations are requested than the model has time
    # points (nobs).
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10)*1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    assert_raises(ValueError, mod.simulate, nsimulations+1, measurement_shocks,
                  state_shocks)

    # ARMA(1,1): phi = [0.1], theta = [0.5], sigma^2 = 2
    phi = 0.1
    theta = 0.5
    mod = sarimax.SARIMAX([0], order=(1, 0, 1))
    mod.update(np.r_[phi, theta, sigma2])

    actual = mod.ssm.simulate(
        nsimulations, measurement_shocks=measurement_shocks,
        state_shocks=state_shocks,
        initial_state=np.zeros(mod.k_states))[0].squeeze()
    desired = lfilter([1, theta], [1, -phi], np.r_[0, state_shocks[:-1]])

    assert_allclose(actual, desired)
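
    # Sketch: the same series from the ARMA(1, 1) difference equation,
    # y_t = phi * y_{t-1} + e_t + theta * e_{t-1}, where the state shocks
    # enter with a one-period delay (hence the leading zero passed to lfilter).
    e = np.r_[0, state_shocks[:-1]]
    manual = np.zeros(nsimulations)
    for t in range(nsimulations):
        ar = phi * manual[t - 1] if t > 0 else 0.
        ma = theta * e[t - 1] if t > 0 else 0.
        manual[t] = ar + e[t] + ma
    assert_allclose(manual, desired)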

    # SARIMAX(1,0,1)x(1,0,1,4), this time calling simulate via the results
    # object
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 1),
                          seasonal_order=(1, 0, 1, 4))
    res = mod.filter([0.1, 0.5, 0.2, -0.3, 1])

    actual = res.simulate(
        nsimulations, measurement_shocks=measurement_shocks,
        state_shocks=state_shocks, initial_state=np.zeros(mod.k_states))
    desired = lfilter(
        res.polynomial_reduced_ma, res.polynomial_reduced_ar,
        np.r_[0, state_shocks[:-1]])

    assert_allclose(actual, desired)
Example #26
def test_kalman_filter_pickle(data):
    # Construct the statespace representation
    true = results_kalman_filter.uc_uni
    k_states = 4
    model = KalmanFilter(k_endog=1, k_states=k_states)
    model.bind(data['lgdp'].values)

    model.design[:, :, 0] = [1, 1, 0, 0]
    model.transition[([0, 0, 1, 1, 2, 3],
                      [0, 3, 1, 2, 1, 3],
                      [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
    model.selection = np.eye(model.k_states)

    # Update matrices with given parameters
    (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
        true['parameters']
    )
    model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    model.state_cov[
        np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
        sigma_v ** 2, sigma_e ** 2, 0, sigma_w ** 2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states) * 100

    # Initialization: modification
    initial_state_cov = np.dot(
        np.dot(model.transition[:, :, 0], initial_state_cov),
        model.transition[:, :, 0].T
    )
    model.initialize_known(initial_state, initial_state_cov)
    pkl_mod = cPickle.loads(cPickle.dumps(model))

    results = model.filter()
    pkl_results = pkl_mod.filter()

    assert_allclose(results.llf_obs[true['start']:].sum(),
                    pkl_results.llf_obs[true['start']:].sum())
    assert_allclose(results.filtered_state[0][true['start']:],
                    pkl_results.filtered_state[0][true['start']:])
    assert_allclose(results.filtered_state[1][true['start']:],
                    pkl_results.filtered_state[1][true['start']:])
    assert_allclose(results.filtered_state[3][true['start']:],
                    pkl_results.filtered_state[3][true['start']:])
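
    # A light additional sanity check (sketch): the pickle round trip should
    # also preserve the system matrices themselves.
    assert_allclose(model['transition'], pkl_mod['transition'])
    assert_allclose(model['state_cov'], pkl_mod['state_cov'])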
Example #27
class Clark1987(object):
    """
    Clark's (1987) univariate unobserved components model of real GDP (as
    presented in Kim and Nelson, 1999)

    Test data produced using GAUSS code described in Kim and Nelson (1999) and
    found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm

    See `results.results_kalman_filter` for more information.
    """
    def __init__(self, dtype=float, **kwargs):
        self.true = results_kalman_filter.uc_uni
        self.true_states = pd.DataFrame(self.true['states'])

        # GDP, Quarterly, 1947.1 - 1995.3
        data = pd.DataFrame(self.true['data'],
                            index=pd.date_range('1947-01-01',
                                                '1995-07-01',
                                                freq='QS'),
                            columns=['GDP'])
        data['lgdp'] = np.log(data['GDP'])

        # Construct the statespace representation
        k_states = 4
        self.model = KalmanFilter(k_endog=1, k_states=k_states, **kwargs)
        self.model.bind(data['lgdp'].values)

        self.model.design[:, :, 0] = [1, 1, 0, 0]
        self.model.transition[([0, 0, 1, 1, 2, 3],
                               [0, 3, 1, 2, 1, 3],
                               [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
        self.model.selection = np.eye(self.model.k_states)

        # Update matrices with given parameters
        (sigma_v, sigma_e, sigma_w, phi_1,
         phi_2) = np.array(self.true['parameters'])
        self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
        self.model.state_cov[
            np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
            sigma_v**2, sigma_e**2, 0, sigma_w**2
        ]

        # Initialization
        initial_state = np.zeros((k_states, ))
        initial_state_cov = np.eye(k_states) * 100

        # Initialization: modification
        initial_state_cov = np.dot(
            np.dot(self.model.transition[:, :, 0], initial_state_cov),
            self.model.transition[:, :, 0].T)
        self.model.initialize_known(initial_state, initial_state_cov)

    def run_filter(self):
        # Filter the data
        self.results = self.model.filter()

    def test_loglike(self):
        assert_almost_equal(self.results.llf_obs[self.true['start']:].sum(),
                            self.true['loglike'], 5)

    def test_filtered_state(self):
        assert_almost_equal(
            self.results.filtered_state[0][self.true['start']:],
            self.true_states.iloc[:, 0], 4)
        assert_almost_equal(
            self.results.filtered_state[1][self.true['start']:],
            self.true_states.iloc[:, 1], 4)
        assert_almost_equal(
            self.results.filtered_state[3][self.true['start']:],
            self.true_states.iloc[:, 2], 4)