def test_cython():
    """Test creation/re-creation of the Cython ``_kalman_filter`` objects.

    Covers: per-dtype creation for every supported BLAS prefix, and
    re-creation when the bound data or the time-varying character of a
    system matrix changes.
    """
    # Check that datatypes are correct: one Cython filter class per prefix
    for prefix, dtype in tools.prefix_dtype_map.items():
        endog = np.array(1., ndmin=2, dtype=dtype)
        mod = KalmanFilter(k_endog=1, k_states=1, dtype=dtype)

        # Bind data and initialize the ?KalmanFilter object
        mod.bind(endog)
        mod._initialize_filter()

        # Check that the dtype and prefix are correct
        assert_equal(mod.prefix, prefix)
        assert_equal(mod.dtype, dtype)

        # Test that a dKalmanFilter instance was created
        assert_equal(prefix in mod._kalman_filters, True)
        kf = mod._kalman_filters[prefix]
        assert_equal(isinstance(kf, tools.prefix_kalman_filter_map[prefix]),
                     True)

        # Test that the default returned _kalman_filter is the above instance
        assert_equal(mod._kalman_filter, kf)

    # Check that upcasting datatypes / ?KalmanFilter works (e.g. d -> z)
    mod = KalmanFilter(k_endog=1, k_states=1)

    # Default dtype is float
    assert_equal(mod.prefix, 'd')
    assert_equal(mod.dtype, np.float64)

    # Prior to initialization, no ?KalmanFilter exists
    assert_equal(mod._kalman_filter, None)

    # Bind data and initialize the ?KalmanFilter object
    endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.float64))
    mod.bind(endog)
    mod._initialize_filter()
    kf = mod._kalman_filters['d']

    # Rebind data, still float, check that we haven't changed
    mod.bind(endog)
    mod._initialize_filter()
    assert_equal(mod._kalman_filter, kf)

    # Force creating new ?Statespace and ?KalmanFilter, by changing the
    # time-varying character of an array (design becomes time-varying)
    mod.design = np.zeros((1, 1, 2))
    mod._initialize_filter()
    assert_equal(mod._kalman_filter == kf, False)
    kf = mod._kalman_filters['d']

    # Rebind data, now complex, check that the ?KalmanFilter instance has
    # changed (upcast from prefix 'd' to 'z')
    endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.complex128))
    mod.bind(endog)
    assert_equal(mod._kalman_filter == kf, False)
def test_missing():
    """Check that NaN (missing) observation columns do not change the
    per-observation loglikelihood.

    The same local-level-type model is fitted to a base dataset and to
    datasets with all-NaN columns prepended, appended, and interleaved;
    all must produce identical ``loglikeobs`` output.
    """
    def _loglikeobs(endog):
        # Build and parameterize the common test model for a given dataset.
        # (The original test repeated this block verbatim four times.)
        mod = KalmanFilter(np.ascontiguousarray(endog), k_states=1,
                           initialization='approximate_diffuse')
        mod['design', :, :] = 1
        mod['obs_cov', :, :] = np.eye(mod.k_endog) * 0.5
        mod['transition', :, :] = 0.5
        mod['selection', :, :] = 1
        mod['state_cov', :, :] = 0.5
        return mod.loglikeobs()

    # Datasets: an observed column and an all-missing (NaN) column
    endog = np.arange(10).reshape(10, 1)
    na = endog.copy() * np.nan

    # Base model: two observed columns
    llf = _loglikeobs(np.c_[endog, endog])

    # Model with prepended nans
    assert_allclose(_loglikeobs(np.c_[na, na, endog, endog]), llf)

    # Model with appended nans
    assert_allclose(_loglikeobs(np.c_[endog, endog, na, na]), llf)

    # Model with injected (interleaved) nans
    assert_allclose(_loglikeobs(np.c_[endog, na, endog, na]), llf)
def __init__(self, *args, **kwargs):
    """Build a trivial univariate model bound to dummy data.

    Extra positional/keyword arguments are forwarded to ``KalmanFilter``.
    """
    # Dummy observations 0..9 for a single-state, single-endog model
    dummy_endog = np.arange(10)
    self.model = KalmanFilter(k_endog=1, k_states=1, *args, **kwargs)
    self.model.bind(dummy_endog)
def test_no_endog():
    """Filtering before any data is bound must raise ``RuntimeError``."""
    mod = KalmanFilter(k_endog=1, k_states=1)

    # Direct call to the low-level filter initializer
    assert_raises(RuntimeError, mod._initialize_filter)

    # Indirect call through the public filtering entry point
    mod.initialize_approximate_diffuse()
    assert_raises(RuntimeError, mod.filter)
def test_filter():
    """Smoke test: filtering a trivial local-level model returns
    a ``FilterResults`` instance."""
    observations = np.ones((10, 1))
    mod = KalmanFilter(observations, k_states=1,
                       initialization='approximate_diffuse')
    for matrix in ('design', 'selection', 'state_cov'):
        mod[matrix, :] = 1

    # Default filter call yields the standard results class
    res = mod.filter()
    assert_equal(isinstance(res, FilterResults), True)
def test_loglike():
    """``loglikeobs`` must fail when per-observation likelihoods are not
    retained in memory."""
    observations = np.ones((10, 1))
    mod = KalmanFilter(observations, k_states=1,
                       initialization='approximate_diffuse')
    for matrix in ('design', 'selection', 'state_cov'):
        mod[matrix, :] = 1

    # With memory_no_likelihood set, per-period values are unavailable,
    # so asking for them is a RuntimeError
    mod.memory_no_likelihood = True
    assert_raises(RuntimeError, mod.loglikeobs)
def setup_class(cls, dtype=float, **kwargs):
    """Construct the bivariate unobserved-components model (Clark-style)
    against the stored reference results ``uc_bi``.

    Sets up a 6-state representation for log GDP and the unemployment
    rate, fills in the system matrices from the reference parameters, and
    initializes with a known (transformed) state and covariance.
    """
    cls.true = results_kalman_filter.uc_bi
    cls.true_states = pd.DataFrame(cls.true['states'])

    # GDP and Unemployment, Quarterly, 1948.1 - 1995.3
    # (index starts 1947Q1; [4:] drops the first year)
    data = pd.DataFrame(
        cls.true['data'],
        index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
        columns=['GDP', 'UNEMP']
    )[4:]
    data['GDP'] = np.log(data['GDP'])
    data['UNEMP'] = (data['UNEMP']/100)

    k_states = 6
    cls.model = KalmanFilter(k_endog=2, k_states=k_states, **kwargs)
    cls.model.bind(np.ascontiguousarray(data.values))

    # Statespace representation: structural ones/zeros of the design and
    # transition matrices (fancy indexing writes element-by-element at t=0)
    cls.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
    cls.model.transition[
        ([0, 0, 1, 1, 2, 3, 4, 5],
         [0, 4, 1, 2, 1, 2, 4, 5],
         [0, 0, 0, 0, 0, 0, 0, 0])
    ] = [1, 1, 0, 0, 1, 1, 1, 1]
    cls.model.selection = np.eye(cls.model.k_states)

    # Update matrices with given parameters from the reference results
    (sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
     phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
        cls.true['parameters'],
    )
    cls.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
        alpha_1, alpha_2, alpha_3
    ]
    cls.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    cls.model.obs_cov[1, 1, 0] = sigma_ec**2
    # Diagonal of the (time-invariant) state covariance at t=0
    cls.model.state_cov[
        np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
        sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states)*100

    # Initialization: modification — propagate the prior covariance one
    # step through the transition matrix (T P T')
    initial_state_cov = np.dot(
        np.dot(cls.model.transition[:, :, 0], initial_state_cov),
        cls.model.transition[:, :, 0].T
    )
    cls.model.initialize_known(initial_state, initial_state_cov)
def test_kalman_filter_pickle(data):
    """Check that a fully constructed KalmanFilter round-trips through
    pickle and that the unpickled model filters to identical results.

    ``data`` is expected to provide a 'lgdp' column (log GDP) — supplied
    by a fixture elsewhere in the file.
    """
    # Construct the statespace representation (4-state univariate UC model)
    true = results_kalman_filter.uc_uni
    k_states = 4
    model = KalmanFilter(k_endog=1, k_states=k_states)
    model.bind(data['lgdp'].values)

    model.design[:, :, 0] = [1, 1, 0, 0]
    model.transition[([0, 0, 1, 1, 2, 3],
                      [0, 3, 1, 2, 1, 3],
                      [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
    model.selection = np.eye(model.k_states)

    # Update matrices with given parameters from the reference results
    (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
        true['parameters']
    )
    model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    # Diagonal of the state covariance at t=0
    model.state_cov[
        np.diag_indices(k_states) + (np.zeros(k_states, dtype=int),)] = [
        sigma_v ** 2, sigma_e ** 2, 0, sigma_w ** 2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states) * 100

    # Initialization: modification — propagate prior covariance through
    # the transition matrix (T P T')
    initial_state_cov = np.dot(
        np.dot(model.transition[:, :, 0], initial_state_cov),
        model.transition[:, :, 0].T
    )
    model.initialize_known(initial_state, initial_state_cov)

    # Round-trip the model through pickle
    pkl_mod = cPickle.loads(cPickle.dumps(model))

    results = model.filter()
    pkl_results = pkl_mod.filter()

    # Original and unpickled models must agree (compared after the
    # reference 'start' burn-in index)
    assert_allclose(results.llf_obs[true['start']:].sum(),
                    pkl_results.llf_obs[true['start']:].sum())
    assert_allclose(results.filtered_state[0][true['start']:],
                    pkl_results.filtered_state[0][true['start']:])
    assert_allclose(results.filtered_state[1][true['start']:],
                    pkl_results.filtered_state[1][true['start']:])
    assert_allclose(results.filtered_state[3][true['start']:],
                    pkl_results.filtered_state[3][true['start']:])
def main1():
    """Process every raw equity parquet file into volume bars with Kalman
    filter price forecasts, then write the result to the interim data dir.

    Relies on module-level names not visible here: ``rawDataDir``,
    ``interimDataDir``, ``volume_bar_df``, ``getDailyVol``, ``orderFlow``,
    ``df_rolling_autocorr`` — presumably defined/imported earlier in the
    file; TODO confirm.
    """
    for equity in os.listdir(rawDataDir):
        infp = PurePath(str(rawDataDir) + "/" + equity)
        df = pd.read_parquet(infp)
        # Average volume per row, used as the volume-bar threshold
        volume_M = df.volume.sum() / df.shape[0]
        # produce the volume bar
        vbar = volume_bar_df(df, 'volume', volume_M)
        vbar.set_index('dates', inplace=True)
        # return (simple close-to-close return of the bar price)
        vbar['retClose'] = vbar['price'] / vbar['price'].shift(1) - 1
        # daily vol
        vbar['dailyVol'] = getDailyVol(vbar['price'])
        # normOI and VPIN
        vbar = orderFlow(vbar)
        # kf setting, assume random walk (state = price level)
        kf = KalmanFilter(1, 1)
        sigma_h = 0.0001  # hidden (state) noise variance
        sigma_e = 0.001  # obs noise variance
        kf.obs_cov = np.array([sigma_e])
        kf.state_cov = np.array([sigma_h])
        kf.design = np.array([1.0])
        kf.transition = np.array([1.0])
        kf.selection = np.array([1.0])
        # Initialize at the first observed price
        kf.initialize_known(np.array([vbar.price[0]]), np.array([[sigma_h]]))
        kf.bind(np.array(vbar.price.copy()))
        r = kf.filter()
        # One-step-ahead forecasts, errors, and forecast-error std devs
        vbar['forecasts'] = pd.DataFrame(r.forecasts[0], index=vbar.index)
        vbar['forecasts_error'] = pd.DataFrame(r.forecasts_error[0],
                                               index=vbar.index)
        vbar['error_std'] = pd.DataFrame(np.sqrt(r.forecasts_error_cov[0][0]),
                                         index=vbar.index)
        vbar = vbar.dropna()
        # srl_corr: rolling serial autocorrelation of the price
        vbar['srl_corr'] = df_rolling_autocorr(vbar['price'],
                                               window=100).rename('srl_corr')
        vbar = vbar.dropna()
        # output
        tmpPath = str(interimDataDir) + "/" + equity
        outfp = PurePath(tmpPath)
        print(outfp)
        vbar.to_parquet(outfp)
        print("Success: save")
    return
def test_filter():
    """Filtering must accept a valid custom results class and reject an
    incompatible one."""
    data = np.ones((10, 1))
    mod = KalmanFilter(data, k_states=1,
                       initialization='approximate_diffuse')
    for matrix in ('design', 'selection', 'state_cov'):
        mod[matrix, :] = 1

    # Default call returns a FilterResults instance
    default_res = mod.filter()
    assert_equal(isinstance(default_res, FilterResults), True)

    # An arbitrary class is not a valid results container
    assert_raises(ValueError, mod.filter, results=object)

    # Explicitly passing the valid class works as well
    explicit_res = mod.filter(results=FilterResults)
    assert_equal(isinstance(explicit_res, FilterResults), True)
def setup_class(cls, dtype=float, **kwargs):
    """Construct the univariate unobserved-components model against the
    stored reference results ``uc_uni``.

    Sets up a 4-state representation for log GDP, fills in the system
    matrices from the reference parameters, and initializes with a known
    (transformed) state and covariance.
    """
    cls.true = results_kalman_filter.uc_uni
    cls.true_states = pd.DataFrame(cls.true['states'])

    # GDP, Quarterly, 1947.1 - 1995.3
    data = pd.DataFrame(
        cls.true['data'],
        index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
        columns=['GDP']
    )
    data['lgdp'] = np.log(data['GDP'])

    # Construct the statespace representation
    k_states = 4
    cls.model = KalmanFilter(k_endog=1, k_states=k_states, **kwargs)
    cls.model.bind(data['lgdp'].values)

    cls.model.design[:, :, 0] = [1, 1, 0, 0]
    # Structural ones/zeros of the transition matrix (element-wise at t=0)
    cls.model.transition[([0, 0, 1, 1, 2, 3],
                          [0, 3, 1, 2, 1, 3],
                          [0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
    cls.model.selection = np.eye(cls.model.k_states)

    # Update matrices with given parameters from the reference results
    (sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
        cls.true['parameters']
    )
    cls.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
    # Diagonal of the state covariance at t=0
    cls.model.state_cov[
        np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
        sigma_v**2, sigma_e**2, 0, sigma_w**2
    ]

    # Initialization
    initial_state = np.zeros((k_states,))
    initial_state_cov = np.eye(k_states)*100

    # Initialization: modification — propagate the prior covariance one
    # step through the transition matrix (T P T')
    initial_state_cov = np.dot(
        np.dot(cls.model.transition[:, :, 0], initial_state_cov),
        cls.model.transition[:, :, 0].T
    )
    cls.model.initialize_known(initial_state, initial_state_cov)
def test_slice_notation():
    """Getting and setting representation matrices through the
    ``mod[...]`` slice syntax."""
    series = np.arange(10) * 1.0
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod.bind(series)

    # Invalid names or positional keys must raise IndexError on assignment
    def set_designs():
        mod['designs'] = 1

    def set_designs2():
        mod['designs', 0, 0] = 1

    def set_designs3():
        mod[0] = 1

    for bad_setter in (set_designs, set_designs2, set_designs3):
        assert_raises(IndexError, bad_setter)

    # ...and likewise on retrieval
    assert_raises(IndexError, lambda: mod['designs'])
    assert_raises(IndexError, lambda: mod['designs', 0, 0, 0])
    assert_raises(IndexError, lambda: mod[0])

    # Full (name, i, j, t) indexing round-trips a single element
    assert_equal(mod.design[0, 0, 0], 0)
    mod['design', 0, 0, 0] = 1
    assert_equal(mod['design'].sum(), 1)
    assert_equal(mod.design[0, 0, 0], 1)
    assert_equal(mod['design', 0, 0, 0], 1)

    # Omitting the time index addresses the matrix across all periods
    mod['design'] = np.zeros(mod['design'].shape)
    assert_equal(mod.design[0, 0], 0)
    mod['design', 0, 0] = 1
    assert_equal(mod.design[0, 0], 1)
    assert_equal(mod['design', 0, 0], 1)
def test_simulate():
    """Test simulation of new time series from state-space models.

    Uses fixed measurement/state shock sequences so each simulated path
    has a closed-form expected value (cumulative sums / lfilter output).
    """
    from scipy.signal import lfilter

    # Common parameters
    nsimulations = 10
    sigma2 = 2
    measurement_shocks = np.zeros(nsimulations)
    state_shocks = np.random.normal(scale=sigma2**0.5, size=nsimulations)

    # Random walk model, so simulated series is just the cumulative sum of
    # the shocks
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    actual = mod.simulate(nsimulations, measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1]]
    assert_allclose(actual, desired)

    # Local level model, so simulated series is just the cumulative sum of
    # the shocks plus the measurement shock (here: constant 1)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    actual = mod.simulate(nsimulations,
                          measurement_shocks=np.ones(nsimulations),
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1, np.cumsum(state_shocks)[:-1] + 1]
    assert_allclose(actual, desired)

    # Local level-like model with observation and state intercepts, so
    # simulated series is just the cumulative sum of the shocks minus the
    # state intercept, plus the observation intercept and the measurement
    # shock
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['obs_intercept', 0, 0] = 5.
    mod['design', 0, 0] = 1.
    mod['state_intercept', 0, 0] = -2.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    actual = mod.simulate(nsimulations,
                          measurement_shocks=np.ones(nsimulations),
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[1 + 5, np.cumsum(state_shocks - 2)[:-1] + 1 + 5]
    assert_allclose(actual, desired)

    # Model with time-varying observation intercept (0, 1, ..., 9)
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10) * 1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    actual = mod.simulate(nsimulations, measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks)[0].squeeze()
    desired = np.r_[0, np.cumsum(state_shocks)[:-1] + np.arange(1, 10)]
    assert_allclose(actual, desired)

    # Model with time-varying observation intercept, check that error is
    # raised if more simulations are requested than are nobs.
    mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
    mod['obs_intercept'] = (np.arange(10) * 1.).reshape(1, 10)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    assert_raises(ValueError, mod.simulate, nsimulations + 1,
                  measurement_shocks, state_shocks)

    # ARMA(1,1): phi = [0.1], theta = [0.5], sigma^2 = 2
    # Simulation should match lfilter applied to the lagged state shocks
    phi = 0.1
    theta = 0.5
    mod = sarimax.SARIMAX([0], order=(1, 0, 1))
    mod.update(np.r_[phi, theta, sigma2])
    actual = mod.ssm.simulate(nsimulations,
                              measurement_shocks=measurement_shocks,
                              state_shocks=state_shocks,
                              initial_state=np.zeros(
                                  mod.k_states))[0].squeeze()
    desired = lfilter([1, theta], [1, -phi], np.r_[0, state_shocks[:-1]])
    assert_allclose(actual, desired)

    # SARIMAX(1,0,1)x(1,0,1,4), this time using the results object call
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 1),
                          seasonal_order=(1, 0, 1, 4))
    res = mod.filter([0.1, 0.5, 0.2, -0.3, 1])
    actual = res.simulate(nsimulations,
                          measurement_shocks=measurement_shocks,
                          state_shocks=state_shocks,
                          initial_state=np.zeros(mod.k_states))
    desired = lfilter(res.polynomial_reduced_ma, res.polynomial_reduced_ar,
                      np.r_[0, state_shocks[:-1]])
    assert_allclose(actual, desired)
def test_predict():
    """Tests of invalid calls to the predict function, warning messages,
    and the shapes of a successful ``PredictionResults`` object."""
    warnings.simplefilter("always")

    endog = np.ones((10, 1))
    mod = KalmanFilter(endog, k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_intercept'] = np.zeros((1, 10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1

    # Check that we need both forecasts and predicted output for prediction
    mod.memory_no_forecast = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_forecast = False

    mod.memory_no_predicted = True
    res = mod.filter()
    assert_raises(ValueError, res.predict)
    mod.memory_no_predicted = False

    # Now get a clean filter object
    res = mod.filter()

    # Check that start < 0 is an error
    assert_raises(ValueError, res.predict, start=-1)

    # Check that end < start is an error
    assert_raises(ValueError, res.predict, start=2, end=1)

    # Check that dynamic < 0 is an error
    assert_raises(ValueError, res.predict, dynamic=-1)

    # Check that dynamic > end is an warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=1, dynamic=2)
        message = ('Dynamic prediction specified to begin after the end of'
                   ' prediction, and so has no effect.')
        assert_equal(str(w[0].message), message)

    # Check that dynamic > nobs is an warning
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1, 1)))
        message = ('Dynamic prediction specified to begin during'
                   ' out-of-sample forecasting period, and so has no'
                   ' effect.')
        assert_equal(str(w[0].message), message)

    # Check for a warning when providing a non-used statespace matrix
    with warnings.catch_warnings(record=True) as w:
        res.predict(end=res.nobs + 1, design=True,
                    obs_intercept=np.zeros((1, 1)))
        message = ('Model has time-invariant design matrix, so the design'
                   ' argument to `predict` has been ignored.')
        assert_equal(str(w[0].message), message)

    # Check that an error is raised when a new time-varying matrix is not
    # provided (obs_intercept is time-varying here)
    assert_raises(ValueError, res.predict, end=res.nobs + 1)

    # Check that an error is raised when a non-two-dimensional obs_intercept
    # is given
    assert_raises(ValueError, res.predict, end=res.nobs + 1,
                  obs_intercept=np.zeros(1))

    # Check that an error is raised when an obs_intercept with incorrect
    # length is given
    assert_raises(ValueError, res.predict, end=res.nobs + 1,
                  obs_intercept=np.zeros(2))

    # Check that start=None gives start=0 and end=None gives end=nobs
    assert_equal(res.predict().forecasts.shape, (1, res.nobs))

    # Check that dynamic=True begins dynamic prediction immediately
    # TODO just a smoke test
    res.predict(dynamic=True)

    # Check that on success, PredictionResults object is returned
    prediction_results = res.predict(start=3, end=5)
    assert_equal(isinstance(prediction_results, PredictionResults), True)

    # Check for correctly subset representation arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.endog.shape, (1, 2))
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_endog, k_states) = (1, 1)
    assert_equal(prediction_results.design.shape, (1, 1))
    # (k_endog, k_endog) = (1, 1)
    assert_equal(prediction_results.obs_cov.shape, (1, 1))
    # (k_state,) = (1,)
    assert_equal(prediction_results.state_intercept.shape, (1, ))
    # (k_state, npredictions) = (1, 2)
    assert_equal(prediction_results.obs_intercept.shape, (1, 2))
    # (k_state, k_state) = (1, 1)
    assert_equal(prediction_results.transition.shape, (1, 1))
    # (k_state, k_posdef) = (1, 1)
    assert_equal(prediction_results.selection.shape, (1, 1))
    # (k_posdef, k_posdef) = (1, 1)
    assert_equal(prediction_results.state_cov.shape, (1, 1))

    # Check for correctly subset filter output arrays
    # (k_endog, npredictions) = (1, 2)
    assert_equal(prediction_results.forecasts.shape, (1, 2))
    assert_equal(prediction_results.forecasts_error.shape, (1, 2))
    # (k_states, npredictions) = (1, 2)
    assert_equal(prediction_results.filtered_state.shape, (1, 2))
    assert_equal(prediction_results.predicted_state.shape, (1, 2))
    # (k_endog, k_endog, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.forecasts_error_cov.shape, (1, 1, 2))
    # (k_states, k_states, npredictions) = (1, 1, 2)
    assert_equal(prediction_results.filtered_state_cov.shape, (1, 1, 2))
    assert_equal(prediction_results.predicted_state_cov.shape, (1, 1, 2))

    # Check for invalid attribute
    assert_raises(AttributeError, getattr, prediction_results, 'test')

    # Check that an error is raised when a non-two-dimensional obs_cov
    # is given
    # ...and...
    # Check that an error is raised when an obs_cov with incorrect length
    # is given
    mod = KalmanFilter(endog, k_states=1,
                       initialization='approximate_diffuse')
    mod['design', :] = 1
    mod['obs_cov'] = np.zeros((1, 1, 10))
    mod['selection', :] = 1
    mod['state_cov', :] = 1
    res = mod.filter()

    assert_raises(ValueError, res.predict, end=res.nobs + 1,
                  obs_cov=np.zeros((1, 1)))
    assert_raises(ValueError, res.predict, end=res.nobs + 1,
                  obs_cov=np.zeros((1, 1, 2)))
'''
https://datascienceschool.net/view-notebook/c645d51f308b4047aa78e8b343a2e181/
'''
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt

# Local level model: random-walk state (sigma_w^2 = 10) observed with
# measurement noise (sigma_v^2 = 100)
model1 = KalmanFilter(k_endog=1, k_states=1,
                      transition=[[1]], selection=[[1]], state_cov=[[10]],
                      design=[[1]], obs_cov=[[100]])

np.random.seed(0)
y1, x1 = model1.simulate(100)
print(x1)
print(y1)

plt.plot(y1, 'r:', label="관측값")
plt.plot(x1, 'g-', label="상태값")
plt.legend()
# BUG FIX: raw string — '\s' is an invalid escape sequence and emits a
# SyntaxWarning on modern Python; the rendered text is unchanged.
plt.title(r"로컬레벨 모형의 시뮬레이션 ($\sigma_w^2 = 10$, $\sigma_v^2 = 100$)")
plt.show()
sigma_e = 15.  # observation-noise standard deviation
e = np.random.normal(0, sigma_e, 110)
# NOTE(review): `df` and `a` are defined earlier in the file (not visible
# in this chunk) — `a` is presumably the latent state series; confirm.
df['y'] = a[0:100] + e[0:100]
_ = df.plot(figsize=(14, 6), style=['b--', 'g-', ])
_ = df.y.plot(figsize=(14, 6), style=['g-', ])
# y = a + e
# If we can only observe y, what can we say about α?
# This acts like a filter trying to recover a signal by filtering out noise.
# A linear filter.
# a is the state and y is the observation (equations)
import statsmodels.tsa.statespace.kalman_filter
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter
kf = KalmanFilter(1, 1)
kf.obs_cov = np.array([sigma_e])  # H: observation covariance
# NOTE(review): sigma_h is defined elsewhere in the file — confirm value.
kf.state_cov = np.array([sigma_h])  # Q: state covariance
kf.design = np.array([1.0])  # Z: design
kf.transition = np.array([1.0])  # T: transition
kf.selection = np.array([1.0])  # R: selection
# Simulate 100 periods: ys = observations, ah = states
ys, ah = kf.simulate(100)
def test_impulse_responses():
    """Test for impulse response functions across model variants:
    unit vs orthogonalized impulses, cumulative responses, intercepts,
    correlated shocks, multivariate models, and (S)ARIMAX wrappers."""

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))
    assert_allclose(actual, desired)

    # Random walk: 2-unit impulse response (i.e. non-orthogonalized irf) is 2
    # for all periods
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10, impulse=[2])
    desired = np.ones((11, 1)) * 2
    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation response (i.e. orthogonalized irf) is
    # sigma for all periods (here sigma^2 = 2)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10, orthogonalized=True)
    desired = np.ones((11, 1)) * 2**0.5
    assert_allclose(actual, desired)

    # Random walk: 1-standard-deviation cumulative response (i.e. cumulative
    # orthogonalized irf)
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['design', 0, 0] = 1.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10, orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]
    # BUG FIX: this assertion was missing — the first actual/desired pair
    # was computed but immediately overwritten without being checked.
    assert_allclose(actual, desired)

    # Same, with the default impulse given explicitly
    actual = mod.impulse_responses(steps=10, impulse=[1], orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]
    assert_allclose(actual, desired)

    # Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
    # for all periods, even when intercepts are present
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod['state_intercept', 0] = 100.
    mod['design', 0, 0] = 1.
    mod['obs_intercept', 0] = -1000.
    mod['transition', 0, 0] = 1.
    mod['selection', 0, 0] = 1.
    mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10)
    desired = np.ones((11, 1))
    assert_allclose(actual, desired)

    # Univariate model (random walk): test that an error is thrown when
    # a multivariate or empty "impulse" is sent
    mod = KalmanFilter(k_endog=1, k_states=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=1)
    assert_raises(ValueError, mod.impulse_responses, impulse=[1, 1])
    assert_raises(ValueError, mod.impulse_responses, impulse=[])

    # Univariate model with two uncorrelated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.eye(2)

    desired = np.ones((11, 1))

    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=[1, 0])
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=[0, 1])
    assert_allclose(actual, desired)

    # In this case (with sigma=sigma^2=1), orthogonalized is the same as not
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=[1, 0],
                                   orthogonalized=True)
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=[0, 1],
                                   orthogonalized=True)
    assert_allclose(actual, desired)

    # Univariate model with two correlated shocks
    mod = KalmanFilter(k_endog=1, k_states=2)
    mod['design', 0, 0:2] = 1.
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    desired = np.ones((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, desired)
    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, desired)

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different
    # responses
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, desired + desired * 0.5)
    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, desired)

    # Multivariate model with two correlated shocks
    mod = KalmanFilter(k_endog=2, k_states=2)
    mod['design', :, :] = np.eye(2)
    mod['transition', :, :] = np.eye(2)
    mod['selection', :, :] = np.eye(2)
    mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])

    ones = np.ones((11, 1))
    zeros = np.zeros((11, 1))

    # Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's, but
    # only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0)
    assert_allclose(actual, np.c_[ones, zeros])
    actual = mod.impulse_responses(steps=10, impulse=1)
    assert_allclose(actual, np.c_[zeros, ones])

    # Orthogonalized (i.e. 1-std-dev) impulses now generate different
    # responses, and only for the appropriate series
    actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
    assert_allclose(actual, np.c_[ones, ones * 0.5])
    actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
    assert_allclose(actual, np.c_[zeros, ones])

    # AR(1) model generates a geometrically declining series
    mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1, 0, 0))
    phi = 0.5
    mod.update([phi, 1])

    desired = np.cumprod(np.r_[1, [phi] * 10])

    # Test going through the model directly
    actual = mod.ssm.impulse_responses(steps=10)
    assert_allclose(actual[:, 0], desired)

    # Test going through the results object
    res = mod.filter([phi, 1.])
    actual = res.impulse_responses(steps=10)
    assert_allclose(actual, desired)