def test_nondiagonal_obs_cov(reset_randomstate):
    # All diffuse handling is done using the univariate filtering approach,
    # even if the usual multivariate filtering method is being used for the
    # other periods. This means that if the observation covariance matrix is
    # not a diagonal matrix during the diffuse periods, we need to transform
    # the observation equation as we would if we were using the univariate
    # filter.
    mod = TVSS(np.zeros((10, 2)))
    res1 = mod.smooth([])

    mod.ssm.filter_univariate = True
    res2 = mod.smooth([])

    atol = 0.002 if PLATFORM_WIN else 1e-5
    rtol = 0.002 if PLATFORM_WIN else 1e-6

    # Here we'll just test a few values
    assert_allclose(res1.llf, res2.llf, rtol=rtol, atol=atol)
    assert_allclose(res1.forecasts[0], res2.forecasts[0], rtol=rtol, atol=atol)
    assert_allclose(res1.filtered_state, res2.filtered_state, rtol=rtol,
                    atol=atol)
    assert_allclose(res1.filtered_state_cov, res2.filtered_state_cov,
                    rtol=rtol, atol=atol)
    assert_allclose(res1.smoothed_state, res2.smoothed_state, rtol=rtol,
                    atol=atol)
    assert_allclose(res1.smoothed_state_cov, res2.smoothed_state_cov,
                    rtol=rtol, atol=atol)


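# The transformation mentioned in the comment above can be illustrated with a
# small sketch: the univariate treatment requires a diagonal observation
# covariance matrix, so when H_t is not diagonal the observation equation is
# first decorrelated, e.g. via a Cholesky factor of H_t. The helper below is
# a hypothetical stand-alone illustration of that idea (assuming a full-rank
# H_t); it is not the routine used internally by the filter and is not
# exercised by the tests.
def _decorrelate_observation_equation_sketch(y_t, d_t, Z_t, H_t):
    # Lower-triangular Cholesky factor of the observation covariance matrix
    L_t = np.linalg.cholesky(H_t)
    # Premultiply the observation vector, intercept, and design matrix by the
    # inverse of the Cholesky factor
    y_star = np.linalg.solve(L_t, y_t)
    d_star = np.linalg.solve(L_t, d_t)
    Z_star = np.linalg.solve(L_t, Z_t)
    # The transformed observation disturbances are uncorrelated with unit
    # variances, so the univariate filter can process them one at a time
    H_star = np.eye(H_t.shape[0])
    return y_star, d_star, Z_star, H_star

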
def test_smoothed_state_obs_weights_univariate_singular(
        singular, periods, reset_randomstate):
    # Tests for the univariate case when the forecast error covariance matrix
    # is singular (so the multivariate approach cannot be used, and the use
    # of pinv in computing the weights actually becomes operative)
    endog = np.zeros((10, 2))
    endog[6, 0] = np.nan
    endog[7, :] = np.nan
    endog[8, 1] = np.nan
    mod = TVSS(endog)
    mod.ssm.initialize_known([1.2, 0.8], np.eye(2) * 0)
    if singular == 'both':
        mod['obs_cov', ..., :periods] = 0
    else:
        mod['obs_cov', 0, 1, :periods] = 0
        mod['obs_cov', 1, 0, :periods] = 0
        mod['obs_cov', singular, singular, :periods] = 0
    mod['state_cov', :, :, :periods] = 0
    mod.ssm.filter_univariate = True
    res = mod.smooth([])

    # Make sure we actually have singular covariance matrices in the periods
    # specified
    for i in range(periods):
        eigvals = np.linalg.eigvalsh(res.forecasts_error_cov[..., i])
        assert_equal(np.min(eigvals), 0)

    # Compute the desired weights
    n = mod.nobs
    m = mod.k_states
    p = mod.k_endog
    desired = np.zeros((n, n, m, p)) * np.nan
    # Here we manually compute the weights by adjusting one observation at a
    # time
    for j in range(n):
        for i in range(p):
            if np.isnan(endog[j, i]):
                desired[:, j, :, i] = np.nan
            else:
                y = endog.copy()
                y[j, i] = 1.0
                tmp_mod = mod.clone(y)
                tmp_mod.ssm.initialize_known([1.2, 0.8], np.eye(2) * 0)
                tmp_mod.ssm.filter_univariate = True
                tmp_res = tmp_mod.smooth([])
                desired[:, j, :, i] = (tmp_res.smoothed_state.T
                                       - res.smoothed_state.T)

    actual, _, _ = tools.compute_smoothed_state_weights(res)

    assert_allclose(actual, desired, atol=1e-12)


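# A minimal sketch, not part of the test suite: it illustrates why the
# pseudo-inverse referenced in the comment above matters when the forecast
# error covariance matrix is singular; np.linalg.inv raises LinAlgError for
# an exactly singular matrix, while np.linalg.pinv remains well defined.
def _pinv_singular_example_sketch():
    F_t = np.array([[1.0, 1.0],
                    [1.0, 1.0]])  # rank 1, hence singular
    # np.linalg.inv(F_t) would raise numpy.linalg.LinAlgError here
    return np.linalg.pinv(F_t)

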
def test_time_varying_model(reset_randomstate):
    endog = np.array([[0.5, 1.2, -0.2, 0.3, -0.1, 0.4, 1.4, 0.9],
                      [-0.2, -0.3, -0.1, 0.1, 0.01, 0.05, -0.13, -0.2]]).T

    # The basic model switches to the univariate method at observation 3,
    # because the forecast error covariance matrix will have a singular
    # component corresponding to the first endog variable
    np.random.seed(1234)
    mod_switch = TVSS(endog)
    mod_switch['design', ..., 3] = 0
    mod_switch['obs_cov', ..., 3] = 0
    mod_switch['obs_cov', 1, 1, 3] = 1.
    res_switch = mod_switch.ssm.smooth()
    kfilter = mod_switch.ssm._kalman_filter
    uf_switch = np.array(kfilter.univariate_filter, copy=True)

    # Next, this model only uses the univariate method
    np.random.seed(1234)
    mod_uv = TVSS(endog)
    mod_uv['design', ..., 3] = 0
    mod_uv['obs_cov', ..., 3] = 0
    mod_uv['obs_cov', 1, 1, 3] = 1.
    mod_uv.ssm.filter_univariate = True
    res_uv = mod_uv.ssm.smooth()
    kfilter = mod_uv.ssm._kalman_filter
    uf_uv = np.array(kfilter.univariate_filter, copy=True)

    # Finally, this model uses the multivariate method and gets around the
    # issue by setting to NaN the endog variable that would have contributed
    # to the singular part of the forecast error covariance matrix
    np.random.seed(1234)
    endog_mv = endog.copy()
    endog_mv[3, 0] = np.nan
    mod_mv = TVSS(endog_mv)
    mod_mv['design', ..., 3] = 0
    mod_mv['obs_cov', ..., 3] = 0
    mod_mv['obs_cov', 1, 1, 3] = 1.
    res_mv = mod_mv.ssm.smooth()
    kfilter = mod_mv.ssm._kalman_filter
    uf_mv = np.array(kfilter.univariate_filter, copy=True)

    # Make sure that switching happened in the switch model but not in the
    # other two models
    assert_allclose(uf_switch[:3], 0)
    assert_allclose(uf_switch[3], 1)
    assert_allclose(uf_switch[4:], 0)
    assert_allclose(uf_uv, 1)
    assert_allclose(uf_mv, 0)

    # Check filter and smoother output
    check_filter_output([res_mv, res_switch, res_uv], np.s_[3])
    check_smoother_output([res_mv, res_switch, res_uv], np.s_[3])


def test_compute_t_compute_j(compute_j, compute_t, reset_randomstate):
    # Tests for the collapsed case
    endog = np.zeros((10, 6))
    endog[2, :] = np.nan
    endog[6, 0] = np.nan
    endog[7, :] = np.nan
    endog[8, 1] = np.nan
    mod = TVSS(endog)
    mod['obs_intercept'] = np.zeros((6, 1))
    mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
    mod.ssm.filter_collapsed = True
    res = mod.smooth([])

    # Compute the desired weights
    n = mod.nobs
    m = mod.k_states
    p = mod.k_endog
    desired = np.zeros((n, n, m, p)) * np.nan
    # Here we manually compute the weights by adjusting one observation at a
    # time
    for j in range(n):
        for i in range(p):
            if np.isnan(endog[j, i]):
                desired[:, j, :, i] = np.nan
            else:
                y = endog.copy()
                y[j, i] = 1.0
                tmp_mod = mod.clone(y)
                tmp_mod['obs_intercept'] = np.zeros((6, 1))
                tmp_mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
                tmp_mod.ssm.filter_collapsed = True
                tmp_res = tmp_mod.smooth([])
                desired[:, j, :, i] = (tmp_res.smoothed_state.T
                                       - res.smoothed_state.T)

    actual, _, _ = tools.compute_smoothed_state_weights(
        res, compute_t=compute_t, compute_j=compute_j)

    compute_t = np.atleast_1d(compute_t)
    compute_j = np.atleast_1d(compute_j)
    for t in np.arange(10):
        if t not in compute_t:
            desired[t, :] = np.nan
    for j in np.arange(10):
        if j not in compute_j:
            desired[:, j] = np.nan

    assert_allclose(actual, desired, atol=1e-12)


def test_predicted_filtered_smoothed_TVSS(reset_randomstate):
    mod = TVSS(np.zeros((50, 2)))
    mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
    res = mod.smooth([])

    mod_oos = TVSS(np.zeros((11, 2)) * np.nan)
    kwargs = {
        key: mod_oos[key]
        for key in ['obs_intercept', 'design', 'obs_cov', 'transition',
                    'selection', 'state_cov']
    }

    p_pred = res.get_prediction(start=0, end=60, information_set='predicted',
                                **kwargs)
    f_pred = res.get_prediction(start=0, end=60, information_set='filtered',
                                **kwargs)
    s_pred = res.get_prediction(start=0, end=60, information_set='smoothed',
                                **kwargs)

    p_signal = res.get_prediction(start=0, end=60, information_set='predicted',
                                  signal_only=True, **kwargs)
    f_signal = res.get_prediction(start=0, end=60, information_set='filtered',
                                  signal_only=True, **kwargs)
    s_signal = res.get_prediction(start=0, end=60, information_set='smoothed',
                                  signal_only=True, **kwargs)

    # Test forecasts and signals
    d = mod['obs_intercept'].transpose(1, 0)[:, :, None]
    Z = mod['design'].transpose(2, 0, 1)
    H = mod['obs_cov'].transpose(2, 0, 1)

    fcast = res.get_forecast(11, **kwargs)
    fcast_signal = fcast.predicted_mean - mod_oos['obs_intercept'].T
    fcast_signal_cov = fcast.var_pred_mean - mod_oos['obs_cov'].T

    desired_s_signal = Z @ res.smoothed_state.T[:, :, None]
    desired_f_signal = Z @ res.filtered_state.T[:, :, None]
    desired_p_signal = Z @ res.predicted_state.T[:-1, :, None]
    assert_allclose(s_pred.predicted_mean[:50], (d + desired_s_signal)[..., 0])
    assert_allclose(s_pred.predicted_mean[50:], fcast.predicted_mean)
    assert_allclose(f_pred.predicted_mean[:50], (d + desired_f_signal)[..., 0])
    assert_allclose(f_pred.predicted_mean[50:], fcast.predicted_mean)
    assert_allclose(p_pred.predicted_mean[:50], (d + desired_p_signal)[..., 0])
    assert_allclose(p_pred.predicted_mean[50:], fcast.predicted_mean)
    assert_allclose(s_signal.predicted_mean[:50], desired_s_signal[..., 0])
    assert_allclose(s_signal.predicted_mean[50:], fcast_signal)
    assert_allclose(f_signal.predicted_mean[:50], desired_f_signal[..., 0])
    assert_allclose(f_signal.predicted_mean[50:], fcast_signal)
    assert_allclose(p_signal.predicted_mean[:50], desired_p_signal[..., 0])
    assert_allclose(p_signal.predicted_mean[50:], fcast_signal)

    for t in range(mod.nobs):
        assert_allclose(s_pred.var_pred_mean[t],
                        Z[t] @ res.smoothed_state_cov[..., t] @ Z[t].T + H[t])
        assert_allclose(f_pred.var_pred_mean[t],
                        Z[t] @ res.filtered_state_cov[..., t] @ Z[t].T + H[t])
        assert_allclose(p_pred.var_pred_mean[t],
                        Z[t] @ res.predicted_state_cov[..., t] @ Z[t].T + H[t])
        assert_allclose(s_signal.var_pred_mean[t],
                        Z[t] @ res.smoothed_state_cov[..., t] @ Z[t].T)
        assert_allclose(f_signal.var_pred_mean[t],
                        Z[t] @ res.filtered_state_cov[..., t] @ Z[t].T)
        assert_allclose(p_signal.var_pred_mean[t],
                        Z[t] @ res.predicted_state_cov[..., t] @ Z[t].T)

    assert_allclose(s_pred.var_pred_mean[50:], fcast.var_pred_mean)
    assert_allclose(f_pred.var_pred_mean[50:], fcast.var_pred_mean)
    assert_allclose(p_pred.var_pred_mean[50:], fcast.var_pred_mean)
    assert_allclose(s_signal.var_pred_mean[50:], fcast_signal_cov)
    assert_allclose(f_signal.var_pred_mean[50:], fcast_signal_cov)
    assert_allclose(p_signal.var_pred_mean[50:], fcast_signal_cov)


def test_predicted_filtered_smoothed_with_nans_TVSS(reset_randomstate):
    mod = TVSS(np.zeros((50, 2)) * np.nan)
    mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
    res = mod.smooth([])

    mod_oos = TVSS(np.zeros((11, 2)) * np.nan)
    kwargs = {
        key: mod_oos[key]
        for key in ['obs_intercept', 'design', 'obs_cov', 'transition',
                    'selection', 'state_cov']
    }

    p_pred = res.get_prediction(start=0, end=60, information_set='predicted',
                                **kwargs)
    f_pred = res.get_prediction(start=0, end=60, information_set='filtered',
                                **kwargs)
    s_pred = res.get_prediction(start=0, end=60, information_set='smoothed',
                                **kwargs)

    # Test forecasts
    assert_allclose(s_pred.predicted_mean, p_pred.predicted_mean)
    assert_allclose(s_pred.var_pred_mean, p_pred.var_pred_mean)
    assert_allclose(f_pred.predicted_mean, p_pred.predicted_mean)
    assert_allclose(f_pred.var_pred_mean, p_pred.var_pred_mean)
    assert_allclose(p_pred.predicted_mean[:50], res.fittedvalues)
    assert_allclose(p_pred.var_pred_mean[:50].T, res.forecasts_error_cov)

    p_signal = res.get_prediction(start=0, end=60, information_set='predicted',
                                  signal_only=True, **kwargs)
    f_signal = res.get_prediction(start=0, end=60, information_set='filtered',
                                  signal_only=True, **kwargs)
    s_signal = res.get_prediction(start=0, end=60, information_set='smoothed',
                                  signal_only=True, **kwargs)

    # Test signal predictions
    assert_allclose(s_signal.predicted_mean, p_signal.predicted_mean)
    assert_allclose(s_signal.var_pred_mean, p_signal.var_pred_mean)
    assert_allclose(f_signal.predicted_mean, p_signal.predicted_mean)
    assert_allclose(f_signal.var_pred_mean, p_signal.var_pred_mean)
    assert_allclose(p_signal.predicted_mean[:50] + mod['obs_intercept'].T,
                    res.fittedvalues)
    assert_allclose((p_signal.var_pred_mean[:50] + mod['obs_cov'].T).T,
                    res.forecasts_error_cov)


def test_smoothed_state_obs_weights_collapsed(reset_randomstate):
    # Tests for the collapsed case
    endog = np.zeros((20, 6))
    endog[2, :] = np.nan
    endog[6, 0] = np.nan
    endog[7, :] = np.nan
    endog[8, 1] = np.nan
    mod = TVSS(endog)
    mod['obs_intercept'] = np.zeros((6, 1))
    mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
    mod.ssm.filter_collapsed = True
    res = mod.smooth([])

    # Compute the desired weights
    n = mod.nobs
    m = mod.k_states
    p = mod.k_endog
    desired = np.zeros((n, n, m, p)) * np.nan
    # Here we manually compute the weights by adjusting one observation at a
    # time
    for j in range(n):
        for i in range(p):
            if np.isnan(endog[j, i]):
                desired[:, j, :, i] = np.nan
            else:
                y = endog.copy()
                y[j, i] = 1.0
                tmp_mod = mod.clone(y)
                tmp_mod['obs_intercept'] = np.zeros((6, 1))
                tmp_mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
                tmp_mod.ssm.filter_collapsed = True
                tmp_res = tmp_mod.smooth([])
                desired[:, j, :, i] = (tmp_res.smoothed_state.T
                                       - res.smoothed_state.T)

    desired_state_intercept_weights = np.zeros((n, n, m, m)) * np.nan
    # Here we manually compute the weights by adjusting one state intercept
    # at a time
    for j in range(n):
        for ell in range(m):
            tmp_mod = mod.clone(endog)
            tmp_mod['obs_intercept'] = np.zeros((6, 1))
            tmp_mod.ssm.initialize_known([1.2, 0.8], np.eye(2))
            tmp_mod.ssm.filter_collapsed = True
            if tmp_mod['state_intercept'].ndim == 1:
                si = tmp_mod['state_intercept']
                tmp_mod['state_intercept'] = np.zeros((mod.k_states, mod.nobs))
                tmp_mod['state_intercept', :, :] = si[:, None]
            tmp_mod['state_intercept', ell, j] += 1.0
            tmp_res = tmp_mod.ssm.smooth()
            desired_state_intercept_weights[:, j, :, ell] = (
                tmp_res.smoothed_state.T - res.smoothed_state.T)

    actual, actual_state_intercept_weights, _ = (
        tools.compute_smoothed_state_weights(res))

    assert_allclose(actual, desired, atol=1e-12)
    assert_allclose(actual_state_intercept_weights,
                    desired_state_intercept_weights, atol=1e-12)


def test_smoothed_state_obs_weights_TVSS(univariate, diffuse,
                                         reset_randomstate):
    endog = np.zeros((10, 3))
    # One simple way to introduce more diffuse periods is to have fully
    # missing observations at the beginning
    if diffuse == 4:
        endog[:3] = np.nan
    endog[6, 0] = np.nan
    endog[7, :] = np.nan
    endog[8, 1] = np.nan
    mod = TVSS(endog)
    prior_mean = np.array([1.2, 0.8])
    prior_cov = np.eye(2)
    if not diffuse:
        mod.ssm.initialize_known(prior_mean, prior_cov)
    if univariate:
        mod.ssm.filter_univariate = True
    res = mod.smooth([])

    # Compute the desired weights
    n = mod.nobs
    m = mod.k_states
    p = mod.k_endog
    desired = np.zeros((n, n, m, p)) * np.nan
    # Here we manually compute the weights by adjusting one observation at a
    # time
    for j in range(n):
        for i in range(p):
            if np.isnan(endog[j, i]):
                desired[:, j, :, i] = np.nan
            else:
                y = endog.copy()
                y[j, i] = 1.0
                tmp_mod = mod.clone(y)
                if not diffuse:
                    tmp_mod.ssm.initialize_known(prior_mean, prior_cov)
                if univariate:
                    tmp_mod.ssm.filter_univariate = True
                tmp_res = tmp_mod.smooth([])
                desired[:, j, :, i] = (tmp_res.smoothed_state.T
                                       - res.smoothed_state.T)

    desired_state_intercept_weights = np.zeros((n, n, m, m)) * np.nan
    # Here we manually compute the weights by adjusting one state intercept
    # at a time
    for j in range(n):
        for ell in range(m):
            tmp_mod = mod.clone(endog)
            if not diffuse:
                tmp_mod.ssm.initialize_known(prior_mean, prior_cov)
            if univariate:
                tmp_mod.ssm.filter_univariate = True
            if tmp_mod['state_intercept'].ndim == 1:
                si = tmp_mod['state_intercept']
                tmp_mod['state_intercept'] = np.zeros((mod.k_states, mod.nobs))
                tmp_mod['state_intercept', :, :] = si[:, None]
            tmp_mod['state_intercept', ell, j] += 1.0
            tmp_res = tmp_mod.ssm.smooth()
            desired_state_intercept_weights[:, j, :, ell] = (
                tmp_res.smoothed_state.T - res.smoothed_state.T)

    desired_prior_weights = np.zeros((n, m, m)) * np.nan
    if not diffuse:
        for i in range(m):
            a = prior_mean.copy()
            a[i] += 1
            tmp_mod = mod.clone(endog)
            tmp_mod.ssm.initialize_known(a, prior_cov)
            tmp_res = tmp_mod.smooth([])
            desired_prior_weights[:, :, i] = (tmp_res.smoothed_state.T
                                              - res.smoothed_state.T)

    if not diffuse:
        mod.ssm.initialize_known(prior_mean, prior_cov)
    actual, actual_state_intercept_weights, actual_prior_weights = (
        tools.compute_smoothed_state_weights(res))

    d = res.nobs_diffuse
    assert_equal(d, diffuse)

    if diffuse:
        assert_allclose(actual[:d], np.nan, atol=1e-12)
        assert_allclose(actual[:, :d], np.nan, atol=1e-12)
        assert_allclose(actual_state_intercept_weights[:d], np.nan)
        assert_allclose(actual_state_intercept_weights[:, :d], np.nan)
        assert_allclose(actual_prior_weights, np.nan)
    else:
        # Test that the weights are the same
        assert_allclose(actual_prior_weights, desired_prior_weights,
                        atol=1e-12)

        # In the non-diffuse case, we can actually use the weights along with
        # the prior and the observations to compute the smoothed state
        # directly, and then compare that to what was returned by the usual
        # Kalman smoothing routines.
        # Note that TVSS sets the state intercept to zeros, so this does not
        # test that, although those weights are tested separately, see above
        # and below.
        contribution_prior = np.nansum(
            actual_prior_weights * prior_mean[None, None, :], axis=2)
        contribution_endog = np.nansum(
            actual * (endog - mod['obs_intercept'].T)[None, :, None, :],
            axis=(1, 3))
        computed_smoothed_state = contribution_prior + contribution_endog
        assert_allclose(computed_smoothed_state, res.smoothed_state.T)

    assert_allclose(actual[d:, d:], desired[d:, d:], atol=1e-12)
    assert_allclose(actual_state_intercept_weights[d:, d:],
                    desired_state_intercept_weights[d:, d:], atol=1e-12)


def test_smoothed_decomposition_TVSS(univariate, reset_randomstate):
    endog = np.zeros((10, 3))
    endog[6, 0] = np.nan
    endog[7, :] = np.nan
    endog[8, 1] = np.nan

    mod = TVSS(endog)
    mod['state_intercept'] = np.random.normal(size=(mod.k_states, mod.nobs))

    prior_mean = np.array([1.2, 0.8])
    prior_cov = np.eye(2)
    mod.ssm.initialize_known(prior_mean, prior_cov)
    if univariate:
        mod.ssm.filter_univariate = True
    res = mod.smooth([])

    # Check smoothed state

    # Get the decomposition of the smoothed state
    cd, coi, csi, cp = res.get_smoothed_decomposition(
        decomposition_of='smoothed_state')

    # Sum across contributions (i.e. from observations at each time period
    # and from the initial state)
    css = ((cd + coi).sum(axis=1) + csi.sum(axis=1) + cp.sum(axis=1))
    css = css.unstack(level='state_to')[mod.state_names].values

    # Summing up all contributions should yield the actual smoothed state,
    # so the smoothed state vector is the desired result of this test
    ss = np.array(res.states.smoothed)

    assert_allclose(css, ss, atol=1e-12)

    # Check smoothed signal

    # Use the summed state contributions and multiply by the design matrix
    # to get the smoothed signal
    cs_sig = (css.T * mod['design']).sum(axis=1).T

    # Add in the observation intercept to get the smoothed forecast
    csf = cs_sig + mod['obs_intercept'].T

    # Summing up all contributions should yield the smoothed prediction of
    # the observed variables
    s_sig = res.predict(information_set='smoothed', signal_only=True)
    sf = res.predict(information_set='smoothed', signal_only=False)

    assert_allclose(cs_sig, s_sig, atol=1e-12)
    assert_allclose(csf, sf, atol=1e-12)

    # Now check the smoothed signal against the sum computed from the
    # decomposed smoothed signal
    cd, coi, csi, cp = res.get_smoothed_decomposition(
        decomposition_of='smoothed_signal')

    # Sum across contributions (i.e. from observations and intercepts at
    # each time period and from the initial state) to get the smoothed
    # signal
    cs_sig = ((cd + coi).sum(axis=1) + csi.sum(axis=1) + cp.sum(axis=1))
    cs_sig = cs_sig.unstack(level='variable_to')[mod.endog_names].values

    assert_allclose(cs_sig, s_sig, atol=1e-12)

    # Add in the observation intercept to get the smoothed forecast
    csf = cs_sig + mod['obs_intercept'].T

    assert_allclose(csf, sf)