def test_series_both(self):
    expected = pd.DataFrame(index=self.series.index,
                            columns=['cpi', 'cpi.L.1', 'cpi.L.2', 'cpi.L.3'])
    expected['cpi'] = self.series
    for lag in range(1, 4):
        expected['cpi.L.' + str(int(lag))] = self.series.shift(lag)
    expected = expected.iloc[3:]

    both = tsatools.lagmat(self.series, 3, trim='both', original='in',
                           use_pandas=True)
    tm.assert_frame_equal(both, expected)

    lags = tsatools.lagmat(self.series, 3, trim='both', original='ex',
                           use_pandas=True)
    tm.assert_frame_equal(lags, expected.iloc[:, 1:])

    lags, lead = tsatools.lagmat(self.series, 3, trim='both',
                                 original='sep', use_pandas=True)
    tm.assert_frame_equal(lead, expected.iloc[:, :1])
    tm.assert_frame_equal(lags, expected.iloc[:, 1:])
def test_dataframe_forward(self):
    data = self.macro_df
    columns = list(data.columns)
    n = data.shape[0]
    values = np.zeros((n + 3, 16))
    values[:n, :4] = data.values
    for lag in range(1, 4):
        new_cols = [col + '.L.' + str(lag) for col in data]
        columns.extend(new_cols)
        values[lag:n + lag, 4 * lag:4 * (lag + 1)] = data.values
    index = data.index
    values = values[:n]
    expected = pd.DataFrame(values, columns=columns, index=index)

    both = tsatools.lagmat(self.macro_df, 3, trim='forward', original='in',
                           use_pandas=True)
    tm.assert_frame_equal(both, expected)

    lags = tsatools.lagmat(self.macro_df, 3, trim='forward', original='ex',
                           use_pandas=True)
    tm.assert_frame_equal(lags, expected.iloc[:, 4:])

    lags, lead = tsatools.lagmat(self.macro_df, 3, trim='forward',
                                 original='sep', use_pandas=True)
    tm.assert_frame_equal(lags, expected.iloc[:, 4:])
    tm.assert_frame_equal(lead, expected.iloc[:, :4])
def test_add_lag1d(self):
    data = self.random_data
    lagmat = tsatools.lagmat(data, 3, trim='Both')
    results = np.column_stack((data[3:], lagmat))
    lag_data = tsatools.add_lag(data, lags=3, insert=True)
    assert_equal(results, lag_data)

    # add index
    data = data[:, None]
    lagmat = tsatools.lagmat(data, 3, trim='Both')  # test for lagmat too
    results = np.column_stack((data[3:], lagmat))
    lag_data = tsatools.add_lag(data, lags=3, insert=True)
    assert_equal(results, lag_data)
def test_add_lag_ndarray(self):
    data = self.macro_df.values
    nddata = data.astype(float)
    lagmat = tsatools.lagmat(nddata[:, 2], 3, trim='Both')
    results = np.column_stack((nddata[3:, :3], lagmat, nddata[3:, -1]))
    lag_data = tsatools.add_lag(nddata, 2, 3)
    assert_equal(lag_data, results)
def test_add_lag_noinsert(self):
    data = self.macro_df.values
    nddata = data.astype(float)
    lagmat = tsatools.lagmat(nddata[:, 2], 3, trim='Both')
    results = np.column_stack((nddata[3:, :], lagmat))
    lag_data = tsatools.add_lag(data, self.realgdp_loc, 3, insert=False)
    assert_equal(lag_data, results)
def test_add_lag_1d_drop_struct(self):
    data = np.zeros(100, dtype=[('variable', float)])
    nddata = self.random_data
    data['variable'] = nddata

    lagmat = tsatools.lagmat(nddata, 3, trim='Both')
    lag_data = tsatools.add_lag(data, lags=3, drop=True)
    assert_equal(lagmat, lag_data.view((float, 3)))
def test_add_lag1d_drop(self):
    data = self.random_data
    lagmat = tsatools.lagmat(data, 3, trim='Both')

    lag_data = tsatools.add_lag(data, lags=3, drop=True, insert=True)
    assert_equal(lagmat, lag_data)

    # no insert, should be the same
    lag_data = tsatools.add_lag(data, lags=3, drop=True, insert=False)
    assert_equal(lagmat, lag_data)
def test_add_lag_noinsertatend_ndarray(self):
    data = self.macro_df.values
    nddata = data.astype(float)
    lagmat = tsatools.lagmat(nddata[:, -1], 3, trim='Both')
    results = np.column_stack((nddata[3:, :], lagmat))
    lag_data = tsatools.add_lag(nddata, 3, 3, insert=False)
    assert_equal(lag_data, results)

    # should be the same as insert=True; also check a negative column number
    lag_data2 = tsatools.add_lag(nddata, -1, 3, insert=True)
    assert_equal(lag_data2, results)
def test_sep_return(self):
    data = self.random_data
    n = data.shape[0]
    lagmat, leads = tsatools.lagmat(data, 3, trim='none', original='sep')
    expected = np.zeros((n + 3, 4))
    for i in range(4):
        expected[i:i + n, i] = data
    expected_leads = expected[:, :1]
    expected_lags = expected[:, 1:]
    assert_equal(expected_lags, lagmat)
    assert_equal(expected_leads, leads)
def test_add_lag_noinsert_atend(self):
    data = self.macro_df.values
    nddata = data.astype(float)
    lagmat = tsatools.lagmat(nddata[:, -1], 3, trim='Both')
    results = np.column_stack((nddata[3:, :], lagmat))
    lag_data = tsatools.add_lag(data, self.cpi_loc, 3, insert=False)
    assert_equal(lag_data, results)

    # should be the same as insert
    lag_data2 = tsatools.add_lag(data, self.cpi_loc, 3, insert=True)
    assert_equal(lag_data2, results)
def _stackX(self, k_ar, trend):
    """
    Private method to build the RHS matrix for estimation.

    Columns are trend terms then lags.
    """
    endog = self.endog
    X = lagmat(endog, maxlag=k_ar, trim='both')
    k_trend = util.get_trendorder(trend)
    if k_trend:
        X = add_trend(X, prepend=True, trend=trend)
    self.k_trend = k_trend  # TODO: Don't set this here
    return X
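# A minimal standalone sketch (not part of the model code) of what `_stackX`
# builds: `lagmat` stacks the lagged endog columns and `add_trend` prepends
# the deterministic terms, so the RHS for an AR(2) with a constant has rows
# of the form [1, y_{t-1}, y_{t-2}].  The toy series and the
# `sm2.tsa.tsatools` import path are assumptions for illustration only.
import numpy as np
from sm2.tsa.tsatools import lagmat, add_trend

y = np.arange(10.0)
rhs = lagmat(y, maxlag=2, trim='both')         # columns: y_{t-1}, y_{t-2}
rhs = add_trend(rhs, trend='c', prepend=True)  # prepend the constant column
print(rhs[:3])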
def test_add_lag1d_struct(self):
    data = np.zeros(100, dtype=[('variable', float)])
    nddata = self.random_data
    data['variable'] = nddata
    lagmat = tsatools.lagmat(nddata, 3, trim='Both', original='in')

    lag_data = tsatools.add_lag(data, 'variable', lags=3, insert=True)
    assert_equal(lagmat, lag_data.view((float, 4)))

    lag_data = tsatools.add_lag(data, 'variable', lags=3, insert=False)
    assert_equal(lagmat, lag_data.view((float, 4)))

    lag_data = tsatools.add_lag(data, lags=3, insert=True)
    assert_equal(lagmat, lag_data.view((float, 4)))
def _em_autoregressive(self, result, betas, tmp=None):
    """
    EM step for autoregressive coefficients and variances
    """
    if tmp is None:
        tmp = np.sqrt(result.smoothed_marginal_probabilities)

    resid = np.zeros((self.k_regimes, self.nobs + self.order))
    resid[:] = self.orig_endog
    if self._k_exog > 0:
        for i in range(self.k_regimes):
            resid[i] -= np.dot(self.orig_exog, betas[i])

    # The difference between this and `_em_exog` is that here we have a
    # different endog and exog for each regime
    coeffs = np.zeros((self.k_regimes,) + (self.order,))
    variance = np.zeros((self.k_regimes,))
    exog = np.zeros((self.nobs, self.order))
    for i in range(self.k_regimes):
        endog = resid[i, self.order:]
        exog = lagmat(resid[i], self.order)[self.order:]
        tmp_endog = tmp[i] * endog
        tmp_exog = tmp[i][:, None] * exog

        coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)

        if self.switching_variance:
            tmp_resid = endog - np.dot(exog, coeffs[i])
            variance[i] = (
                np.sum(tmp_resid**2 *
                       result.smoothed_marginal_probabilities[i]) /
                np.sum(result.smoothed_marginal_probabilities[i]))
        else:
            tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])
            variance[i] = np.sum(tmp_resid**2)

    # Variances
    if not self.switching_variance:
        variance = variance.sum() / self.nobs

    return coeffs, variance
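# Illustrative sketch (not library code) of the coefficient update above: it
# is weighted least squares done via a pseudo-inverse.  Scaling both sides of
# the regression by sqrt(w) and solving with pinv reproduces the WLS solution
# (X'WX)^{-1} X'Wy, where w stands in for the smoothed regime probabilities.
# All names below are hypothetical toy data for the identity check.
import numpy as np

rng = np.random.RandomState(0)
X = rng.standard_normal((50, 2))
y = X @ np.array([0.5, -0.2]) + 0.1 * rng.standard_normal(50)
w = rng.uniform(0.1, 1.0, 50)          # stand-in for smoothed probabilities
sw = np.sqrt(w)

beta_pinv = np.linalg.pinv(sw[:, None] * X) @ (sw * y)
beta_wls = np.linalg.solve(X.T @ (w[:, None] * X), X.T @ (w * y))
assert np.allclose(beta_pinv, beta_wls)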
def test_dataframe_without_pandas(self):
    data = self.macro_df
    both = tsatools.lagmat(data, 3, trim='both', original='in')
    both_np = tsatools.lagmat(data.values, 3, trim='both', original='in')
    assert_equal(both, both_np)

    lags = tsatools.lagmat(data, 3, trim='none', original='ex')
    lags_np = tsatools.lagmat(data.values, 3, trim='none', original='ex')
    assert_equal(lags, lags_np)

    lags, lead = tsatools.lagmat(data, 3, trim='forward', original='sep')
    lags_np, lead_np = tsatools.lagmat(data.values, 3, trim='forward',
                                       original='sep')
    assert_equal(lags, lags_np)
    assert_equal(lead, lead_np)
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
             store=False, regresults=False):
    """
    Augmented Dickey-Fuller unit root test

    The Augmented Dickey-Fuller test can be used to test for a unit root in
    a univariate process in the presence of serial correlation.

    Parameters
    ----------
    x : array_like, 1d
        data series
    maxlag : int
        Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
    regression : {'c', 'ct', 'ctt', 'nc'}
        Constant and trend order to include in regression

        * 'c' : constant only (default)
        * 'ct' : constant and trend
        * 'ctt' : constant, and linear and quadratic trend
        * 'nc' : no constant, no trend
    autolag : {'AIC', 'BIC', 't-stat', None}
        * if None, then maxlag lags are used
        * if 'AIC' (default) or 'BIC', then the number of lags is chosen
          to minimize the corresponding information criterion
        * 't-stat' based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant
          using a 5%-sized test
    store : bool
        If True, then a result instance is returned additionally to the
        adf statistic.  Default is False
    regresults : bool, optional
        If True, the full regression results are returned.  Default is False

    Returns
    -------
    adf : float
        Test statistic
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
    usedlag : int
        Number of lags used
    nobs : int
        Number of observations used for the ADF regression and calculation
        of the critical values
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels.  Based on MacKinnon (2010)
    icbest : float
        The maximized information criterion if autolag is not None.
    resstore : ResultStore, optional
        A dummy class with results attached as attributes

    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller test is that there is
    a unit root, with the alternative that there is no unit root.  If the
    pvalue is above a critical size, then we cannot reject that there is a
    unit root.

    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables.  If the p-value is
    close to significant, then the critical values should be used to judge
    whether to reject the null.

    The autolag option and maxlag for it are described in Greene.

    Examples
    --------
    See example notebook

    References
    ----------
    .. [*] W. Greene.  "Econometric Analysis," 5th ed., Pearson, 2003.

    .. [*] Hamilton, J.D.  "Time Series Analysis".  Princeton, 1994.

    .. [*] MacKinnon, J.G. 1994.  "Approximate asymptotic distribution
       functions for unit-root and cointegration tests."  `Journal of
       Business and Economic Statistics` 12, 167-76.

    .. [*] MacKinnon, J.G. 2010.  "Critical Values for Cointegration Tests."
       Queen's University, Dept of Economics, Working Papers.  Available at
       http://ideas.repec.org/p/qed/wpaper/1227.html
    """
    if regresults:
        store = True

    trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
    if regression is None or isinstance(regression, integer_types):
        regression = trenddict[regression]
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:  # pragma: no cover
        raise ValueError("regression option %s not understood" % regression)

    x = np.asarray(x)
    nobs = x.shape[0]

    if maxlag is None:
        # from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))

    xdiff = np.diff(x)
    xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
    nobs = xdall.shape[0]  # pylint: disable=E1103

    xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]

    if store:
        resstore = ResultsStore()
    if autolag:
        if regression != 'nc':
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        # add +1 for level
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1

        # search for lag length with smallest information criteria
        # Note: use the same number of observations to have comparable IC
        # aic and bic: smaller is better
        icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
                                          maxlag, autolag, regresults=True)
        if regresults:
            resstore.autolag_results = alres

        bestlag -= startlag  # convert to lag not column index

        # rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
        nobs = xdall.shape[0]  # pylint: disable=E1103
        xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None

    if regression != 'nc':
        resols = OLS(xdshort,
                     add_trend(xdall[:, :usedlag + 1], regression)).fit()
    else:
        resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()

    adfstat = resols.tvalues[0]
    # adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are
    # integrated for orders higher than I(1), ie., not ADF but cointegration
    # tests.

    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {"1%": critvalues[0],
                  "5%": critvalues[1],
                  "10%": critvalues[2]}
    if store:
        resstore.resols = resols
        resstore.maxlag = maxlag
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = ("The coefficient on the lagged level equals 1 - "
                       "unit root")
        resstore.HA = "The coefficient on the lagged level < 1 - stationary"
        resstore.icbest = icbest
        resstore._str = 'Augmented Dickey-Fuller Test Results'
        return adfstat, pvalue, critvalues, resstore
    else:
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
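# Hedged usage sketch for `adfuller` (the sm2.tsa.stattools import path is
# an assumption mirroring statsmodels): run the test on a simulated random
# walk, where the unit-root null should typically not be rejected.  With the
# default autolag and store=False, six values are returned as documented.
import numpy as np
from sm2.tsa.stattools import adfuller

rng = np.random.RandomState(12345)
x = np.cumsum(rng.standard_normal(250))   # random walk -> unit root
adfstat, pvalue, usedlag, nobs, critvalues, icbest = adfuller(x)
print(adfstat, pvalue, critvalues['5%'])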
def __init__(self, endog, k_regimes, order, trend='c', exog=None,
             exog_tvtp=None, switching_ar=True, switching_trend=True,
             switching_exog=False, switching_variance=False,
             dates=None, freq=None, missing='none'):
    # Properties
    self.order = order  # this also gets set in MarkovSwitching.__init__
    self.switching_ar = switching_ar

    # Switching options
    if self.switching_ar is True or self.switching_ar is False:
        self.switching_ar = [self.switching_ar] * order
    elif len(self.switching_ar) != order:
        raise ValueError('Invalid iterable passed to `switching_ar`.')

    # Cache an array for holding slices
    self._predict_slices = [slice(None, None, None)] * (self.order + 1)

    # Autoregressive exog
    self.exog_ar = lagmat(endog, self.order)[self.order:]

    # Initialize the base model
    super(MarkovAutoregression, self).__init__(
        endog, k_regimes, trend=trend, exog=exog, order=order,
        exog_tvtp=exog_tvtp, switching_trend=switching_trend,
        switching_exog=switching_exog,
        switching_variance=switching_variance,
        dates=dates, freq=freq, missing=missing)
    # TODO: It looks like all existing tests have zero nulls in endog

    # Sanity checks
    if self.nobs <= self.order:
        # in tests (as of 2018-03-31) self.nobs always matches len(endog)
        # at this point, but it isn't obvious if this MUST hold
        raise ValueError('Must have more observations than the order of'
                         ' the autoregression.')

    # Reshape other datasets
    self.nobs -= self.order
    # TODO: Do we have test cases where nulls have been dropped from
    #   self.endog by this point?  can we write this in terms of
    #   anything fixed?
    self.orig_endog = self.endog
    # TODO: Does this necessarily match self.data.orig_endog?
    self.endog = self.endog[self.order:]
    if self._k_exog > 0:
        self.orig_exog = self.exog
        self.exog = self.exog[self.order:]

    # Reset the ModelData datasets
    # TODO: I'm not wild about altering self.data in-place...
    self.data.endog, self.data.exog = (
        self.data._convert_endog_exog(self.endog, self.exog))

    # Reset indexes, if provided
    if self.data.row_labels is not None:
        self.data._cache['row_labels'] = self.data.row_labels[self.order:]
    if self._index is not None:
        if self._index_generated:
            self._index = self._index[:-self.order]
        else:
            self._index = self._index[self.order:]

    # Parameters
    self.parameters['autoregressive'] = self.switching_ar
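# Hedged construction sketch (the regime-switching import path is assumed to
# mirror statsmodels): building a two-regime AR(1) consumes the first `order`
# observations for the lagged regressors, so nobs is len(endog) - order, as
# the reshaping code above implies.  The simulated series is illustrative.
import numpy as np
from sm2.tsa.regime_switching.markov_autoregression import (
    MarkovAutoregression)

rng = np.random.RandomState(42)
endog = np.zeros(200)
for t in range(1, 200):
    endog[t] = 0.5 * endog[t - 1] + rng.standard_normal()

mod = MarkovAutoregression(endog, k_regimes=2, order=1)
assert mod.nobs == len(endog) - 1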
def test_pandas_errors(self):
    # TODO: parametrize?
    for trim in ['none', 'backward']:
        for data in [self.macro_df, self.series]:
            with pytest.raises(ValueError):
                tsatools.lagmat(data, 3, trim=trim, use_pandas=True)
def pacf_ols(x, nlags=40, efficient=True, unbiased=False):
    """
    Calculate partial autocorrelations via OLS

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned.  Lag 0 is not returned.
    efficient : bool, optional
        If true, uses the maximum number of available observations to
        compute each partial autocorrelation.  If not, uses the same number
        of observations to compute all pacf values.
    unbiased : bool, optional
        Adjust each partial autocorrelation by n / (n - lag)

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, (maxlag,) array corresponding to lags
        0, 1, ..., maxlag

    Notes
    -----
    This solves a separate OLS estimation for each desired lag.  Setting
    efficient to True has two effects.  First, it uses `nobs - lag`
    observations to estimate each pacf.  Second, it re-estimates the mean in
    each regression.  If efficient is False, then the data are first
    demeaned, and then `nobs - maxlag` observations are used to estimate
    each partial autocorrelation.

    The inefficient estimator appears to have better finite sample
    properties.  This option should only be used in time series that are
    covariance stationary.

    OLS estimation of the pacf does not guarantee that all pacf values are
    between -1 and 1.

    See also
    --------
    sm2.tsa.stattools.pacf
    sm2.tsa.autocov.pacf_yw
    sm2.tsa.autocov.pacf_burg

    References
    ----------
    .. [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
       Time series analysis: forecasting and control.  John Wiley & Sons,
       p. 66
    """
    pacf = np.empty(nlags + 1)
    pacf[0] = 1.0

    x = np.squeeze(np.asarray(x))
    if x.ndim != 1:
        raise ValueError('x must be squeezable to a 1-d array')

    if efficient:
        xlags, x0 = lagmat(x, nlags, original='sep')
        xlags = add_constant(xlags)
        for k in range(1, nlags + 1):
            params = lstsq(xlags[k:, :k + 1], x0[k:], rcond=None)[0]
            pacf[k] = params[-1]
    else:
        x = x - np.mean(x)
        # Create a single set of lags for multivariate OLS
        xlags, x0 = lagmat(x, nlags, original='sep', trim='both')
        for k in range(1, nlags + 1):
            params = lstsq(xlags[:, :k], x0, rcond=None)[0]
            # Last coefficient corresponds to PACF value (see [1])
            pacf[k] = params[-1]

    if unbiased:
        n = len(x)
        pacf *= n / (n - np.arange(nlags + 1))

    return pacf
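# Hedged usage sketch for `pacf_ols` (the sm2.tsa.stattools import path is
# an assumption mirroring statsmodels): for a simulated AR(1) process the
# OLS-based pacf should be close to the AR coefficient at lag 1 and near
# zero at higher lags.
import numpy as np
from sm2.tsa.stattools import pacf_ols

rng = np.random.RandomState(0)
eps = rng.standard_normal(500)
x = np.zeros(500)
for t in range(1, 500):
    x[t] = 0.6 * x[t - 1] + eps[t]   # AR(1) with phi = 0.6

pacf = pacf_ols(x, nlags=5)
print(pacf)                          # pacf[1] should be roughly 0.6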
def test_unknown_trim(self):
    with pytest.raises(ValueError):
        tsatools.lagmat(self.macro_df, 3, trim='unknown', use_pandas=True)
    with pytest.raises(ValueError):
        tsatools.lagmat(self.macro_df.values, 3, trim='unknown')
def test_too_few_observations(self):
    with pytest.raises(ValueError):
        tsatools.lagmat(self.macro_df, 300, use_pandas=True)
    with pytest.raises(ValueError):
        tsatools.lagmat(self.macro_df.values, 300)