def plot5(self):
    df1 = pd.read_csv('data/airline_passengers.csv', index_col='Month', parse_dates=True)
    df1.index.freq = 'MS'
    df2 = pd.read_csv('data/DailyTotalFemaleBirths.csv', index_col='Date', parse_dates=True)
    df2.index.freq = 'D'

    df = pd.DataFrame({'a': [13, 5, 11, 12, 9]})
    arr = acovf(df['a'])
    arr2 = acovf(df['a'], unbiased=True)
    arr3 = acf(df['a'])
    arr4 = pacf_yw(df['a'], nlags=4, method='mle')

    lag_plot(df1['Thousands of Passengers'])
    lag_plot(df2['Births'])

    from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
    from statsmodels.tsa.statespace.tools import diff

    title = 'Autocorrelation: Daily Female Births'
    lags = 40
    plot_acf(df2, title=title, lags=lags)

    title = 'Autocorrelation: Airline Passengers'
    lags = 40
    plot_acf(df1, title=title, lags=lags)

    plt.interactive(False)
    plt.show()
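A minimal sketch of the relationship the snippet above relies on: acf is simply the autocovariance sequence from acovf rescaled by its lag-0 value. The toy series mirrors the small example above; fft is passed explicitly only because its default has changed across statsmodels releases.

import numpy as np
from statsmodels.tsa.stattools import acovf, acf

# Toy series, matching the small example above
a = np.array([13.0, 5.0, 11.0, 12.0, 9.0])

gamma = acovf(a, fft=False)                # sample autocovariances gamma_0 ... gamma_4
rho = acf(a, nlags=len(a) - 1, fft=False)  # sample autocorrelations

# The ACF is the autocovariance sequence divided by the lag-0 variance
assert np.allclose(rho, gamma / gamma[0])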
def test_acovf_fft_vs_convolution(demean, unbiased):
    np.random.seed(1)
    q = np.random.normal(size=100)
    F1 = acovf(q, demean=demean, unbiased=unbiased, fft=True)
    F2 = acovf(q, demean=demean, unbiased=unbiased, fft=False)
    assert_almost_equal(F1, F2, decimal=7)
def test_acovf_fft_vs_convolution(demean, adjusted):
    np.random.seed(1)
    q = np.random.normal(size=100)
    F1 = acovf(q, demean=demean, adjusted=adjusted, fft=True)
    F2 = acovf(q, demean=demean, adjusted=adjusted, fft=False)
    assert_almost_equal(F1, F2, decimal=7)
def test_acovf2d():
    dta = sunspots.load_pandas().data
    dta.index = DatetimeIndex(start='1700', end='2009', freq='A')[:309]
    del dta["YEAR"]
    res = acovf(dta)
    assert_equal(res, acovf(dta.values))
    X = np.random.random((10, 2))
    assert_raises(ValueError, acovf, X)
def test_acovf_nlags_missing(acovf_data, unbiased, demean, fft, missing):
    acovf_data = acovf_data.copy()
    acovf_data[1:3] = np.nan
    full = acovf(acovf_data, unbiased=unbiased, demean=demean, fft=fft,
                 missing=missing)
    limited = acovf(acovf_data, unbiased=unbiased, demean=demean, fft=fft,
                    missing=missing, nlag=10)
    assert_allclose(full[:11], limited)
def test_acovf2d(): dta = sunspots.load_pandas().data dta.index = Index(dates_from_range('1700', '2008')) del dta["YEAR"] res = acovf(dta) assert_equal(res, acovf(dta.values)) X = np.random.random((10,2)) assert_raises(ValueError, acovf, X)
def test_acovf2d(): dta = sunspots.load_pandas().data dta.index = Index(dates_from_range('1700', '2008')) del dta["YEAR"] res = acovf(dta) assert_equal(res, acovf(dta.values)) X = np.random.random((10, 2)) assert_raises(ValueError, acovf, X)
def test_acovf_nlags_missing(acovf_data, adjusted, demean, fft, missing):
    acovf_data = acovf_data.copy()
    acovf_data[1:3] = np.nan
    full = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft,
                 missing=missing)
    limited = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft,
                    missing=missing, nlag=10)
    assert_allclose(full[:11], limited)
def test_acovf2d():
    dta = sunspots.load_pandas().data
    dta.index = DatetimeIndex(start='1700', end='2009', freq='A')
    del dta["YEAR"]
    res = acovf(dta)
    assert_equal(res, acovf(dta.values))
    X = np.random.random((10, 2))
    assert_raises(ValueError, acovf, X)
def test_acovf2d(reset_randomstate):
    dta = sunspots.load_pandas().data
    dta.index = date_range(start='1700', end='2009', freq='A')[:309]
    del dta["YEAR"]
    res = acovf(dta, fft=False)
    assert_equal(res, acovf(dta.values, fft=False))
    x = np.random.random((10, 2))
    with pytest.raises(ValueError):
        acovf(x, fft=False)
def test_acovf_fft_vs_convolution():
    np.random.seed(1)
    q = np.random.normal(size=100)
    for demean in [True, False]:
        for unbiased in [True, False]:
            F1 = acovf(q, demean=demean, unbiased=unbiased, fft=True)
            F2 = acovf(q, demean=demean, unbiased=unbiased, fft=False)
            assert_almost_equal(F1, F2, decimal=7)
def test_acovf_nlags(acovf_data, unbiased, demean, fft, missing):
    full = acovf(acovf_data, unbiased=unbiased, demean=demean, fft=fft,
                 missing=missing)
    limited = acovf(acovf_data, unbiased=unbiased, demean=demean, fft=fft,
                    missing=missing, nlag=10)
    assert_allclose(full[:11], limited)
def levinson_durbin_nitime(s, order=10, isacov=False):
    '''Levinson-Durbin recursion for autoregressive processes
    '''
    # from nitime
    ## if sxx is not None and type(sxx) == np.ndarray:
    ##     sxx_m = sxx[:order+1]
    ## else:
    ##     sxx_m = ut.autocov(s)[:order+1]
    if isacov:
        sxx_m = s
    else:
        sxx_m = acovf(s)[:order+1]  # not tested

    phi = np.zeros((order+1, order+1), 'd')
    sig = np.zeros(order+1)
    # initial points for the recursion
    phi[1, 1] = sxx_m[1] / sxx_m[0]
    sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
    for k in range(2, order+1):
        phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1], sxx_m[1:k][::-1])) / sig[k-1]
        for j in range(1, k):
            phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
        sig[k] = sig[k-1] * (1 - phi[k, k]**2)

    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    return sigma_v, arcoefs, pacf, phi  # return everything
def levinson_durbin_nitime(s, order=10, isacov=False):
    '''Levinson-Durbin recursion for autoregressive processes
    '''
    # from nitime
    ## if sxx is not None and type(sxx) == np.ndarray:
    ##     sxx_m = sxx[:order+1]
    ## else:
    ##     sxx_m = ut.autocov(s)[:order+1]
    if isacov:
        sxx_m = s
    else:
        sxx_m = acovf(s)[:order + 1]  # not tested

    phi = np.zeros((order + 1, order + 1), 'd')
    sig = np.zeros(order + 1)
    # initial points for the recursion
    phi[1, 1] = sxx_m[1] / sxx_m[0]
    sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
    for k in range(2, order + 1):
        phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k - 1], sxx_m[1:k][::-1])) / sig[k - 1]
        for j in range(1, k):
            phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
        sig[k] = sig[k - 1] * (1 - phi[k, k]**2)

    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    return sigma_v, arcoefs, pacf, phi  # return everything
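For comparison, statsmodels ships its own levinson_durbin in statsmodels.tsa.stattools, which accepts either a raw series or a precomputed autocovariance sequence. A hedged sketch under that assumption; the simulated AR(2) series, seed, and order are illustrative only.

import numpy as np
from statsmodels.tsa.stattools import acovf, levinson_durbin

# Simulate an AR(2) process purely for illustration
np.random.seed(0)
x = np.zeros(5000)
for t in range(2, len(x)):
    x[t] = 0.5 * x[t - 1] - 0.3 * x[t - 2] + np.random.normal()

order = 10
sxx = acovf(x, fft=True)[:order + 1]

# Pass the autocovariances directly via isacov=True
sigma_v, arcoefs, pacf_, sigma, phi = levinson_durbin(sxx, nlags=order, isacov=True)
print(arcoefs[:2])  # should be roughly [0.5, -0.3]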
def DRAWAUTOCOV(x, llim):
    ac = acovf(x, unbiased=True, demean=True)
    lag = np.arange(len(ac))
    fig, sub = plt.subplots()
    sub.set_title("Autocovariance", fontdict={"weight": "bold"})
    sub.plot(lag, ac, ls="-", c="k")
    sub.set_xlabel("Time lag", fontdict={"weight": "bold"})
    sub.set_xlim(min(lag), llim)
def test_multivariate_acovf():
    _acovf = tools._compute_multivariate_acovf_from_coefficients

    # Test for a VAR(1) process. From Lutkepohl (2007), pages 27-28.
    # See (2.1.14) for Phi_1, (2.1.33) for Sigma_u, and (2.1.34) for Gamma_0
    Sigma_u = np.array([[2.25, 0, 0],
                        [0, 1.0, 0.5],
                        [0, 0.5, 0.74]])
    Phi_1 = np.array([[0.5, 0, 0],
                      [0.1, 0.1, 0.3],
                      [0, 0.2, 0.3]])
    Gamma_0 = np.array([[3.0, 0.161, 0.019],
                        [0.161, 1.172, 0.674],
                        [0.019, 0.674, 0.954]])
    assert_allclose(_acovf([Phi_1], Sigma_u)[0], Gamma_0, atol=1e-3)

    # Test for a VAR(2) process. From Lutkepohl (2007), pages 28-29
    # See (2.1.40) for Phi_1, Phi_2, (2.1.14) for Sigma_u, and (2.1.42) for
    # Gamma_0, Gamma_1
    Sigma_u = np.diag([0.09, 0.04])
    Phi_1 = np.array([[0.5, 0.1],
                      [0.4, 0.5]])
    Phi_2 = np.array([[0, 0],
                      [0.25, 0]])
    Gamma_0 = np.array([[0.131, 0.066],
                        [0.066, 0.181]])
    Gamma_1 = np.array([[0.072, 0.051],
                        [0.104, 0.143]])
    Gamma_2 = np.array([[0.046, 0.040],
                        [0.113, 0.108]])
    Gamma_3 = np.array([[0.035, 0.031],
                        [0.093, 0.083]])
    assert_allclose(
        _acovf([Phi_1, Phi_2], Sigma_u, maxlag=0),
        [Gamma_0], atol=1e-3)
    assert_allclose(
        _acovf([Phi_1, Phi_2], Sigma_u, maxlag=1),
        [Gamma_0, Gamma_1], atol=1e-3)
    assert_allclose(
        _acovf([Phi_1, Phi_2], Sigma_u),
        [Gamma_0, Gamma_1], atol=1e-3)
    assert_allclose(
        _acovf([Phi_1, Phi_2], Sigma_u, maxlag=2),
        [Gamma_0, Gamma_1, Gamma_2], atol=1e-3)
    assert_allclose(
        _acovf([Phi_1, Phi_2], Sigma_u, maxlag=3),
        [Gamma_0, Gamma_1, Gamma_2, Gamma_3], atol=1e-3)

    # Test sample acovf in the univariate case against sm.tsa.acovf
    x = np.arange(20) * 1.0
    assert_allclose(
        np.squeeze(tools._compute_multivariate_sample_acovf(x, maxlag=4)),
        acovf(x, fft=False)[:5])
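The VAR(1) value of Gamma_0 checked above can also be obtained directly from the discrete Lyapunov equation Gamma_0 = Phi_1 Gamma_0 Phi_1' + Sigma_u. A small sketch using scipy; the matrices are copied from the test and the rounding is only for display.

import numpy as np
from scipy.linalg import solve_discrete_lyapunov

Sigma_u = np.array([[2.25, 0, 0], [0, 1.0, 0.5], [0, 0.5, 0.74]])
Phi_1 = np.array([[0.5, 0, 0], [0.1, 0.1, 0.3], [0, 0.2, 0.3]])

# Solves Gamma_0 = Phi_1 @ Gamma_0 @ Phi_1.T + Sigma_u
Gamma_0 = solve_discrete_lyapunov(Phi_1, Sigma_u)
print(np.round(Gamma_0, 3))  # approximately the Gamma_0 from Lutkepohl (2007)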
def find_G_D(data, bootstrap_type, M):
    kk = np.arange(-M, M + 1)
    acov = acovf(data)[:M + 1]
    R_k = np.r_[acov[1:][::-1], acov]
    Ghat = np.sum(lam(kk / float(M)) * np.abs(kk) * R_k)
    if bootstrap_type == 'Circular':
        Dhat = (4 / 3.) * np.sum(lam(kk / float(M)) * R_k)**2
    else:
        Dhat = 2 * np.sum(lam(kk / float(M)) * R_k)**2
    return Ghat, Dhat
def acf_df(dat, mode="correlation", nlags=40): """returns acf,aconv,pacf dataframe""" if mode == "covariance": vals = sts.acovf(dat) # nlag arg does not exist in v0.9.0 elif mode == "correlation": vals = sts.acf(dat, nlags=nlags) elif mode == "pacf": vals = sts.pacf(dat, nlags=nlags) else: raise Exception("wtf") return pd.DataFrame(np.array([np.array(range(len(vals))), vals]).T, columns=["lag", "values"])
def get_ar_coef(y, sn, p, add_lag, pad=None):
    if add_lag == 'p':
        max_lag = p * 2
    else:
        max_lag = p + add_lag
    cov = acovf(y, fft=True)
    C_mat = toeplitz(cov[:max_lag], cov[:p]) - sn**2 * np.eye(max_lag, p)
    g = lstsq(C_mat, cov[1:max_lag + 1])[0]
    if pad:
        res = np.zeros(pad)
        res[:len(g)] = g
        return res
    else:
        return g
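get_ar_coef above is essentially a Yule-Walker style solve with an extra correction for observation noise (the sn**2 term). Below is a sketch of the uncorrected idea, recovering AR coefficients from the sample autocovariance alone; the simulated series, seed, and order are illustrative assumptions, not part of the original code.

import numpy as np
from scipy.linalg import toeplitz, lstsq
from statsmodels.tsa.stattools import acovf

# Simulate an AR(2) process for illustration
np.random.seed(1)
x = np.zeros(5000)
for t in range(2, len(x)):
    x[t] = 1.2 * x[t - 1] - 0.5 * x[t - 2] + np.random.normal()

p = 2
cov = acovf(x, fft=True)
C_mat = toeplitz(cov[:p])          # Gamma_p matrix of the Yule-Walker equations
g = lstsq(C_mat, cov[1:p + 1])[0]  # solve Gamma_p g = (gamma_1, ..., gamma_p)
print(g)  # roughly [1.2, -0.5]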
def test_brockwell_davis_example_511():
    # Make the series stationary
    endog = dowj.diff().iloc[1:]

    # Should have 77 observations
    assert_equal(len(endog), 77)

    # Autocovariances
    desired = [0.17992, 0.07590, 0.04885]
    assert_allclose(acovf(endog, fft=True, nlag=2), desired, atol=1e-5)

    # Yule-Walker
    yw, _ = yule_walker(endog, ar_order=1, demean=True)
    assert_allclose(yw.ar_params, [0.4219], atol=1e-4)
    assert_allclose(yw.sigma2, 0.1479, atol=1e-4)
def stationarity_convergence(phi):
    m = 20  # burn in
    n = len(phi) - m
    if n <= 0:
        return 0
    na = max(1, int(0.1 * n))
    nb = max(1, int(0.5 * n))
    phi_a = phi[(m + n - na):(m + n)]
    phi_b = phi[(m):(m + nb)]
    phi_b = phi[(m + n - nb):(m + n)]
    phi_b_bar = sum(phi_b) / nb
    phi_a_bar = sum(phi_a) / na
    if len(phi_a) <= 1 or len(phi_b) <= 1:
        return 1
    v_a = acovf(phi_a)
    v_b = acovf(phi_b)
    # n gets large and na/n and nb/n stay fixed
    z_g = (phi_a_bar - phi_b_bar) / np.sqrt(v_a[0] + v_b[0])
    return z_g
def calc_acf(a_values: Dict[str, np.ndarray]) -> List[np.ndarray]:
    """
    Calculate auto-correlation function (ACF)

    Args:
        a_values: A dict of adjacency matrix with neighbor atom id as keys
            and arrays of adjacent boolean (0/1) as values.

    Returns:
        A list of auto-correlation functions for each neighbor species.
    """
    acfs = []
    for atom_id, neighbors in a_values.items():
        # atom_id_numeric = int(re.search(r"\d+", atom_id).group())
        acfs.append(acovf(neighbors, demean=False, unbiased=True, fft=True))
    return acfs
def compute_autocorrelation_parameters(record):
    '''
    Compute features which are derived from the autocorrelation function with K = 1
    '''
    record.cor = acovf(record.nn_ints, unbiased=True, fft=True, nlag=None)
    record.cor /= record.cor[0]
    record.r1 = record.cor[1]  # Difference instead of first value?
    negatives = [neg[0] for neg in enumerate(record.cor) if neg[1] < 0]
    if len(negatives) > 0:
        record.m0 = negatives[0]
    else:
        record.m0 = 0  # Maybe not the best to set an 'undefined' value to 0. NaN?
def hidden_window_summary(self, axis, start, end):
    acf = stattools.acf(axis[start:end])
    acv = stattools.acovf(axis[start:end])
    sqd_error = (axis[start:end] - axis[start:end].mean())**2
    return [
        self.jitter(axis, start, end),
        self.mean_crossing_rate(axis, start, end),
        axis[start:end].mean(),
        axis[start:end].std(),
        axis[start:end].var(),
        axis[start:end].min(),
        axis[start:end].max(),
        acf.mean(),
        acf.std(),
        acv.mean(),
        acv.std(),
        skew(axis[start:end]),
        kurtosis(axis[start:end]),
        math.sqrt(sqd_error.mean())
    ]
def window_summary(axis, start, end):
    acf = stattools.acf(axis[start:end])
    acv = stattools.acovf(axis[start:end])
    sqd_error = (axis[start:end] - axis[start:end].mean())**2
    return [
        axis[start:end].mean(),
        axis[start:end].std(),
        axis[start:end].var(),
        axis[start:end].min(),
        axis[start:end].max(),
        acf.mean(),  # mean auto correlation
        acf.std(),   # standard deviation auto correlation
        acv.mean(),  # mean auto covariance
        acv.std(),   # standard deviation auto covariance
        skew(axis[start:end]),
        kurtosis(axis[start:end]),
        math.sqrt(sqd_error.mean())
    ]
def window_summary(axis, start, end):
    acf = stattools.acf(axis[start:end])
    acv = stattools.acovf(axis[start:end])
    sqd_error = (axis[start:end] - axis[start:end].mean())**2
    return [
        jitter(axis, start, end),  # signal processing feature
        mean_crossing_rate(axis, start, end),  # rate at which the signal crosses its mean value
        axis[start:end].mean(),
        axis[start:end].std(),
        axis[start:end].var(),
        axis[start:end].min(),
        axis[start:end].max(),
        acf.mean(),  # mean auto correlation
        acf.std(),   # standard deviation auto correlation
        acv.mean(),  # mean auto covariance
        acv.std(),   # standard deviation auto covariance
        skew(axis[start:end]),
        kurtosis(axis[start:end]),
        math.sqrt(sqd_error.mean())
    ]
def window_summary(axis, start, end):
    '''
    from https://github.com/theumairahmed/User-Identification-and-Classification-From-Walking-Activity/blob/master/Preprocessing.py
    '''
    acf = stattools.acf(axis[start:end])
    acv = stattools.acovf(axis[start:end])
    sqd_error = (axis[start:end] - axis[start:end].mean())**2
    return [
        axis[start:end].mean(),
        axis[start:end].std(),
        axis[start:end].var(),
        axis[start:end].min(),
        axis[start:end].max(),
        acf.mean(),  # mean auto correlation
        acf.std(),   # standard deviation auto correlation
        acv.mean(),  # mean auto covariance
        acv.std(),   # standard deviation auto covariance
        skew(axis[start:end]),
        kurtosis(axis[start:end]),
        math.sqrt(sqd_error.mean())
    ]
def approx_ar_cov_sum(x):
    params, sigma2, order = fit_ar(x)
    param_sum = params.sum()
    asympt_var = sigma2 / ((1 - param_sum) ** 2)
    gammas = acovf(x, nlag=order - 1, fft=False)
    if order != 0:
        Gamma = 0
        for i in range(1, order + 1):
            for k in range(1, i + 1):
                Gamma = Gamma + params[i - 1] * k * gammas[i - k]
        # Gamma is computed using the equation at the bottom of p. 9 in
        # https://arxiv.org/pdf/1804.05975.pdf
        # See also https://stats.stackexchange.com/questions/371792/sum-of-autocovariances-for-arp-model/372006#372006
        Gamma = 2 * (
            Gamma + 0.5 * (asympt_var - gammas[0]) * (params * range(1, order + 1)).sum()
        ) / (1 - param_sum)
    else:
        Gamma = 0
    return Gamma, asympt_var
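For an AR(1) process the quantity approximated above has a closed form: gamma_k = sigma^2 phi^|k| / (1 - phi^2), so the two-sided sum of autocovariances equals sigma^2 / (1 - phi)^2. A small numerical check of that identity; phi and sigma^2 are arbitrary illustrative values.

import numpy as np

phi, sigma2 = 0.7, 1.0

def gamma(k):
    # AR(1) autocovariance at lag k
    return sigma2 * phi**abs(k) / (1 - phi**2)

# Truncated two-sided sum of autocovariances vs. the closed-form long-run variance
approx = sum(gamma(k) for k in range(-200, 201))
exact = sigma2 / (1 - phi)**2
print(approx, exact)  # both approximately 11.11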
def weak_wn_acf(returns, lags=40, alpha=0.95):
    n = len(returns)
    alpha = 0.95
    gamma_0 = acovf(returns)[0]
    returns_sq = np.array(returns)**2
    sigma_hat_p = ((1 / n) * np.array(
        [returns_sq[:n - h] @ returns_sq[h:] for h in range(1, n)]) / (gamma_0**2))**0.5
    upper = sigma_hat_p * norm.ppf(alpha) / (n**0.5)
    upper = np.concatenate([[float('NaN')], upper])
    lower = -sigma_hat_p * norm.ppf(alpha) / (n**0.5)
    lower = np.concatenate([[float('NaN')], lower])

    plot_acf(returns, lags=lags, alpha=1 - alpha)
    plt.plot(upper[:lags + 1])
    plt.plot(lower[:lags + 1])
    plt.legend([
        'Strong WN CI', 'Auto Correlation', 'Weak WN CI Upper',
        'Weak WN CI Lower'
    ])
    plt.show()
    kwargs.setdefault('linestyle', 'None')
    a, = self.plot(lags, c, **kwargs)
    b = None
    return lags, c, a, b


arrvs = ar_generator()
##arma = ARIMA()
##res = arma.fit(arrvs[0], 4, 0)
arma = ARIMA(arrvs[0])
res = arma.fit((4, 0, 0))
print(res[0])

acf1 = acf(arrvs[0])
acovf1b = acovf(arrvs[0], unbiased=False)
acf2 = autocorr(arrvs[0])
acf2m = autocorr(arrvs[0] - arrvs[0].mean())
print(acf1[:10])
print(acovf1b[:10])
print(acf2[:10])
print(acf2m[:10])

x = arma_generate_sample([1.0, -0.8], [1.0], 500)
print(acf(x)[:20])
import statsmodels.api as sm
print(sm.regression.yule_walker(x, 10))

import matplotlib.pyplot as plt
#ax = plt.axes()
plt.plot(x)
def test_pandasacovf():
    s = Series(lrange(1, 11))
    assert_almost_equal(acovf(s), acovf(s.values))
def test_acovf_warns(acovf_data):
    with pytest.warns(FutureWarning):
        acovf(acovf_data)
def durbin_levinson(endog, ar_order=0, demean=True, adjusted=False):
    """
    Estimate AR parameters at multiple orders using Durbin-Levinson recursions.

    Parameters
    ----------
    endog : array_like or SARIMAXSpecification
        Input time series array, assumed to be stationary.
    ar_order : int, optional
        Autoregressive order. Default is 0.
    demean : bool, optional
        Whether to estimate and remove the mean from the process prior to
        fitting the autoregressive coefficients. Default is True.
    adjusted : bool, optional
        Whether to use the "adjusted" autocovariance estimator, which uses
        n - h degrees of freedom rather than n. This option can result in
        a non-positive definite autocovariance matrix. Default is False.

    Returns
    -------
    parameters : list of SARIMAXParams objects
        List elements correspond to estimates at different `ar_order`. For
        example, parameters[0] is an `SARIMAXParams` instance corresponding
        to `ar_order=0`.
    other_results : Bunch
        Includes one component, `spec`, containing the `SARIMAXSpecification`
        instance corresponding to the input arguments.

    Notes
    -----
    The primary reference is [1]_, section 2.5.1.

    This procedure assumes that the series is stationary.

    References
    ----------
    .. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
       Introduction to Time Series and Forecasting. Springer.
    """
    max_spec = SARIMAXSpecification(endog, ar_order=ar_order)
    endog = max_spec.endog

    # Make sure we have a consecutive process
    if not max_spec.is_ar_consecutive:
        raise ValueError('Durbin-Levinson estimation unavailable for models'
                         ' with seasonal or otherwise non-consecutive AR'
                         ' orders.')

    gamma = acovf(endog, adjusted=adjusted, fft=True, demean=demean,
                  nlag=max_spec.ar_order)

    # If no AR component, just a variance computation
    if max_spec.ar_order == 0:
        ar_params = [None]
        sigma2 = [gamma[0]]
    # Otherwise, AR model
    else:
        Phi = np.zeros((max_spec.ar_order, max_spec.ar_order))
        v = np.zeros(max_spec.ar_order + 1)

        Phi[0, 0] = gamma[1] / gamma[0]
        v[0] = gamma[0]
        v[1] = v[0] * (1 - Phi[0, 0]**2)

        for i in range(1, max_spec.ar_order):
            tmp = Phi[i - 1, :i]
            Phi[i, i] = (gamma[i + 1] - np.dot(tmp, gamma[i:0:-1])) / v[i]
            Phi[i, :i] = (tmp - Phi[i, i] * tmp[::-1])
            v[i + 1] = v[i] * (1 - Phi[i, i]**2)

        ar_params = [None] + [Phi[i, :i + 1] for i in range(max_spec.ar_order)]
        sigma2 = v

    # Compute output
    out = []
    for i in range(max_spec.ar_order + 1):
        spec = SARIMAXSpecification(ar_order=i)
        p = SARIMAXParams(spec=spec)
        if i == 0:
            p.params = sigma2[i]
        else:
            p.params = np.r_[ar_params[i], sigma2[i]]
        out.append(p)

    # Construct other results
    other_results = Bunch({
        'spec': spec,
    })

    return out, other_results
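A minimal usage sketch for the estimator above, assuming it lives at statsmodels.tsa.arima.estimators.durbin_levinson (the location used by recent statsmodels releases); the simulated AR(1) series and seed are illustrative only.

import numpy as np
from statsmodels.tsa.arima.estimators.durbin_levinson import durbin_levinson

# Simulated AR(1) series with coefficient 0.6, for illustration
np.random.seed(12345)
e = np.random.normal(size=2000)
y = np.zeros(2000)
for t in range(1, len(y)):
    y[t] = 0.6 * y[t - 1] + e[t]

params, other = durbin_levinson(y, ar_order=3)
# params[k] holds the AR(k) fit; higher-order entries include all k coefficients
print(params[1].ar_params)  # roughly [0.6]
print(params[3].ar_params)  # the extra lags should be close to zero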
def innovations(endog, ma_order=0, demean=True):
    """
    Estimate MA parameters using innovations algorithm.

    Parameters
    ----------
    endog : array_like or SARIMAXSpecification
        Input time series array, assumed to be stationary.
    ma_order : int, optional
        Maximum moving average order. Default is 0.
    demean : bool, optional
        Whether to estimate and remove the mean from the process prior to
        fitting the moving average coefficients. Default is True.

    Returns
    -------
    parameters : list of SARIMAXParams objects
        List elements correspond to estimates at different `ma_order`. For
        example, parameters[0] is an `SARIMAXParams` instance corresponding
        to `ma_order=0`.
    other_results : Bunch
        Includes one component, `spec`, containing the `SARIMAXSpecification`
        instance corresponding to the input arguments.

    Notes
    -----
    The primary reference is [1]_, section 5.1.3.

    This procedure assumes that the series is stationary.

    References
    ----------
    .. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
       Introduction to Time Series and Forecasting. Springer.
    """
    spec = max_spec = SARIMAXSpecification(endog, ma_order=ma_order)
    endog = max_spec.endog

    if demean:
        endog = endog - endog.mean()

    if not max_spec.is_ma_consecutive:
        raise ValueError('Innovations estimation unavailable for models with'
                         ' seasonal or otherwise non-consecutive MA orders.')

    sample_acovf = acovf(endog, fft=True)
    theta, v = innovations_algo(sample_acovf, nobs=max_spec.ma_order + 1)
    ma_params = [theta[i, :i] for i in range(1, max_spec.ma_order + 1)]
    sigma2 = v

    out = []
    for i in range(max_spec.ma_order + 1):
        spec = SARIMAXSpecification(ma_order=i)
        p = SARIMAXParams(spec=spec)
        if i == 0:
            p.params = sigma2[i]
        else:
            p.params = np.r_[ma_params[i - 1], sigma2[i]]
        out.append(p)

    # Construct other results
    other_results = Bunch({
        'spec': spec,
    })

    return out, other_results
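And a matching sketch for the innovations estimator, again assuming the statsmodels.tsa.arima.estimators.innovations location; the MA(1) series is simulated only to illustrate the call.

import numpy as np
from statsmodels.tsa.arima.estimators.innovations import innovations

# Simulated MA(1) series with coefficient 0.4, for illustration
np.random.seed(0)
e = np.random.normal(size=5000)
y = e[1:] + 0.4 * e[:-1]

params, other = innovations(y, ma_order=10)
# The first MA coefficient of the order-q fit approaches 0.4 as q grows
print(params[10].ma_params[0])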
B[:, :, 1] = [[0, 0], [0, 0], [0, 1]]

xhat5, err5 = VARMA(x, B, C)
#print(err5)
#in differences
#VARMA(np.diff(x, axis=0), B, C)

#Note:
# * signal correlate applies same filter to all columns if kernel.shape[1] < K
#   e.g. signal.correlate(x0, np.ones((3, 1)), 'valid')
# * if kernel.shape[1] == K, then `valid` produces a single column
#   -> possible to run signal.correlate K times with different filters,
#   see the following example, which replicates VAR filter
x0 = np.column_stack([np.arange(T), 2 * np.arange(T)])
B[:, :, 0] = np.ones((P, K))
B[:, :, 1] = np.ones((P, K))
B[1, 1, 1] = 0
xhat0 = VAR(x0, B)
xcorr00 = signal.correlate(x0, B[:, :, 0])  #[:, 0]
xcorr01 = signal.correlate(x0, B[:, :, 1])
print(np.all(signal.correlate(x0, B[:, :, 0], 'valid')[:-1, 0] == xhat0[P:, 0]))
print(np.all(signal.correlate(x0, B[:, :, 1], 'valid')[:-1, 0] == xhat0[P:, 1]))

#import error
#from movstat import acovf, acf
from statsmodels.tsa.stattools import acovf, acf
aav = acovf(x[:, 0])
print(aav[0] == np.var(x[:, 0]))
aac = acf(x[:, 0])
def test_acovf_error(acovf_data):
    with pytest.raises(ValueError):
        acovf(acovf_data, nlag=250, fft=False)
    plt.plot(wm, sdm / sdm[0], '-', wm[maxind], sdm[maxind] / sdm[0], 'o')
else:
    plt.plot(wm, sdm, '-', wm[maxind], sdm[maxind], 'o')
plt.title('matplotlib')

if hastalkbox:
    sdp, wp = stbs.periodogram(x)
    plt.subplot(2, 3, 3)
    if rescale:
        plt.plot(wp, sdp / sdp[0])
    else:
        plt.plot(wp, sdp)
    plt.title('stbs.periodogram')

xacov = acovf(x, unbiased=False)
plt.subplot(2, 3, 4)
plt.plot(xacov)
plt.title('autocovariance')

nr = len(x)  #*2/3
#xacovfft = np.fft.fft(xacov[:nr], 2*nr-1)
xacovfft = np.fft.fft(np.correlate(x, x, 'full'))
#abs(xacovfft)**2 or equivalently
xacovfft = xacovfft * xacovfft.conj()

plt.subplot(2, 3, 5)
if rescale:
    plt.plot(xacovfft[:nr] / xacovfft[0])
else:
    plt.plot(xacovfft[:nr])
print "T = %i s, t = %3.2f ms"%(T,1000.*newInt) #try: bins, lc = binLightCurve(timeSpan[0],timeSpan[0]+T, times, newInt) lcAve = np.mean(lc) print "<I> = %4.2f"%lcAve lcVar = np.power(np.std(lc),2) print "sigma(I)^2 = %3.3f"%lcVar lcNorm = lc/lcAve #plot timestream with T total seconds binned into t=(n+1)*intTime integration time bins ax1.plot(bins,np.append(lcNorm,lcNorm[-1]),drawstyle='steps-post', label="%i s"%(T)) print "Calculating auto-covariance sequence..." #acvs = acovf(lc,unbiased=True,demean=True) #* 1.0/(lcVar-lcAve) acvs = acovf(lc,unbiased=False,demean=False) #corr,ljb,pvalue = acf(lc,unbiased=True,qstat=True,nlags = T/newInt) corr,ljb,pvalue = acf(lc,unbiased=False,qstat=True,nlags = T/newInt) standalone_ljb, standalone_pvalue = acorr_ljungbox(lc) print "Min(p-value) of acf Ljung-Box test = %f"%np.min(pvalue) try: print "Min(p) of acf LB at index %i of %i"%(np.where(pvalue==np.min(pvalue))[0],len(pvalue)) mostCorrLag = np.where(pvalue==np.min(pvalue))[0] * newInt*1000 print "Min(p) of acf LB at lag = %4.3f ms"%mostCorrLag except TypeError: print "Min(p) of acf LB at index %i of %i"%(np.where(pvalue==np.min(pvalue))[0][0],len(pvalue)) mostCorrLag = np.where(pvalue==np.min(pvalue))[0][0] * newInt*1000
def test_pandasacovf():
    s = Series(range(1, 11))
    assert_almost_equal(acovf(s), acovf(s.values))
def test_pandasacovf():
    s = Series(lrange(1, 11))
    assert_almost_equal(acovf(s, fft=False), acovf(s.values, fft=False))
plt.figure(1)
plt.clf()

maxm = 15

# sunspot numbers
with open('sunspot.txt', encoding='utf-8') as f:
    data = np.array([float(k) for k in f.readlines()])

# number of observations
N = len(data)

# convert to log values
#data = np.log10(data)

# subtract the mean
data = data - np.mean(data)

# autocovariance function
acovf = stattools.acovf(data)
# acovf = acovf * (N - 1) / N

# method that solves the Yule-Walker equations directly
"""
print("Yule-Walker")
mar, arc_min, sig2_min, AIC_min = yule_walker(N, acovf, maxm)
print('Best model: m=', mar)
# spectrum
t, logp1 = calc_spectrum(200, arc_min, sig2_min)
"""

# Levinson's algorithm
print()
print("Levinson method")
mar, arc_min, sig2_min, AIC_min = ar.levinson(acovf, N, maxm)