def test_autocorr():
    N = 128
    ar_seq, _, _ = utils.ar_generator(N=N)
    rxx = utils.autocorr(ar_seq)
    npt.assert_(rxx[0] == rxx.max(),
                "Zero lag autocorrelation is not maximum autocorrelation")
    rxx = utils.autocorr(ar_seq, all_lags=True)
    npt.assert_(rxx[127] == rxx.max(),
                "Zero lag autocorrelation is not maximum autocorrelation")

def test_autocorr():
    N = 128
    ar_seq, _, _ = utils.ar_generator(N=N)
    rxx = utils.autocorr(ar_seq)
    yield nt.assert_true, rxx[0] == 1, \
        "Zero lag autocorrelation is not equal to 1"
    rxx = utils.autocorr(ar_seq, all_lags=True)
    yield nt.assert_true, rxx[127] == 1, \
        "Zero lag autocorrelation is not equal to 1"

def test_autocorr():
    N = 128
    ar_seq, _, _ = utils.ar_generator(N=N)
    rxx = utils.autocorr(ar_seq)
    nt.assert_true(rxx[0] == rxx.max(),
                   'Zero lag autocorrelation is not maximum autocorrelation')
    rxx = utils.autocorr(ar_seq, all_lags=True)
    nt.assert_true(rxx[127] == rxx.max(),
                   'Zero lag autocorrelation is not maximum autocorrelation')

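# Illustrative sketch (not part of the test suite above): the zero-lag
# property these tests rely on can be checked with numpy alone. This assumes
# a normalized autocorrelation, as nitime's utils.autocorr provides.
import numpy as np

def _autocorr_sketch(x):
    # full (all-lags) autocorrelation, normalized so the zero-lag value is 1
    x = x - x.mean()
    rxx = np.correlate(x, x, mode='full')
    return rxx / rxx.max()

x = np.random.randn(128)
rxx = _autocorr_sketch(x)
assert rxx[127] == rxx.max()  # zero lag sits at index N-1 in all-lags form
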
def ar_nitime(x, order=1, center=False):
    """Derive a model of the noise present in the functional timeseries for
    the calculation of the standardized DVARS.

    - Borrowed from nipy.algorithms.AR_est_YW. aka "from nitime import
      algorithms as alg".

    :type x: Nibabel data
    :param x: The vector of one voxel's timeseries.
    :type order: int
    :param order: (default: 1) Which lag of the autocorrelation of the
                  timeseries to use in the calculation.
    :type center: bool
    :param center: (default: False) Whether to center the timeseries (to
                   demean it).
    :rtype: float
    :return: The modeled noise value for the current voxel's timeseries.
    """
    from nitime.lazy import scipy_linalg as linalg
    import nitime.utils as utils
    if center:
        x = x.copy()
        x = x - x.mean()
    r_m = utils.autocorr(x)[:order + 1]
    Tm = linalg.toeplitz(r_m[:order])
    y = r_m[1:]
    ak = linalg.solve(Tm, y)
    return ak[0]

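# A minimal usage sketch for ar_nitime (illustrative, not from the original
# source), assuming nitime is installed; ar_generator is used exactly as in
# the tests above:
import nitime.utils as utils

ar_seq, _, _ = utils.ar_generator(N=128)
noise_model = ar_nitime(ar_seq, order=1, center=True)
# noise_model is the estimated lag-1 AR coefficient of the timeseries
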
def AR_est_YW(x, order, rxx=None):
    r"""Determine the autoregressive (AR) model of a random process x using
    the Yule-Walker equations. The AR model takes this convention:

    .. math::

      x(n) = a(1)x(n-1) + a(2)x(n-2) + \dots + a(p)x(n-p) + e(n)

    where e(n) is a zero-mean white noise process with variance sig_sq, and
    p is the order of the AR model. This method returns the a(i) and sig_sq.

    The orthogonality property of minimum mean square error estimates states
    that

    .. math::

      E\{e(n)x^{*}(n-k)\} = 0 \quad 1\leq k\leq p

    Inserting the definition of the error signal into the equations above
    yields the Yule-Walker system of equations:

    .. math::

      R_{xx}(k) = \sum_{i=1}^{p}a(i)R_{xx}(k-i) \quad 1\leq k\leq p

    Similarly, the variance of the error process is

    .. math::

      E\{e(n)e^{*}(n)\} = E\{e(n)x^{*}(n)\} = R_{xx}(0) - \sum_{i=1}^{p}a(i)R^{*}(i)

    Parameters
    ----------
    x : ndarray
        The sampled autoregressive random process
    order : int
        The order p of the AR system
    rxx : ndarray (optional)
        An optional, possibly unbiased estimate of the autocorrelation of x

    Returns
    -------
    ak, sig_sq : The estimated AR coefficients and innovations variance
    """
    if rxx is not None and type(rxx) == np.ndarray:
        r_m = rxx[:order + 1]
    else:
        r_m = utils.autocorr(x)[:order + 1]

    Tm = linalg.toeplitz(r_m[:order])
    y = r_m[1:]
    ak = linalg.solve(Tm, y)
    sigma_v = r_m[0].real - np.dot(r_m[1:].conj(), ak).real
    return ak, sigma_v

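# Hedged usage sketch for AR_est_YW (illustrative, not from the original
# source). Assumes nitime's utils.ar_generator accepts a coefs array of true
# AR coefficients, as in the nitime package:
import numpy as np
import nitime.utils as utils

true_coefs = np.array([0.9, -0.5])
ar_seq, _, _ = utils.ar_generator(N=1024, coefs=true_coefs)
ak, sig_sq = AR_est_YW(ar_seq, order=2)
# ak should be close to true_coefs; sig_sq estimates the innovation variance
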
def AR_est_LD(x, order, rxx=None):
    """Levinson-Durbin algorithm for solving the Hermitian Toeplitz system
    R[m]w[m] = r[m+1]:

    (XXX review this definition for complex)

             [[r(0)     r(1)     r(2)    ... r(m-1)],
    R[m] =    [r*(1)    r(0)     r(1)    ... r(m-2)],
              [...                              ...],
              [r*(m-1)  r*(m-2)  r*(m-3) ... r(0)  ]]

    r[m+1] = [r(1), r(2), ..., r(m)].T

    r(k) = E{X(t+k)X*(t)}

    and w[m] is the vector of m AR coefficients

    Parameters
    ----------
    x : ndarray
        the zero-mean stochastic process
    order : int
        the AR model order, i.e. the rank of the system
    rxx : ndarray, optional
        (at least) order+1 samples of the autocorrelation sequence

    Returns
    -------
    ak, sig_sq
        The AR coefficients for 1 <= k <= P, and the variance of the driving
        white noise process
    """
    if rxx is not None and type(rxx) == np.ndarray:
        rxx_m = rxx[:order + 1]
    else:
        rxx_m = utils.autocorr(x)[:order + 1]

    w = np.zeros((order + 1,), rxx_m.dtype)
    # initialize the recursion with the R[0]w[1]=r[1] solution (p=1)
    b = rxx_m[0].real
    w_k = rxx_m[1] / b
    w[1] = w_k
    p = 2
    while p <= order:
        b *= 1 - (w_k * w_k.conj()).real
        w_k = (rxx_m[p] - (w[1:p] * rxx_m[1:p][::-1]).sum()) / b
        # update w_k from k=1,2,...,p-1
        # with a correction from w*_i i=p-1,p-2,...,1
        w[1:p] = w[1:p] - w_k * w[1:p][::-1].conj()
        w[p] = w_k
        p += 1
    b *= 1 - (w_k * w_k.conj()).real
    return w[1:], b

def ar_nitime(x, order=1, center=False):
    """
    Borrowed from nipy.algorithms.AR_est_YW, aka
    "from nitime import algorithms as alg".

    We could speed this up by having the autocorr only compute lag 1.
    """
    from nitime.lazy import scipy_linalg as linalg
    import nitime.utils as utils
    if center:
        x = x.copy()
        x = x - x.mean()
    r_m = utils.autocorr(x)[:order + 1]
    Tm = linalg.toeplitz(r_m[:order])
    y = r_m[1:]
    ak = linalg.solve(Tm, y)
    return ak[0]

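# The docstring above notes that computing only lag 1 would be faster when
# order=1. A minimal sketch of that shortcut (hypothetical helper, not part
# of nitime), matching the Yule-Walker AR(1) solution a = r(1)/r(0) that
# ar_nitime(x, order=1, center=True) computes:
import numpy as np

def ar1_lag1(x):
    x = x - x.mean()
    # ratio of lag-1 autocovariance to variance, i.e. r(1)/r(0)
    return np.dot(x[:-1], x[1:]) / np.dot(x, x)
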
def AR_est_YW(x, order, rxx=None):
    r"""Determine the autoregressive (AR) model of a random process x using
    the Yule-Walker equations. The AR model takes this convention:

        x(n) = a(1)x(n-1) + a(2)x(n-2) + ... + a(P)x(n-P) + e(n)

    where e(n) is a zero-mean white noise process with variance sig_sq, and
    P is the order of the AR model. This method returns the a(i) and sig_sq.

    The orthogonality property of minimum mean square error estimates yields
    the Yule-Walker equations.

    Parameters
    ----------
    x : ndarray
        The sampled autoregressive random process
    order : int
        The order P of the AR system
    rxx : ndarray (optional)
        An optional, possibly unbiased estimate of the autocorrelation of x

    Returns
    -------
    ak, sig_sq : The estimated AR coefficients and innovations variance
    """
    if rxx is not None and type(rxx) == np.ndarray:
        r_m = rxx[:order + 1]
    else:
        r_m = utils.autocorr(x)[:order + 1]

    Tm = linalg.toeplitz(r_m[:order])
    y = r_m[1:]
    ak = linalg.solve(Tm, y)
    sigma_v = r_m[0].real - np.dot(r_m[1:].conj(), ak).real
    return ak, sigma_v

def AR_est_LD(x, order, rxx=None):
    r"""Levinson-Durbin algorithm for solving the Hermitian Toeplitz system
    of Yule-Walker equations in the AR estimation problem

    .. math::

       T^{(p)}a^{(p)} = \gamma^{(p+1)}

    where

    .. math::
       :nowrap:

       \begin{align*}
       T^{(p)} &= \begin{pmatrix}
          R_{0} & R_{1}^{*} & \cdots & R_{p-1}^{*}\\
          R_{1} & R_{0} & \cdots & R_{p-2}^{*}\\
          \vdots & \vdots & \ddots & \vdots\\
          R_{p-1} & R_{p-2} & \cdots & R_{0}
       \end{pmatrix}\\
       a^{(p)} &= \begin{pmatrix} a_1 & a_2 & \cdots & a_p \end{pmatrix}^{T}\\
       \gamma^{(p+1)} &= \begin{pmatrix} R_1 & R_2 & \cdots & R_p \end{pmatrix}^{T}
       \end{align*}

    and :math:`R_k` is the autocorrelation of the kth lag

    Parameters
    ----------
    x : ndarray
        the zero-mean stochastic process
    order : int
        the AR model order, i.e. the rank of the system
    rxx : ndarray, optional
        (at least) order+1 samples of the autocorrelation sequence

    Returns
    -------
    ak, sig_sq
        The AR coefficients for 1 <= k <= p, and the variance of the driving
        white noise process
    """
    if rxx is not None and type(rxx) == np.ndarray:
        rxx_m = rxx[:order + 1]
    else:
        rxx_m = utils.autocorr(x)[:order + 1]

    w = np.zeros((order + 1,), rxx_m.dtype)
    # initialize the recursion with the R[0]w[1]=r[1] solution (p=1)
    b = rxx_m[0].real
    w_k = rxx_m[1] / b
    w[1] = w_k
    p = 2
    while p <= order:
        b *= 1 - (w_k * w_k.conj()).real
        w_k = (rxx_m[p] - (w[1:p] * rxx_m[1:p][::-1]).sum()) / b
        # update w_k from k=1,2,...,p-1
        # with a correction from w*_i i=p-1,p-2,...,1
        w[1:p] = w[1:p] - w_k * w[1:p][::-1].conj()
        w[p] = w_k
        p += 1
    b *= 1 - (w_k * w_k.conj()).real
    return w[1:], b

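# Hedged comparison sketch (illustrative, not from the original source):
# AR_est_LD solves the same Yule-Walker system as AR_est_YW, but via the
# O(p^2) Levinson-Durbin recursion, so the two estimates should agree to
# numerical precision on a well-conditioned problem:
import numpy as np
import nitime.utils as utils

ar_seq, _, _ = utils.ar_generator(N=1024)
ak_ld, sq_ld = AR_est_LD(ar_seq, order=2)
ak_yw, sq_yw = AR_est_YW(ar_seq, order=2)
np.testing.assert_allclose(ak_ld, ak_yw)
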
def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
    """
    Returns the Discrete Prolate Spheroidal Sequences of orders [0, Kmax-1]
    for a given frequency-spacing multiple NW and sequence length N.

    Parameters
    ----------
    N : int
        sequence length
    NW : float, unitless
        standardized half bandwidth corresponding to 2NW = BW*f0 = BW*N/dt
        but with dt taken as 1
    Kmax : int
        number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
    interp_from : int (optional)
        The dpss can be calculated using interpolation from a set of dpss
        with the same NW and Kmax, but shorter N. This is the length of this
        shorter set of dpss windows.
    interp_kind : str (optional)
        This input variable is passed to scipy.interpolate.interp1d and
        specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying
        the order of the spline interpolator to use.

    Returns
    -------
    v, e : tuple
        v is an array of DPSS windows shaped (Kmax, N),
        e are the eigenvalues

    Notes
    -----
    Tridiagonal form of DPSS calculation from:

    Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
    uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430
    """
    Kmax = int(Kmax)
    W = float(NW) / N
    nidx = np.arange(N, dtype='d')

    # In this case, we create the dpss windows of the smaller size
    # (interp_from) and then interpolate to the larger size (N)
    if interp_from is not None:
        if interp_from > N:
            e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
            e_s += 'and N is: %s. ' % N
            e_s += 'Please enter interp_from smaller than N.'
            raise ValueError(e_s)
        dpss = []
        d, e = dpss_windows(interp_from, NW, Kmax)
        for this_d in d:
            x = np.arange(this_d.shape[-1])
            I = interpolate.interp1d(x, this_d, kind=interp_kind)
            d_temp = I(np.arange(0, this_d.shape[-1] - 1,
                                 float(this_d.shape[-1] - 1) / N))
            # Rescale to unit energy:
            d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
            dpss.append(d_temp)
        dpss = np.array(dpss)
    else:
        # here we want to set up an optimization problem to find a sequence
        # whose energy is maximally concentrated within band [-W,W].
        # Thus, the measure lambda(T,W) is the ratio between the energy within
        # that band, and the total energy. This leads to the eigen-system
        # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
        # eigenvalue is the sequence with maximally concentrated energy. The
        # collection of eigenvectors of this system are called Slepian
        # sequences, or discrete prolate spheroidal sequences (DPSS). Only the
        # first K, K = 2NW/dt orders of DPSS will exhibit good spectral
        # concentration
        # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]

        # Here I set up an alternative symmetric tri-diagonal eigenvalue
        # problem such that
        # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
        # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
        # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
        # [see Percival and Walden, 1993]
        diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
        off_diag = np.zeros_like(nidx)
        off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
        # put the diagonals in LAPACK "packed" storage
        ab = np.zeros((2, N), 'd')
        ab[1] = diagonal
        ab[0, 1:] = off_diag[:-1]
        # only calculate the highest Kmax eigenvalues
        w = linalg.eigvals_banded(ab, select='i',
                                  select_range=(N - Kmax, N - 1))
        w = w[::-1]

        # find the corresponding eigenvectors via inverse iteration
        t = np.linspace(0, np.pi, N)
        dpss = np.zeros((Kmax, N), 'd')
        for k in range(Kmax):
            dpss[k] = utils.tridi_inverse_iteration(
                diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
            )

    # By convention (Percival and Walden, 1993 pg 379)
    # * symmetric tapers (k=0,2,4,...) should have a positive average.
    # * antisymmetric tapers should begin with a positive lobe
    fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
    for i, f in enumerate(fix_symmetric):
        if f:
            dpss[2 * i] *= -1
    fix_skew = (dpss[1::2, 1] < 0)
    for i, f in enumerate(fix_skew):
        if f:
            dpss[2 * i + 1] *= -1

    # Now find the eigenvalues of the original spectral concentration problem
    # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
    dpss_rxx = utils.autocorr(dpss) * N
    r = 4 * W * np.sinc(2 * W * nidx)
    r[0] = 2 * W
    eigvals = np.dot(dpss_rxx, r)

    return dpss, eigvals

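# Minimal usage sketch for dpss_windows (illustrative): four tapers for a
# length-256 window at time-bandwidth product NW=4:
tapers, eigvals = dpss_windows(N=256, NW=4, Kmax=4)
# tapers.shape == (4, 256); each row has unit energy
# eigvals are close to 1 for low orders, reflecting strong in-band
# energy concentration
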