Example #1
    def _fit_start_params_hr(self, order):
        """
        Get starting parameters for fit.

        Parameters
        ----------
        order : iterable
            (p,q,k) - AR lags, MA lags, and number of exogenous variables
            including the constant.

        Returns
        -------
        start_params : array
            A first guess at the starting parameters.

        Notes
        -----
        If necessary, fits an AR process with the laglength selected according
        to best BIC.  Obtain the residuals.  Then fit an ARMA(p,q) model via
        OLS using these residuals for a first approximation.  Uses a separate
        OLS regression to find the coefficients of exogenous variables.

        References
        ----------
        Hannan, E.J. and Rissanen, J.  1982.  "Recursive estimation of mixed
            autoregressive-moving average order."  `Biometrika`.  69.1.
        """
        p, q, k = order
        start_params = zeros((p + q + k))
        endog = self.endog.copy() # copy because overwritten
        exog = self.exog
        if k != 0:
            ols_params = GLS(endog, exog).fit().params
            start_params[:k] = ols_params
            endog -= np.dot(exog, ols_params).squeeze()
        if q != 0:
            if p != 0:
                armod = AR(endog).fit(ic='bic', trend='nc')
                arcoefs_tmp = armod.params
                p_tmp = armod.k_ar
                resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
                                trim='both'), arcoefs_tmp)
                if p < p_tmp + q:
                    endog_start = p_tmp + q - p
                    resid_start = 0
                else:
                    endog_start = 0
                    resid_start = p - p_tmp - q
                lag_endog = lagmat(endog, p, 'both')[endog_start:]
                lag_resid = lagmat(resid, q, 'both')[resid_start:]
                # stack ar lags and resids
                X = np.column_stack((lag_endog, lag_resid))
                coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
                start_params[k:k + p + q] = coefs
            else:
                start_params[k + p:k + p + q] = yule_walker(endog, order=q)[0]
        if q == 0 and p != 0:
            arcoefs = yule_walker(endog, order=p)[0]
            start_params[k:k + p] = arcoefs
        return start_params
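A minimal standalone sketch of the Hannan-Rissanen two-step idea implemented above, assuming only numpy and statsmodels; the simulated series and the orders are illustrative:

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import lagmat

np.random.seed(0)
n, p, q, p_long = 500, 1, 1, 10
e = np.random.randn(n)
y = np.zeros(n)
for t in range(1, n):
    y[t] = 0.6 * y[t - 1] + e[t] + 0.3 * e[t - 1]  # ARMA(1,1)

# Step 1: a long autoregression approximates the innovations.
long_lags = lagmat(y, p_long, trim='both')
resid = y[p_long:] - long_lags.dot(OLS(y[p_long:], long_lags).fit().params)

# Step 2: regress y on its own lags and the lagged residuals
# (alignment follows the p < p_tmp + q branch above).
lag_y = lagmat(y, p, trim='both')[p_long + q - p:]
lag_e = lagmat(resid, q, trim='both')
start = OLS(y[p_long + q:], np.column_stack((lag_y, lag_e))).fit().params
print(start)  # rough (phi, theta) starting values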
Example #2
    def pred_density(self, phi, sigma, y=None, h=8, impact_mat=None):
        """Simulates predictive density..."""

        if y is None:
            #xest, yest = self.xest, self.yest
            xest = lagmat(self.data, maxlag=self._p, trim='backward')
            xest = add_constant(xest, prepend=False)
            xest = xest[-self._p]
        else:
            xest, yest = lagmat(y, maxlag=self._p, trim="both", original="sep")
            if self._cons is True:
                xest = add_constant(xest, prepend=False)

        x = xest

        nx = x.shape[0]
        y = np.zeros((h, self._ny))

        if impact_mat is None:
            impact_mat = np.ones_like(y)

        impact_mat = np.squeeze(impact_mat)

        for i in range(0, h):
            y[i, :] = x.dot(
                phi) + impact_mat[i] * np.random.multivariate_normal(
                    np.zeros(self._ny), sigma)
            if self._p > 0:
                x[:(self._ny * (self._p - 1))] = x[(self._ny):(nx -
                                                               self._cons)]
                x[(self._ny * (self._p - 1)):nx - self._cons] = y[i, :]  # -1 dropped a slot when _cons == 0
            else:
                x = y[i, :]
        return y
Example #4
    def _process_inputs(self, X, E=None, lengths=None):
        if self.n_features == 1:
            lagged = None
            if lengths is None:
                lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
                                original='ex')
            else:
                lagged = np.zeros((len(X), self.n_lags))
                for i, j in iter_from_X_lengths(X, lengths):
                    lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                            trim='forward', original='ex')

            return {'obs': X.reshape(-1,1),
                    'lagged': lagged.reshape(-1, self.n_features, self.n_lags)}
        else:
            lagged = None
            lagged = np.zeros((X.shape[0], self.n_features, self.n_lags))
            if lengths is None:
                tem = lagmat(X, maxlag=self.n_lags, trim='forward',
                             original='ex')
                for sample in range(X.shape[0]):
                    lagged[sample] = np.reshape(
                        tem[sample], (self.n_features, self.n_lags), 'F')

            else:
                for i, j in iter_from_X_lengths(X, lengths):
                    tem = lagmat(X[i:j], maxlag=self.n_lags,
                                 trim='forward', original='ex')
                    # reshape each row into (n_features, n_lags), as in the
                    # lengths-is-None branch; assigning the flat lagmat rows
                    # directly would not match lagged's 3-d shape
                    for sample in range(i, j):
                        lagged[sample] = np.reshape(
                            tem[sample - i],
                            (self.n_features, self.n_lags), 'F')

            return {'obs': X, 'lagged': lagged}
Example #5
def create_dataset(dataset, timestep=1, look_back=1, look_ahead=1):
    dataX = lagmat(dataset, maxlag=look_back, trim='both', original='ex')
    dataY = lagmat(dataset[look_back:],
                   maxlag=look_ahead,
                   trim='backward',
                   original='ex')
    # -(look_ahead - 1) would be an empty [:0] slice when look_ahead == 1
    end = -(look_ahead - 1) if look_ahead > 1 else None
    dataX = dataX.reshape(-1, timestep, dataX.shape[1])[:end]

    return np.array(dataX), np.array(dataY[:end])
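For reference, a quick standalone check of the lagmat windowing that create_dataset relies on (values illustrative):

import numpy as np
from statsmodels.tsa.tsatools import lagmat

series = np.arange(10.0)[:, None]
X = lagmat(series, maxlag=3, trim='both', original='ex')
print(X.shape)  # (7, 3): each row holds the lags t-1, t-2, t-3
print(X[0])     # [2. 1. 0.]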
Example #6
def maximum_likelihood_estimation(ts, lags):
    """
    Obtain the cointegrating vectors and corresponding eigenvalues
    """
    # Make sure we are working with array, convert if necessary
    ts = np.asarray(ts)

    # Calculate the differences of ts
    ts_diff = np.diff(ts, axis=0)

    # Calculate lags of ts_diff.
    ts_diff_lags = lagmat(ts_diff, lags, trim='both')

    # First lag of ts
    ts_lag = lagmat(ts, 1, trim='both')

    # Trim ts_diff and ts_lag
    ts_diff = ts_diff[lags:]
    ts_lag = ts_lag[lags:]

    # Include intercept in the regressions
    ones = np.ones((ts_diff_lags.shape[0], 1))
    ts_diff_lags = np.append(ts_diff_lags, ones, axis=1)

    # Calculate the residuals of the regressions of diffs and lags
    # into ts_diff_lags
    inverse = np.linalg.pinv(ts_diff_lags)
    u = ts_diff - np.dot(ts_diff_lags, np.dot(inverse, ts_diff))
    v = ts_lag - np.dot(ts_diff_lags, np.dot(inverse, ts_lag))

    # Covariance matrices of the calculated residuals
    t = ts_diff_lags.shape[0]
    Svv = np.dot(v.T, v) / t
    Suu = np.dot(u.T, u) / t
    Suv = np.dot(u.T, v) / t
    Svu = Suv.T

    # ToDo: check for singular matrices and exit
    Svv_inv = np.linalg.inv(Svv)
    Suu_inv = np.linalg.inv(Suu)

    # Calculate eigenvalues and eigenvectors of the product of covariances
    cov_prod = np.dot(Svv_inv, np.dot(Svu, np.dot(Suu_inv, Suv)))
    eigenvalues, eigenvectors = np.linalg.eig(cov_prod)

    # Use Cholesky decomposition on eigenvectors
    evec_Svv_evec = np.dot(eigenvectors.T, np.dot(Svv, eigenvectors))
    cholesky_factor = np.linalg.cholesky(evec_Svv_evec)
    eigenvectors = np.dot(eigenvectors, np.linalg.inv(cholesky_factor.T))

    # Order the eigenvalues and eigenvectors
    indices_ordered = np.argsort(eigenvalues)
    indices_ordered = np.flipud(indices_ordered)

    # Return the calculated values
    return eigenvalues[indices_ordered], eigenvectors[:, indices_ordered]
Example #7
def fit_discrete_state_transition(speed,
                                  is_replay,
                                  penalty=1E-5,
                                  speed_knots=None,
                                  diagonal=None):
    """Estimate the predicted probablity of replay given speed and whether
    it was a replay in the previous time step.

    p(I_t | I_t-1, v_t-1)

    p_I_0, p_I_1 in Long Tao's code

    Parameters
    ----------
    speed : ndarray, shape (n_time,)
    is_replay : boolean ndarray, shape (n_time,)
    speed_knots : ndarray, shape (n_knots,)

    Returns
    -------
    probability_replay : ndarray, shape (n_time, 2)

    """
    data = pd.DataFrame({
        'is_replay':
        is_replay.astype(np.float64),
        'lagged_is_replay':
        lagmat(is_replay, maxlag=1).astype(np.float64).squeeze(),
        'lagged_speed':
        lagmat(speed, maxlag=1).squeeze()
    }).dropna()

    if speed_knots is None:
        speed_mid_point = np.nanmedian(speed[speed > 10])
        speed_knots = [1., 2., 3., speed_mid_point]

    MODEL_FORMULA = (
        'is_replay ~ 1 + lagged_is_replay + '
        'cr(lagged_speed, knots=speed_knots, constraints="center")')
    response, design_matrix = dmatrices(MODEL_FORMULA, data)
    penalty = np.ones((design_matrix.shape[1], )) * penalty
    penalty[0] = 0.0
    fit = penalized_IRLS(design_matrix,
                         response,
                         family=FAMILY,
                         penalty=penalty)
    if np.isnan(fit.AIC):
        logger.error("Discrete state transition failed to fit properly. "
                     "Try specifying `speed_knots`")
    return partial(predict_probability,
                   design_matrix=design_matrix,
                   coefficients=fit.coefficients)
Example #8
    def _process_inputs(self, X, E=None, lengths=None):
        # Makes sure inputs have correct shape, generates features
        lagged = None
        if lengths is None:
            lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
                            original='ex')
        else:
            lagged = np.zeros((len(X), self.n_lags))
            for i, j in iter_from_X_lengths(X, lengths):
                lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
                                        trim='forward', original='ex')

        inputs = {'obs': X.reshape(-1,1),
                  'lagged': lagged}
        return inputs
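For reference, what trim='forward' returns: rows stay aligned with the observations and missing early lags are zero-filled (standalone check):

import numpy as np
from statsmodels.tsa.tsatools import lagmat

x = np.arange(1.0, 6.0)
print(lagmat(x, maxlag=2, trim='forward', original='ex'))
# [[0. 0.]
#  [1. 0.]
#  [2. 1.]
#  [3. 2.]
#  [4. 3.]]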
Example #9
def pacf_ols(x, nlags=40):
    '''Calculate partial autocorrelations

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned.  Lag 0 is not returned.

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags+1 elements

    Notes
    -----
    This solves a separate OLS estimation for each desired lag.
    '''
    #TODO: add warnings for Yule-Walker
    #NOTE: demeaning and not using a constant gave incorrect answers?
    #JP: demeaning should have a better estimate of the constant
    #maybe we can compare small sample properties with a MonteCarlo
    xlags, x0 = lagmat(x, nlags, original='sep')
    #xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
    xlags = add_constant(xlags)
    pacf = [1.]
    for k in range(1, nlags+1):
        res = OLS(x0[k:], xlags[k:, :k+1]).fit()
        #np.take(xlags[k:], range(1,k+1)+[-1],
        pacf.append(res.params[-1])
    return np.array(pacf)
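A quick sanity check of pacf_ols against statsmodels' built-in estimator (assumes the function above and its imports are in scope):

import numpy as np
from statsmodels.tsa.stattools import pacf

np.random.seed(2)
y = np.random.randn(300)
print(pacf_ols(y, nlags=5))
print(pacf(y, nlags=5, method='ols'))  # should agree closely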
Example #10
def _estimate_df_regression(y, trend, lags):
    """Helper function that estimates the core (A)DF regression

    Parameters
    ----------
    y : ndarray
        The data for the lag selection
    trend : {'nc','c','ct','ctt'}
        The trend order
    lags : int
        The number of lags to include in the ADF regression

    Returns
    -------
    ols_res : OLSResults
        A results class object produced by OLS.fit()

    Notes
    -----
    See statsmodels.regression.linear_model.OLS for details on the results
    returned
    """
    delta_y = diff(y)

    rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
    nobs = rhs.shape[0]
    lhs = rhs[:, 0].copy()  # lag-0 values are lhs, Is copy() necessary?
    rhs[:, 0] = y[-nobs - 1:-1]  # replace lag 0 with level of y
    rhs = _add_column_names(rhs, lags)

    if trend != 'nc':
        rhs = add_trend(rhs.iloc[:, :lags + 1], trend)

    return OLS(lhs, rhs).fit()
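Why rhs[:, 0] can be overwritten above: with original='in', lagmat's first column is the contemporaneous (lag-0) series itself, e.g.:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

d = np.arange(4.0)
print(lagmat(d[:, None], 2, trim='both', original='in'))
# [[2. 1. 0.]
#  [3. 2. 1.]]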
Example #12
def _estimate_df_regression(y, trend, lags):
    """Helper function that estimates the core (A)DF regression

    Parameters
    ----------
    y : array-like, (nobs,)
        The data for the lag selection
    trend : str, {'nc','c','ct','ctt'}
        The trend order
    lags : int
        The number of lags to include in the ADF regression

    Returns
    -------
    ols_res : OLSResults
        A results class object produced by OLS.fit()

    Notes
    -----
    See statsmodels.regression.linear_model.OLS for details on the results
    returned
    """
    delta_y = diff(y)

    rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
    nobs = rhs.shape[0]
    lhs = rhs[:, 0].copy()  # lag-0 values are lhs, Is copy() necessary?
    rhs[:, 0] = y[-nobs - 1:-1]  # replace lag 0 with level of y

    if trend != 'nc':
        rhs = add_trend(rhs[:, :lags + 1], trend)

    return OLS(lhs, rhs).fit()
Example #13
File: dio.py Project: mobeets/jonasASD
def load_(infile, nLags=1000):
    from statsmodels.tsa.tsatools import lagmat
    assert infile.endswith('.npy')
    X, Y = np.load(infile)
    X0 = lagmat(X, nLags, trim='both')
    ind = len(X)-len(X0)
    return X0, Y[ind:]
Example #14
    def __init__(self, endog, use_exp=True, h=0.1, p=1, q=1, **kwargs):
        exog, endog = lagmat(endog,
                             p,
                             trim='both',
                             original='sep',
                             use_pandas=True)

        super().__init__(endog, add_constant(exog), **kwargs)

        self.p = p
        self.q = q
        self.h = h

        for plag1 in range(1, p + 1):
            self.exog_names.append('phi2%d%d' % (plag1, plag1))
        # for plag1 in range(1,p+1):
        #     for plag2 in range(plag1,p+1):
        #         self.exog_names.append('phi2%d%d' % (plag1,plag2))

        for qlag in range(1, q + 1):
            self.exog_names.append('gamma%d' % qlag)

        self.exog_names.append('sigma')

        # for lag in range(1,max(p,q)+1):
        #     self.exog_names.append('s%d' % lag)

        self.nlinear_terms = self.p + 1
        self.nquad_terms = int(self.p * (self.p + 1) / 2)
        self.ngamma_terms = self.q
        self.ninit_terms = max(self.p, self.q)
Example #15
def _get_mixed_design_mat_part(seas_part, order, seas_order, m):
    """
    Prepares `pP, qQ` parts of the design matrix for SGD.

    Parameters
    ----------
    seas_part : array_like
        Either the seasonal part that was previously calculated with
        `_get_seasonal_design_mat_part` for P or for Q.

    order : int
        Either p or q of SARIMAX.

    seas_order : int
        Either P or Q of SARIMAX.

    m : int
        The seasonal period of SARIMAX.

    Returns
    -------
        A matrix shaped (N, p*P) or (N, q*Q) that will correspond to the
        multiplying terms in SARIMAX's "mixed" parameters i.e. containing both
        ordinals and seasonals as a result of multiplicative formulation.
    """
    N = seas_part.shape[0]
    part = np.empty((N, order * seas_order))
    for i in range(seas_order):
        part[:, i * order: (i + 1) * order] = lagmat(
            seas_part[:, i], maxlag=order
        )
    return part
Example #16
def GetADFuller(Y, maxlags=None, regression='c'):
    #Y = np.asarray(Y)
    Y = Y.T
    dy = np.diff(Y)
    if dy.ndim == 1:
        dy = dy[:, None]
    ydall = lagmat(dy, maxlags, trim='both', original='in')
    nobs = ydall.shape[0]
    ydall[:, 0] = Y[-nobs - 1:-1]
    dYshort = dy[-nobs:]

    if regression != 'nc':
        Z = add_trend(ydall[:, :maxlags + 1], regression)
    else:
        Z = ydall[:, :maxlags + 1]

    resultADFuller = GetOLS(Y=dYshort.T, X=Z.T)

    K_dash = 2 * (2 * maxlags + 1)
    AIC = np.log(np.absolute(np.linalg.det(
        resultADFuller['sigma_hat']))) + 2.0 * K_dash / (
            resultADFuller['nobs'])  # log(sigma_hat) + 2*K_dash/T
    BIC = np.log(np.absolute(np.linalg.det(resultADFuller['sigma_hat']))) + (
        K_dash / resultADFuller['nobs']) * np.log(
            resultADFuller['nobs'])  # log(sigma_hat) + K_dash/T*log(T)
    resultADFuller['AIC'] = AIC
    resultADFuller['BIC'] = BIC
    resultADFuller['adfstat'] = resultADFuller['tvalues'][0, 0]
    resultADFuller['maxlag'] = maxlags

    return resultADFuller
Example #17
    def train(self, data, **kwargs):
        if self.indexer is not None and isinstance(data, pd.DataFrame):
            data = self.indexer.get_data(data)

        lagdata, ndata = lagmat(data, maxlag=self.order, trim="both", original='sep')

        mqt = QuantReg(ndata, lagdata).fit(0.5)
        if self.alpha is not None:
            uqt = QuantReg(ndata, lagdata).fit(1 - self.alpha)
            lqt = QuantReg(ndata, lagdata).fit(self.alpha)

        self.mean_qt = [k for k in mqt.params]
        if self.alpha is not None:
            self.upper_qt = [k for k in uqt.params]
            self.lower_qt = [k for k in lqt.params]

        if self.dist:
            self.dist_qt = []
            for alpha in np.arange(0.05,0.5,0.05):
                lqt = QuantReg(ndata, lagdata).fit(alpha)
                uqt = QuantReg(ndata, lagdata).fit(1 - alpha)
                lo_qt = [k for k in lqt.params]
                up_qt = [k for k in uqt.params]
                self.dist_qt.append([lo_qt, up_qt])

        self.shortname = "QAR(" + str(self.order) + ") - " + str(self.alpha)
Example #18
def lag_func(data, lag, col):
    X = lagmat(data["diff"], lag)
    lagged = data.copy()
    for c in range(1, lag + 1):
        lagged[col + "%d" % c] = X[:, c - 1]
    return lagged
Example #19
def fit_speed_likelihood(speed, is_replay, speed_threshold=4.0):
    """Fits the standard deviation of the change in speed for the replay and
    non-replay state.

    Parameters
    ----------
    speed : ndarray, shape (n_time,)
    is_replay : ndarray, shape (n_time,)
    speed_threshold : float, optional

    Returns
    -------
    speed_likelihood : function

    """
    is_replay = np.asarray(is_replay).astype(bool)
    lagged_speed = lagmat(speed, 1)
    lagged_speed[0] = speed[0]
    replay_coefficients, replay_scale = fit_speed_model(
        speed[is_replay], lagged_speed[is_replay])
    no_replay_coefficients, no_replay_scale = fit_speed_model(
        speed[~is_replay], lagged_speed[~is_replay])
    return partial(speed_likelihood,
                   replay_coefficients=replay_coefficients,
                   replay_scale=replay_scale,
                   no_replay_coefficients=no_replay_coefficients,
                   no_replay_scale=no_replay_scale,
                   speed_threshold=speed_threshold)
Example #20
def selectlag_IC(data, maxlag, penalty):
    # Select VAR lag length using information criterion

    # preliminaries
    n_x = np.size(data, 1)

    # compute information criterion for each possible lag length
    IC = np.full((maxlag + 1, 1), np.inf)  # IC[0] is never filled; inf keeps it out of the minimum
    for i in range(1, maxlag + 1):
        # set lag length
        p = i

        # estimate VAR
        data_est = data[maxlag - p:, :]
        T = np.size(data_est, 0)
        T_VAR = T - p
        X = lagmat(data_est, p, original='ex')
        X = X[p:, :]
        Y = data_est[p:, :]
        X = np.column_stack((X, np.ones((len(X), 1))))
        VAR_coeff = (np.linalg.inv(X.T @ X)) @ (X.T @ Y)
        VAR_res = Y - X @ VAR_coeff
        #     Sigma_u    = (VAR_res'*VAR_res)/(T_VAR-n_x*p-1)
        Sigma_u = (VAR_res.T @ VAR_res) / T_VAR
        # compute information criterion
        T = np.size(data, 0) - maxlag
        #     IC(i)  = log(det(Sigma_u)) + i * n_x^2 * 2/T
        IC[i] = np.log(np.linalg.det(Sigma_u)) + i * n_x**2 * penalty(T)
    return int(np.argmin(IC))
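An illustrative call, passing a BIC-style penalty as a function of the effective sample size T (assumes selectlag_IC and lagmat are in scope):

import numpy as np

np.random.seed(4)
data = np.random.randn(300, 2)
print(selectlag_IC(data, maxlag=6, penalty=lambda T: np.log(T) / T))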
Example #21
    def _init_model(self) -> None:
        """Should be called whenever the model is initialized or changed"""
        self._reformat_lags()
        self._check_specification()

        nobs_orig = self._y.shape[0]
        if self.constant:
            reg_constant = np.ones((nobs_orig, 1), dtype=np.float64)
        else:
            reg_constant = np.ones((nobs_orig, 0), dtype=np.float64)

        if self.lags is not None and nobs_orig > 0:
            maxlag = np.max(self.lags)
            lag_array = lagmat(self._y, maxlag)
            reg_lags = np.empty((nobs_orig, self._lags.shape[1]),
                                dtype=np.float64)
            for i, lags in enumerate(self._lags.T):
                reg_lags[:, i] = np.mean(lag_array[:, lags[0]:lags[1]], 1)
        else:
            reg_lags = np.empty((nobs_orig, 0), dtype=np.float64)

        if self._x is not None:
            reg_x = self._x
        else:
            reg_x = np.empty((nobs_orig, 0), dtype=np.float64)

        self.regressors = np.hstack((reg_constant, reg_lags, reg_x))
Example #22
def adf(ts, maxlag=1):
    """
    Augmented Dickey-Fuller unit root test
    """
    # make sure we are working with an array, convert if necessary
    ts = np.asarray(ts)

    # Get the dimension of the array
    nobs = ts.shape[0]

    # Calculate the discrete difference
    tsdiff = np.diff(ts)

    # Create a 2d array of lags, trim invalid observations on both sides
    tsdall = lagmat(tsdiff[:, None], maxlag, trim='both', original='in')
    # Get dimension of the array
    nobs = tsdall.shape[0]

    # replace 0 xdiff with level of x
    tsdall[:, 0] = ts[-nobs - 1:-1]
    tsdshort = tsdiff[-nobs:]

    # Calculate the linear regression using an ordinary least squares model
    results = OLS(tsdshort, add_trend(tsdall[:, :maxlag + 1], 'c')).fit()
    adfstat = results.tvalues[0]

    # Get approx p-value from a precomputed table (from stattools)
    pvalue = mackinnonp(adfstat, 'c', N=1)
    return pvalue
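Quick use of adf() on a random walk versus white noise (assumes the imports used above — lagmat, add_trend, OLS, mackinnonp — are in scope):

import numpy as np

np.random.seed(5)
print(adf(np.cumsum(np.random.randn(500))))  # large p-value: unit root not rejected
print(adf(np.random.randn(500)))             # small p-value: stationary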
Example #23
    def __init__(self, endog):
        if not isinstance(endog, pd.DataFrame):
            endog = pd.DataFrame(endog)

        k = endog.shape[1]
        augmented = lagmat(endog, 1, trim='both', original='in',
                           use_pandas=True)
        endog = augmented.iloc[:, :k]
        exog = add_constant(augmented.iloc[:, k:])

        k_states = k * (k + 1)
        super().__init__(endog, k_states=k_states)

        self.ssm.initialize('known', stationary_cov=np.eye(self.k_states) * 5)

        self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
        for i in range(self.k_endog):
            start = i * (self.k_endog + 1)
            end = start + self.k_endog + 1
            self['design', i, start:end, :] = exog.T
        self['transition'] = np.eye(k_states)
        self['selection'] = np.eye(k_states)

        self._obs_cov_slice = np.s_[:self.k_endog * (self.k_endog + 1) // 2]
        self._obs_cov_tril = np.tril_indices(self.k_endog)
        self._state_cov_slice = np.s_[-self.k_states:]
        self._state_cov_ix = ('state_cov',) + np.diag_indices(self.k_states)
Example #24
    def sample(self, nsim=1000, y=None, flatten_output=False):

        if y is None:
            y = self.data

        ydum, xdum = self._prior.get_pseudo_obs()
        xest, yest = lagmat(y, maxlag=self._p, trim="both", original="sep")

        if self._cons is True:
            xest = add_constant(xest, prepend=False)

        #if not (ydum == None).all():
        yest = np.vstack((ydum, yest))
        xest = np.vstack((xdum, xest))

        # This is just a random initialization point....
        phihatT = np.linalg.solve(xest.T.dot(xest), xest.T.dot(yest))
        S = (yest - xest.dot(phihatT)).T.dot(yest - xest.dot(phihatT))
        nu = yest.shape[0] - self._p * self._ny - self._cons * 1
        omega = xest.T.dot(xest)
        muphi = phihatT

        phis, sigmas = NormInvWishart(phihatT, omega, S, nu).rvs(nsim)

        if flatten_output:
            return np.array([
                np.r_[phis[i].flatten(order='F'),
                      vech(sigmas[i])] for i in range(nsim)
            ])
        else:
            return phis, sigmas
Example #25
File: mean.py Project: rhodge1/arch
    def _init_model(self):
        """Should be called whenever the model is initialized or changed"""
        self._reformat_lags()
        self._check_specification()

        nobs_orig = self._y.shape[0]
        if self.constant:
            reg_constant = ones((nobs_orig, 1), dtype=np.float64)
        else:
            reg_constant = ones((nobs_orig, 0), dtype=np.float64)

        if self.lags is not None and nobs_orig > 0:
            maxlag = np.max(self.lags)
            lag_array = lagmat(self._y, maxlag)
            reg_lags = empty((nobs_orig, self._lags.shape[1]), dtype=np.float64)
            for i, lags in enumerate(self._lags.T):
                reg_lags[:, i] = np.mean(lag_array[:, lags[0]:lags[1]], 1)
        else:
            reg_lags = empty((nobs_orig, 0), dtype=np.float64)

        if self._x is not None:
            reg_x = self._x
        else:
            reg_x = empty((nobs_orig, 0), dtype=np.float64)

        self.regressors = np.hstack((reg_constant, reg_lags, reg_x))
        first_obs, last_obs = self._indices
        self.regressors = self.regressors[first_obs:last_obs, :]
        self._y_adj = self._y[first_obs:last_obs]
Example #26
    def fit_scale(self, leading_indicator, cases, lag=0):
        """
        Scaling correction for AR(p) model to fit case counts.
        Args:
            leading_indicator: Covariate that is a leading indicator for case counts
            cases (nd.array): 1-dimensional array containing case counts
            lag (int): Offset for the window of case counts you want to correct over
        """
        self.lag = lag

        leading_mean = np.mean(leading_indicator)
        cases_mean = np.mean(cases)
        leading_std = np.std(leading_indicator)
        cases_std = np.std(cases)

        z = leading_indicator[:-lag - 1]
        x = cases[lag + 1:]

        p = self.p
        n = z.shape[0]

        self.beta[0] += -leading_mean * cases_std / leading_std + cases_mean
        self.beta[1:] *= cases_std / leading_std

        Z = np.hstack([np.ones((n, 1)), lagmat(z, maxlag=p)])[p:]  # np.ones; tf.ones looked like a stray mix-in
        x_pred = Z @ self.beta.T

        self.scale = np.sum(x) / np.sum(x_pred)
Example #27
def infer_discrete_state_transition_from_training_data(is_non_local,
                                                       penalty=1e-5):
    data = pd.DataFrame({
        'is_non_local':
        is_non_local.astype(np.float64),
        'lagged_is_non_local':
        lagmat(is_non_local, maxlag=1).astype(np.float64).squeeze(),
    }).dropna()

    MODEL_FORMULA = 'is_non_local ~ 1 + lagged_is_non_local'
    response, design_matrix = dmatrices(MODEL_FORMULA, data)
    penalty = np.ones((design_matrix.shape[1], )) * penalty
    penalty[0] = 0.0
    fit = penalized_IRLS(design_matrix,
                         response,
                         family=families.Binomial(),
                         penalty=penalty)

    predict_data = {
        'lagged_is_non_local': np.asarray([0, 1]),
    }
    predict_design_matrix = build_design_matrices(
        [design_matrix.design_info],
        predict_data,
        NA_action=NAAction(NA_types=[]))[0]

    non_local_probability = families.Binomial().link.inverse(
        predict_design_matrix @ np.squeeze(fit.coefficients))

    non_local_probability[np.isnan(non_local_probability)] = 0.0

    return np.asarray(
        [[1 - non_local_probability[0], non_local_probability[0]],
         [1 - non_local_probability[1], non_local_probability[1]]])
Example #28
def my_AR(endog, maxlag, trend=None):
    """
    Autoregressive model implementation, aka AR(p)
    :param endog: the dependent variables
    :param maxlag: maximum number of lags to use
    :param trend: 'c': add constant, 'nc' or None: no constant
    :return: my_OLS dictionary

    See ref: http://statsmodels.sourceforge.net/stable/_modules/statsmodels/tsa/ar_model.html#AR
    """
    # Dependent data matrix
    Y = endog[maxlag:]  # has observations for the fit p lags removed
    # Explanatory data matrix
    X = lagmat(endog, maxlag, trim='both')
    if trend is not None:
        X = add_trend(
            X, prepend=True,
            trend=trend)  # prepends puts trend column at the beginning
    result = my_OLS(Y, X)
    result['maxlag'] = maxlag
    # Akaike information criterion using statsmodel def for AR(p) (Lutkephol's definition)
    # note this is diff to adfuller's def
    result['aic'] = np.log(
        result['sigma']) + 2.0 * (1.0 + result['df_model']) / result['nobs']

    return result
Example #29
def VAR_forecast(Vector, VAR_estimates, lag_order, horizon, shock=None):
    """
    Inputs the VAR Vector, VAR estimates, the lag order of the model,
    the forecast horizon, and the desired first period shock.
    Args:
        Vector: (nobs, ny) array of observations.
        VAR_estimates: fitted per-equation results, keyed by equation.
        lag_order: lag order of the VAR.
        horizon: forecast horizon.
        shock: optional first-period shock vector (for IRFs).

    Returns:
        (horizon, ny) array containing the forecast path.
    """
    # Initial Period shock for IRF.
    if shock is None:
        shock = np.zeros(len(Vector[0]))
    error = np.zeros((len(Vector), len(Vector[0])))
    error[0] = shock
    # Predictions for Forecast Horizon.
    for t in np.arange(0, horizon):
        X_hat = Vector
        for i in range(1, lag_order):
            # X_hat = np.column_stack([X_hat, lag(Vector, i)])
            # lag-i values are the last ny columns of lagmat(..., maxlag=i);
            # the original maxlag=1 ignored i and appended lag 1 repeatedly
            lagged_vector = tsatools.lagmat(Vector, maxlag=i)[:, -Vector.shape[1]:]
            X_hat = np.column_stack([X_hat, lagged_vector])
        X_hat = sm.add_constant(X_hat)
        Y_hat = []
        for Eq in VAR_estimates:
            Y_hat.append(np.dot(X_hat[-1], VAR_estimates[Eq].params))
        Forecast = Y_hat + error[t]
        Vector = np.vstack((Vector, Forecast))
    return Vector[-horizon:]
Example #30
File: unitroot.py Project: fpochon/arch
def _df_select_lags(y, trend, max_lags, method):
    """
    Helper method to determine the best lag length in DF-like regressions

    Parameters
    ----------
    y : array-like, (nobs,)
        The data for the lag selection exercise
    trend : str, {'nc','c','ct','ctt'}
        The trend order
    max_lags : int
        The maximum number of lags to check.  This setting affects all
        estimation since the sample is adjusted by max_lags when
        fitting the models
    method : str, {'AIC','BIC','t-stat'}
        The method to use when estimating the model

    Returns
    -------
    best_ic : float
        The information criteria at the selected lag
    best_lag : int
        The selected lag
    all_res : list
        List of OLS results from fitting max_lag + 1 models

    Notes
    -----
    See statsmodels.tsa.stattools._autolag for details.  If max_lags is None,
    the default value of 12 * (nobs/100)**(1/4) is used.
    """
    nobs = y.shape[0]
    delta_y = diff(y)

    if max_lags is None:
        max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))

    rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
    nobs = rhs.shape[0]
    rhs[:, 0] = y[-nobs - 1:-1]  # replace 0 with level of y
    lhs = delta_y[-nobs:]

    if trend != 'nc':
        full_rhs = add_trend(rhs, trend, prepend=True)
    else:
        full_rhs = rhs

    start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
    ic_best, best_lag, all_res = _autolag(OLS,
                                          lhs,
                                          full_rhs,
                                          start_lag,
                                          max_lags,
                                          method,
                                          regresults=True)
    # To get the correct number of lags, subtract the start_lag since
    # lags 0,1,...,start_lag-1 were not actual lags, but other variables
    best_lag -= start_lag
    return ic_best, best_lag, all_res
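The Schwert rule of thumb behind the default max_lags, as a quick arithmetic check:

from math import ceil

nobs = 250
print(int(ceil(12. * (nobs / 100.) ** 0.25)))  # -> 16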
Example #31
def _df_select_lags(y, trend, max_lags, method, low_memory=False):
    """
    Helper method to determine the best lag length in DF-like regressions

    Parameters
    ----------
    y : ndarray
        The data for the lag selection exercise
    trend : {'nc','c','ct','ctt'}
        The trend order
    max_lags : int
        The maximum number of lags to check.  This setting affects all
        estimation since the sample is adjusted by max_lags when
        fitting the models
    method : {'AIC','BIC','t-stat'}
        The method to use when estimating the model
    low_memory : bool
        Flag indicating whether to use the low-memory algorithm for
        lag-length selection.

    Returns
    -------
    best_ic : float
        The information criteria at the selected lag
    best_lag : int
        The selected lag

    Notes
    -----
    If max_lags is None, the default value of 12 * (nobs/100)**(1/4) is used.
    """
    nobs = y.shape[0]
    # This is the absolute maximum number of lags possible,
    # only needed for very short time series.
    max_max_lags = nobs // 2 - 1
    if trend != 'nc':
        max_max_lags -= len(trend)
    if max_lags is None:
        max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))
        max_lags = max(min(max_lags, max_max_lags), 0)
    if low_memory:
        out = _autolag_ols_low_memory(y, max_lags, trend, method)
        return out

    delta_y = diff(y)
    rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
    nobs = rhs.shape[0]
    rhs[:, 0] = y[-nobs - 1:-1]  # replace 0 with level of y
    lhs = delta_y[-nobs:]

    if trend != 'nc':
        full_rhs = add_trend(rhs, trend, prepend=True)
    else:
        full_rhs = rhs

    start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
    ic_best, best_lag = _autolag_ols(lhs, full_rhs, start_lag, max_lags,
                                     method)
    return ic_best, best_lag
Example #32
    def _tsls_arima(self, x, arlags, model):
        """
        Two-stage least squares approach for estimating ARIMA(p, 1, 1)
        parameters as an alternative to MLE estimation in the case of
        solver non-convergence

        Parameters
        ----------
        x : array_like
            data series
        arlags : int
            AR(p) order
        model : {'c','ct'}
            Constant and trend order to include in regression
            * 'c'  : constant only
            * 'ct' : constant and trend

        Returns
        -------
        arparams : ndarray
            AR coefficient estimates
        theta : float
            MA(1) coefficient estimate
        olsfit.resid : ndarray
            residuals from second-stage regression
        """
        endog = np.diff(x, axis=0)
        exog = lagmat(endog, arlags, trim='both')
        # add constant if requested
        if model == 'ct':
            exog = add_constant(exog)
        # remove extra terms from front of endog
        endog = endog[arlags:]
        if arlags > 0:
            resids = lagmat(OLS(endog, exog).fit().resid, 1, trim='forward')
        else:
            resids = lagmat(-endog, 1, trim='forward')
        # add negated residuals column to exog as MA(1) term
        exog = np.append(exog, -resids, axis=1)
        olsfit = OLS(endog, exog).fit()
        if model == 'ct':
            arparams = olsfit.params[1:(len(olsfit.params) - 1)]
        else:
            arparams = olsfit.params[0:(len(olsfit.params) - 1)]
        theta = olsfit.params[len(olsfit.params) - 1]
        return arparams, theta, olsfit.resid
Example #33
    def __init__(self, endog, k_regimes, order, trend='c', exog=None,
                 exog_tvtp=None, switching_ar=True, switching_trend=True,
                 switching_exog=False, switching_variance=False,
                 dates=None, freq=None, missing='none'):

        # Properties
        self.switching_ar = switching_ar

        # Switching options
        if self.switching_ar is True or self.switching_ar is False:
            self.switching_ar = [self.switching_ar] * order
        elif not len(self.switching_ar) == order:
            raise ValueError('Invalid iterable passed to `switching_ar`.')

        # Initialize the base model
        super(MarkovAutoregression, self).__init__(
            endog, k_regimes, trend=trend, exog=exog, order=order,
            exog_tvtp=exog_tvtp, switching_trend=switching_trend,
            switching_exog=switching_exog,
            switching_variance=switching_variance, dates=dates, freq=freq,
            missing=missing)

        # Sanity checks
        if self.nobs <= self.order:
            raise ValueError('Must have more observations than the order of'
                             ' the autoregression.')

        # Autoregressive exog
        self.exog_ar = lagmat(endog, self.order)[self.order:]

        # Reshape other datasets
        self.nobs -= self.order
        self.orig_endog = self.endog
        self.endog = self.endog[self.order:]
        if self._k_exog > 0:
            self.orig_exog = self.exog
            self.exog = self.exog[self.order:]

        # Reset the ModelData datasets
        self.data.endog, self.data.exog = (
            self.data._convert_endog_exog(self.endog, self.exog))

        # Reset indexes, if provided
        if self.data.row_labels is not None:
            self.data._cache['row_labels'] = (
                self.data.row_labels[self.order:])
        if self._index is not None:
            if self._index_generated:
                self._index = self._index[:-self.order]
            else:
                self._index = self._index[self.order:]

        # Parameters
        self.parameters['autoregressive'] = self.switching_ar

        # Cache an array for holding slices
        self._predict_slices = [slice(None, None, None)] * (self.order + 1)
Example #34
    def forecast(self, Phi, Sigma, h=12, y=None, append=True):
        if y is None:
            y = self.data

        y_base = y
        y0 = np.ones((self._ny * self._p + self._cons))
        y0[:self._ny * self._p] = lagmat(y_base, self._p, 'none')[-self._p]

        yfcst = np.zeros((h, self._ny))

        for i in range(h):
            yfcst[i] = np.dot(y0, Phi)
            y = np.vstack((y, yfcst[i]))
            if self._cons:
                y0[:-self._cons] = lagmat(y, self._p, 'none')[-self._p]
            else:
                y0 = lagmat(y, self._p, 'none')[-self._p]
        return yfcst
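The trim='none' indexing trick above, checked standalone: row [-p] of the padded lag matrix holds lags 1..p as of the end of the sample (values illustrative):

import numpy as np
from statsmodels.tsa.tsatools import lagmat

y = np.arange(1.0, 7.0).reshape(-1, 2)  # 3 observations of a 2-variable system
print(lagmat(y, 2, 'none')[-2])         # [5. 6. 3. 4.]: lag-1 block, then lag-2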
Example #35
File: arg.py Project: khrapovs/argamma
    def moment_ret(self, theta_ret, theta_vol=None, uarg=None,
                   zlag=1, **kwargs):
        """Moment conditions (returns) for spectral GMM estimator.

        Parameters
        ----------
        theta_ret : (2, ) array
            Vector of model parameters. [phi, price_ret]
        theta_vol : (3, ) array
            Vector of model parameters. [mean, rho, delta]
        uarg : (nu, ) array
            Grid to evaluate a and b functions
        zlag : int
            Number of lags to use for the instrument

        Returns
        -------
        moment : (nobs, nmoms) array
            Matrix of momcond restrictions

        Raises
        ------
        ValueError

        """

        if uarg is None:
            raise ValueError("uarg is missing!")

        vollag, vol = lagmat(self.vol, maxlag=zlag,
                             original='sep', trim='both')
        # Number of observations after truncation
        nobs = vol.shape[0]
        # Number of moments
        nmoms = 2 * uarg.shape[0] * (zlag+1)
        # Change class attribute with the current theta
        param = ARGparams()
        try:
            param.update(theta_ret=theta_ret, theta_vol=theta_vol)
        except ValueError:
            return np.ones((nobs, nmoms))*1e10
        # Must be (nobs, nu) array
        try:
            cfun = self.char_fun_ret(uarg, param)[zlag-1:]
        except ValueError:
            return np.ones((nobs, nmoms))*1e10
        # Must be (nobs, nu) array
        error = np.exp(-self.ret[zlag:, np.newaxis] * uarg) - cfun
        # Instruments, (nobs, ninstr) array
        instr = np.hstack([np.exp(-1j * vollag), np.ones((nobs, 1))])
        # Must be (nobs, nmoms) array
        moment = error[:, np.newaxis, :] * instr[:, :, np.newaxis]
        moment = moment.reshape((nobs, nmoms//2))
        # (nobs, 2 * ninstr)
        moment = np.hstack([np.real(moment), np.imag(moment)])

        return moment
Example #37
def fit_discrete_state_transition_no_speed(speed,
                                           is_replay,
                                           penalty=1E-5,
                                           speed_knots=None,
                                           diagonal=None):
    """Estimate the predicted probablity of replay and whether
    it was a replay in the previous time step.

    p(I_t | I_t-1, v_t-1)

    p_I_0, p_I_1 in Long Tao's code

    Parameters
    ----------
    speed : ndarray, shape (n_time,)
    is_replay : boolean ndarray, shape (n_time,)
    speed_knots : ndarray, shape (n_knots,)

    Returns
    -------
    probability_replay : ndarray, shape (n_time, 2)

    """
    data = pd.DataFrame({
        'is_replay':
        is_replay.astype(np.float64),
        'lagged_is_replay':
        lagmat(is_replay, maxlag=1).astype(np.float64).squeeze(),
        'lagged_speed':
        lagmat(speed, maxlag=1).squeeze()
    }).dropna()

    MODEL_FORMULA = 'is_replay ~ 1 + lagged_is_replay'
    response, design_matrix = dmatrices(MODEL_FORMULA, data)
    penalty = np.ones((design_matrix.shape[1], )) * penalty
    penalty[0] = 0.0
    fit = penalized_IRLS(design_matrix,
                         response,
                         family=FAMILY,
                         penalty=penalty)
    return partial(predict_probability,
                   design_matrix=design_matrix,
                   coefficients=fit.coefficients)
Example #39
    def _stackX(self, k_ar, trend):
        """
        Private method to build the RHS matrix for estimation.

        Columns are trend terms then lags.
        """
        endog = self.endog
        X = lagmat(endog, maxlag=k_ar, trim='both')
        k_trend = util.get_trendorder(trend)
        if k_trend:
            X = add_trend(X, prepend=True, trend=trend)
        self.k_trend = k_trend
        return X
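A standalone check of the trend-then-lags column layout _stackX builds (values illustrative):

import numpy as np
from statsmodels.tsa.tsatools import add_trend, lagmat

y = np.arange(1.0, 8.0)
X = add_trend(lagmat(y, maxlag=2, trim='both'), trend='c', prepend=True)
print(X[:2])
# [[1. 2. 1.]
#  [1. 3. 2.]]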
Example #40
File: unitroot.py Project: esvhd/arch
def _df_select_lags(y, trend, max_lags, method):
    """
    Helper method to determine the best lag length in DF-like regressions

    Parameters
    ----------
    y : array
        The data for the lag selection exercise
    trend : {'nc','c','ct','ctt'}
        The trend order
    max_lags : int
        The maximum number of lags to check.  This setting affects all
        estimation since the sample is adjusted by max_lags when
        fitting the models
    method : {'AIC','BIC','t-stat'}
        The method to use when estimating the model

    Returns
    -------
    best_ic : float
        The information criteria at the selected lag
    best_lag : int
        The selected lag

    Notes
    -----
    If max_lags is None, the default value of 12 * (nobs/100)**(1/4) is used.
    """
    nobs = y.shape[0]
    delta_y = diff(y)

    if max_lags is None:
        max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))

    rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
    nobs = rhs.shape[0]
    rhs[:, 0] = y[-nobs - 1:-1]  # replace 0 with level of y
    lhs = delta_y[-nobs:]

    if trend != 'nc':
        full_rhs = add_trend(rhs, trend, prepend=True)
    else:
        full_rhs = rhs

    start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
    ic_best, best_lag = _autolag_ols(lhs, full_rhs, start_lag, max_lags, method)

    return ic_best, best_lag
Example #41
    def fit(self, nlags):
        '''estimate parameters using ols

        Parameters
        ----------
        nlags : integer
            number of lags to include in regression, same for all variables

        Returns
        -------
        None, but attaches

        arhat : array (nlags, nvar, nvar)
            full lag polynomial array
        arlhs : array (nlags-1, nvar, nvar)
            reduced lag polynomial for left hand side
        other statistics as returned by linalg.lstsq : need to be completed

        Notes
        -----
        This currently assumes all parameters are estimated without
        restrictions.  In this case SUR is identical to OLS.

        Estimation results are attached to the class instance.
        '''
        self.nlags = nlags # without current period
        nvars = self.nvars
        #TODO: ar2s looks like a module variable, bug?
        #lmat = lagmat(ar2s, nlags, trim='both', original='in')
        lmat = lagmat(self.y, nlags, trim='both', original='in')
        self.yred = lmat[:,:nvars]
        self.xred = lmat[:,nvars:]
        res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
        self.estresults = res
        self.arlhs = res[0].reshape(nlags, nvars, nvars)
        self.arhat = ar2full(self.arlhs)
        self.rss = res[1]
        self.xredrank = res[2]
Example #42
    def _em_autoregressive(self, result, betas, tmp=None):
        """
        EM step for autoregressive coefficients and variances
        """
        if tmp is None:
            tmp = np.sqrt(result.smoothed_marginal_probabilities)

        resid = np.zeros((self.k_regimes, self.nobs + self.order))
        resid[:] = self.orig_endog
        if self._k_exog > 0:
            for i in range(self.k_regimes):
                resid[i] -= np.dot(self.orig_exog, betas[i])

        # The difference between this and `_em_exog` is that here we have a
        # different endog and exog for each regime
        coeffs = np.zeros((self.k_regimes,) + (self.order,))
        variance = np.zeros((self.k_regimes,))
        exog = np.zeros((self.nobs, self.order))
        for i in range(self.k_regimes):
            endog = resid[i, self.order:]
            exog = lagmat(resid[i], self.order)[self.order:]
            tmp_endog = tmp[i] * endog
            tmp_exog = tmp[i][:, None] * exog

            coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)

            if self.switching_variance:
                tmp_resid = endog - np.dot(exog, coeffs[i])
                variance[i] = (np.sum(
                    tmp_resid**2 * result.smoothed_marginal_probabilities[i]) /
                    np.sum(result.smoothed_marginal_probabilities[i]))
            else:
                tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])
                variance[i] = np.sum(tmp_resid**2)

        # Variances
        if not self.switching_variance:
            variance = variance.sum() / self.nobs

        return coeffs, variance
Example #43
File: Corr_Dim.py Project: johntanz/ROP
def fnn(data, maxm):
    """
    Compute the embedding dimension of a time series data to build the phase space using the false neighbors criterion
    data--> time series
    maxm--> maximmum embeding dimension
    """    
    RT=15.0
    AT=2
    sigmay=np.std(data, ddof=1)
    nyr=len(data)
    m=maxm
    EM=lagmat(data, maxlag=m-1)
    EEM=np.asarray([EM[j,:] for j in range(m-1, EM.shape[0])])
    embedm=maxm
    for k in range(AT,EEM.shape[1]+1):
        fnn1=[]
        fnn2=[]
        Ma=EEM[:,range(k)]
        D=dist(Ma)
        for i in range(1,EEM.shape[0]-m-k):
            #print D.shape            
            #print(D[i,range(i-1)])
            d=D[i,:]
            pdnz=np.where(d>0)
            dnz=d[pdnz]
            Rm=np.min(dnz)
            l=np.where(d==Rm)
            l=l[0]
            l=l[len(l)-1]
            if l+m+k-1<nyr:
                fnn1.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/Rm)
                fnn2.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/sigmay)
        Ind1=np.where(np.asarray(fnn1)>RT)
        Ind2=np.where(np.asarray(fnn2)>AT)
        if len(Ind1[0])/float(len(fnn1))<0.1 and len(Ind2[0])/float(len(fnn2))<0.1:
            embedm=k
            break
    return embedm
Example #44
                     [ 0.1, -0.1]]])

    a31 = np.r_[np.eye(3)[None,:,:], 0.8*np.eye(3)[None,:,:]]
    a32 = np.array([[[ 1. ,  0. ,  0. ],
                     [ 0. ,  1. ,  0. ],
                     [ 0. ,  0. ,  1. ]],

                    [[ 0.8,  0. ,  0. ],
                     [ 0.1,  0.6,  0. ],
                     [ 0. ,  0. ,  0.9]]])

    ########
    ut = np.random.randn(1000,2)
    ar2s = vargenerate(a22,ut)
    #res = np.linalg.lstsq(lagmat(ar2s,1)[:,1:], ar2s)
    res = np.linalg.lstsq(lagmat(ar2s,1), ar2s, rcond=-1)
    bhat = res[0].reshape(1,2,2)
    arhat = ar2full(bhat)
    #print(maxabs(arhat - a22))


    v = _Var(ar2s)
    v.fit(1)
    v.forecast()
    v.forecast(25)[-30:]


    ar23 = np.array([[[ 1. ,  0. ],
                     [ 0. ,  1. ]],

                    [[-0.6,  0. ],
Example #45
def acorr_lm(x, maxlag=None, autolag='AIC', store=False, regresults=False):
    '''Lagrange Multiplier tests for autocorrelation

    This is a generic Lagrange Multiplier test for autocorrelation. I don't
    have a reference for it, but it returns Engle's ARCH test if x is the
    squared residual array. A variation on it with additional exogenous
    variables is the Breusch-Godfrey autocorrelation test.

    Parameters
    ----------
    x : ndarray, (nobs,)
        residuals from an estimation, or time series
    maxlag : int
        highest lag to use
    autolag : None or str
        If None, then a fixed number of lags given by maxlag is used;
        otherwise 'AIC' or 'BIC' selects the lag length.
    store : bool
        If true then the intermediate results are also returned

    Returns
    -------
    lm : float
        Lagrange multiplier test statistic
    lmpval : float
        p-value for Lagrange multiplier test
    fval : float
        fstatistic for F test, alternative version of the same test based on
        F test for the parameter restriction
    fpval : float
        pvalue for F test
    resstore : instance (optional)
        a class instance that holds intermediate results. Only returned if
        store=True

    See Also
    --------
    het_arch
    acorr_breusch_godfrey
    acorr_ljung_box

    '''

    if regresults:
        store = True

    x = np.asarray(x)
    nobs = x.shape[0]
    if maxlag is None:
        #for adf from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12. * np.power(nobs/100., 1/4.)))#nobs//4  #TODO: check default, or do AIC/BIC


    xdall = lagmat(x[:,None], maxlag, trim='both')
    nobs = xdall.shape[0]
    xdall = np.c_[np.ones((nobs,1)), xdall]
    xshort = x[-nobs:]

    if store: resstore = ResultsStore()

    if autolag:
        #search for lag length with highest information criteria
        #Note: I use the same number of observations to have comparable IC
        results = {}
        for mlag in range(1, maxlag+1):
            results[mlag] = OLS(xshort, xdall[:,:mlag+1]).fit()

        if autolag.lower() == 'aic':
            icbest, icbestlag = min((v.aic, k) for k, v in iteritems(results))
        elif autolag.lower() == 'bic':
            icbest, icbestlag = min((v.bic, k) for k, v in iteritems(results))
        else:
            raise ValueError("autolag can only be None, 'AIC' or 'BIC'")

        #rerun ols with best ic
        xdall = lagmat(x[:,None], icbestlag, trim='both')
        nobs = xdall.shape[0]
        xdall = np.c_[np.ones((nobs,1)), xdall]
        xshort = x[-nobs:]
        usedlag = icbestlag
        if regresults:
            resstore.results = results
    else:
        usedlag = maxlag

    resols = OLS(xshort, xdall[:,:usedlag+1]).fit()
    fval = resols.fvalue
    fpval = resols.f_pvalue
    lm = nobs * resols.rsquared
    lmpval = stats.chi2.sf(lm, usedlag)
    # Note: degrees of freedom for LM test is nvars minus constant = usedlags
    #return fval, fpval, lm, lmpval

    if store:
        resstore.resols = resols
        resstore.usedlag = usedlag
        return lm, lmpval, fval, fpval, resstore
    else:
        return lm, lmpval, fval, fpval
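A hedged usage sketch for acorr_lm (the function as defined above, mirroring statsmodels.stats.diagnostic.acorr_lm): an AR(1) series should make the LM test reject the no-autocorrelation null, and with autolag=None exactly four values come back.

# hedged usage sketch; the AR(1) input is synthetic
import numpy as np

np.random.seed(12345)
e = np.random.randn(500)
x = np.empty(500)
x[0] = e[0]
for t in range(1, 500):
    x[t] = 0.6 * x[t - 1] + e[t]   # autocorrelated series
lm, lmpval, fval, fpval = acorr_lm(x, maxlag=4, autolag=None)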
Example #46
def acorr_breusch_godfrey(results, nlags=None, store=False):
    '''Breusch Godfrey Lagrange Multiplier tests for residual autocorrelation

    Parameters
    ----------
    results : Result instance
        Estimation results for which the residuals are tested for serial
        correlation
    nlags : int
        Number of lags to include in the auxiliary regression (nlags is the
        highest lag); if None, a Schwert (1989) rule of thumb based on the
        sample size is used
    store : bool
        If store is true, then an additional class instance that contains
        intermediate results is returned.

    Returns
    -------
    lm : float
        Lagrange multiplier test statistic
    lmpval : float
        p-value for Lagrange multiplier test
    fval : float
        fstatistic for F test, alternative version of the same test based on
        F test for the parameter restriction
    fpval : float
        pvalue for F test
    resstore : instance (optional)
        a class instance that holds intermediate results. Only returned if
        store=True

    Notes
    -----
    BG adds lags of residual to exog in the design matrix for the auxiliary
    regression with residuals as endog,
    see Greene 12.7.1.

    References
    ----------
    Greene Econometrics, 5th edition

    '''

    x = np.asarray(results.resid)
    exog_old = results.model.exog
    nobs = x.shape[0]
    if nlags is None:
        #for adf from Greene referencing Schwert 1989
        nlags = np.trunc(12. * np.power(nobs/100., 1/4.))#nobs//4  #TODO: check default, or do AIC/BIC
        nlags = int(nlags)

    x = np.concatenate((np.zeros(nlags), x))

    xdall = lagmat(x[:,None], nlags, trim='both')
    nobs = xdall.shape[0]
    xdall = np.c_[np.ones((nobs,1)), xdall]
    xshort = x[-nobs:]
    exog = np.column_stack((exog_old, xdall))
    k_vars = exog.shape[1]

    if store: resstore = ResultsStore()

    resols = OLS(xshort, exog).fit()
    ft = resols.f_test(np.eye(nlags, k_vars, k_vars - nlags))
    fval = ft.fvalue
    fpval = ft.pvalue
    fval = np.squeeze(fval)[()]   #TODO: fix this in ContrastResults
    fpval = np.squeeze(fpval)[()]
    lm = nobs * resols.rsquared
    lmpval = stats.chi2.sf(lm, nlags)
    # Note: degrees of freedom for LM test is nvars minus constant = usedlags
    #return fval, fpval, lm, lmpval

    if store:
        resstore.resols = resols
        resstore.usedlag = nlags
        return lm, lmpval, fval, fpval, resstore
    else:
        return lm, lmpval, fval, fpval
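A hedged usage sketch for acorr_breusch_godfrey: fit an OLS model with statsmodels, then test its residuals; nlags=2 is an arbitrary choice for illustration.

# hedged usage sketch; the regressors and noise are synthetic
import numpy as np
import statsmodels.api as sm

np.random.seed(0)
n = 200
X = sm.add_constant(np.random.randn(n))
y = X.dot([1.0, 0.5]) + np.random.randn(n)
res = sm.OLS(y, X).fit()
lm, lmpval, fval, fpval = acorr_breusch_godfrey(res, nlags=2)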
Example #47
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
             store=False, regresults=False):
    """
    Augmented Dickey-Fuller unit root test

    The Augmented Dickey-Fuller test can be used to test for a unit root in a
    univariate process in the presence of serial correlation.

    Parameters
    ----------
    x : array_like, 1d
        data series
    maxlag : int
        Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
    regression : {'c','ct','ctt','nc'}
        Constant and trend order to include in regression

        * 'c' : constant only (default)
        * 'ct' : constant and trend
        * 'ctt' : constant, and linear and quadratic trend
        * 'nc' : no constant, no trend
    autolag : {'AIC', 'BIC', 't-stat', None}
        * if None, then maxlag lags are used
        * if 'AIC' (default) or 'BIC', then the number of lags is chosen
          to minimize the corresponding information criterion
        * 't-stat' based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant
          using a 5%-sized test
    store : bool
        If True, then a result instance is returned additionally to
        the adf statistic. Default is False
    regresults : bool, optional
        If True, the full regression results are returned. Default is False

    Returns
    -------
    adf : float
        Test statistic
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
    usedlag : int
        Number of lags used
    nobs : int
        Number of observations used for the ADF regression and calculation of
        the critical values
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels. Based on MacKinnon (2010)
    icbest : float
        The maximized information criterion if autolag is not None.
    resstore : ResultStore, optional
        A dummy class with results attached as attributes

    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
    root, with the alternative that there is no unit root. If the pvalue is
    above a critical size, then we cannot reject that there is a unit root.

    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
    to significant, then the critical values should be used to judge whether
    to reject the null.

    The autolag option and maxlag for it are described in Greene.

    Examples
    --------
    See example notebook

    References
    ----------
    .. [1] W. Green.  "Econometric Analysis," 5th ed., Pearson, 2003.

    .. [2] Hamilton, J.D.  "Time Series Analysis".  Princeton, 1994.

    .. [3] MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
        unit-root and cointegration tests.  `Journal of Business and Economic
        Statistics` 12, 167-76.

    .. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."  Queen's
        University, Dept of Economics, Working Papers.  Available at
        http://ideas.repec.org/p/qed/wpaper/1227.html
    """

    if regresults:
        store = True

    trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
    if regression is None or isinstance(regression, int):
        regression = trenddict[regression]
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
    x = np.asarray(x)
    nobs = x.shape[0]

    if maxlag is None:
        #from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))

    xdiff = np.diff(x)
    xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
    nobs = xdall.shape[0]  # pylint: disable=E1103

    xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]

    if store:
        resstore = ResultsStore()
    if autolag:
        if regression != 'nc':
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1  # 1 for level  # pylint: disable=E1103
        #search for lag length with smallest information criteria
        #Note: use the same number of observations to have comparable IC
        #aic and bic: smaller is better

        if not regresults:
            icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
                                       maxlag, autolag)
        else:
            icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
                                              maxlag, autolag,
                                              regresults=regresults)
            resstore.autolag_results = alres

        bestlag -= startlag  # convert to lag not column index

        #rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
        nobs = xdall.shape[0]   # pylint: disable=E1103
        xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None
    if regression != 'nc':
        resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
                     regression)).fit()
    else:
        resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()

    adfstat = resols.tvalues[0]
#    adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are integrated
    # for orders higher than I(1), ie., not ADF but cointegration tests.

    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
                  "10%" : critvalues[2]}
    if store:
        resstore.resols = resols
        resstore.maxlag = maxlag
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = ("The coefficient on the lagged level equals 1 - "
                       "unit root")
        resstore.HA = "The coefficient on the lagged level < 1 - stationary"
        resstore.icbest = icbest
        resstore._str = 'Augmented Dickey-Fuller Test Results'
        return adfstat, pvalue, critvalues, resstore
    else:
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
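A hedged usage sketch for adfuller: a pure random walk should not reject the unit-root null, so expect a large p-value; with autolag set, six values are returned.

# hedged usage sketch; the random walk is synthetic
import numpy as np

np.random.seed(0)
rw = np.cumsum(np.random.randn(500))
adf, pvalue, usedlag, nobs, crit, icbest = adfuller(rw, regression='c',
                                                    autolag='AIC')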
Example #48
File: arg.py Project: khrapovs/argamma
    def momcond_vol(self, theta_vol, uarg=None, zlag=1):
        """Moment conditions (volatility) for spectral GMM estimator.

        Parameters
        ----------
        theta_vol : (3, ) array
            Vector of model parameters. [scale, rho, delta]
        uarg : (nu, ) array
            Grid to evaluate a and b functions
        zlag : int
            Number of lags to use for the instrument

        Returns
        -------
        moment : (nobs, nmoms) array
            Matrix of momcond restrictions
        dmoment : (nmoms, nparams) array
            Gradient of momcond restrictions. Mean over observations

        Raises
        ------
        ValueError

        """

        if uarg is None:
            raise ValueError("uarg is missing!")

        vollag, vol = lagmat(self.vol, maxlag=zlag,
                             original='sep', trim='both')
        prevvol = vollag[:, 0][:, np.newaxis]
        # Number of observations after truncation
        nobs = vol.shape[0]
        # Number of moments
        nmoms = 2 * uarg.shape[0] * (zlag+1)
        # Number of parameters
        nparams = theta_vol.shape[0]

        # Change class attribute with the current theta
        param = ARGparams()
        try:
            param.update(theta_vol=theta_vol)
        except ValueError:
            return np.ones((nobs, nmoms))*1e10, np.ones((nmoms, nparams))*1e10

        # Must be (nobs, nu) array
        error = np.exp(-vol * uarg) - self.char_fun_vol(uarg, param)[zlag-1:]
        # Instruments, (nobs, ninstr) array
        instr = np.hstack([np.exp(-1j * vollag), np.ones((nobs, 1))])
        # Must be (nobs, nmoms) array
        moment = error[:, np.newaxis, :] * instr[:, :, np.newaxis]
        moment = moment.reshape((nobs, nmoms//2))
        # (nobs, 2 * ninstr)
        moment = np.hstack([np.real(moment), np.imag(moment)])

        # Initialize derivative matrix
        dmoment = np.empty((nmoms, nparams))
        for i in range(nparams):
            dexparg = - prevvol * self.dafun(uarg, param)[i] \
                - np.ones((nobs, 1)) * self.dbfun(uarg, param)[i]
            derror = - self.char_fun_vol(uarg, param)[zlag-1:] * dexparg

            derrorinstr = derror[:, np.newaxis, :] * instr[:, :, np.newaxis]
            derrorinstr = derrorinstr.reshape((nobs, nmoms//2))
            derrorinstr = np.hstack([np.real(derrorinstr),
                                     np.imag(derrorinstr)])
            dmoment[:, i] = derrorinstr.mean(0)

        return moment, dmoment
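The original='sep' call above returns the lag matrix and the consistently trimmed contemporaneous series as separate, row-aligned arrays; a minimal sketch of that split, independent of the ARG class:

# minimal sketch of lagmat(..., original='sep', trim='both')
import numpy as np
from statsmodels.tsa.tsatools import lagmat

v = np.arange(6.0)
vlag, v0 = lagmat(v, maxlag=1, original='sep', trim='both')
# vlag[:, 0] holds v_{t-1} = [0, 1, 2, 3, 4]; v0 holds v_t = [1, 2, 3, 4, 5]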
Example #49
max_xc = 0
best_shift = 0
for shift in range(-10, 0): #Tune this search range
    xc = (numpy.roll(sig1, shift) * sig2).sum()
    if xc > max_xc:
        max_xc = xc
        best_shift = shift
print 'Best shift:', best_shift
"""
If best_shift is at the edges of your search range,
you should expand the search range.
"""
#############################################################################

# create lag matrix for regression
bpmat = tools.lagmat(x,lag, original='in')
etmat = tools.lagmat(z,lag, original='in')
lamat = numpy.column_stack([bpmat,etmat])
#for i in range(len(etmat)):
#    lagmat.append(bpmat[i]+etmat[i])
#transpose matrix to determine required length
#run least squared regression
sqrd = numpy.linalg.lstsq(bpmat,y)
sqrdlag = numpy.linalg.lstsq(lamat,y)

wlls = sqrd[0]
lagls = sqrdlag[0]

cumls = numpy.cumsum(wlls)
lagcumls =numpy.cumsum(lagls)
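The fragment above depends on x, z, lag, and y defined elsewhere; a self-contained sketch of the same distributed-lag regression, with made-up inputs standing in for the barometric, earth-tide, and water-level series:

# self-contained sketch of the distributed-lag regression above
import numpy
from statsmodels.tsa import tsatools as tools

numpy.random.seed(0)
n, lag = 500, 10
x = numpy.random.randn(n)
z = numpy.random.randn(n)
y = numpy.convolve(x, numpy.ones(3) / 3.0, mode='same') + 0.1 * numpy.random.randn(n)

bpmat = tools.lagmat(x, lag, original='in')   # (n, lag+1): x_t and its lags
etmat = tools.lagmat(z, lag, original='in')
lamat = numpy.column_stack([bpmat, etmat])
wlls = numpy.linalg.lstsq(bpmat, y, rcond=None)[0]
lagls = numpy.linalg.lstsq(lamat, y, rcond=None)[0]
cumls = numpy.cumsum(wlls)                    # cumulative response function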
Example #50
def cointegration_johansen(input_df, lag=1):
    """
    For axis: -1 means no deterministic part, 0 means constant term, 1 means constant plus time-trend,
    > 1 means higher order polynomial.

    :param input_df: the input vectors as a pandas.DataFrame instance
    :param lag: number of lagged difference terms used when computing the estimator
    :return: returns test statistics data
    """
    count_samples, count_dimensions = input_df.shape
    input_df = detrend(input_df, type='constant', axis=0)
    diff_input_df = numpy.diff(input_df, 1, axis=0)
    z = tsatools.lagmat(diff_input_df, lag)
    z = z[lag:]
    z = detrend(z, type='constant', axis=0)
    diff_input_df = diff_input_df[lag:]
    diff_input_df = detrend(diff_input_df, type='constant', axis=0)
    r0t = residuals(diff_input_df, z)
    lx = input_df[:-lag]
    lx = lx[1:]
    lx = detrend(lx, type='constant', axis=0)
    rkt = residuals(lx, z)

    if rkt is None:
        return None

    skk = numpy.dot(rkt.T, rkt) / rkt.shape[0]
    sk0 = numpy.dot(rkt.T, r0t) / rkt.shape[0]
    s00 = numpy.dot(r0t.T, r0t) / r0t.shape[0]
    sig = numpy.dot(sk0, numpy.dot(linalg.inv(s00), sk0.T))
    eigenvalues, eigenvectors = linalg.eig(numpy.dot(linalg.inv(skk), sig))

    # normalizing the eigenvectors such that (du'skk*du) = I
    temp = linalg.inv(linalg.cholesky(numpy.dot(eigenvectors.T, numpy.dot(skk, eigenvectors))))
    dt = numpy.dot(eigenvectors, temp)

    # sorting eigenvalues and vectors
    order_decreasing = numpy.flipud(numpy.argsort(eigenvalues))
    sorted_eigenvalues = eigenvalues[order_decreasing]
    sorted_eigenvectors = dt[:, order_decreasing]

    # computing the trace and max eigenvalue statistics
    trace_statistics = numpy.zeros(count_dimensions)
    eigenvalue_statistics = numpy.zeros(count_dimensions)
    critical_values_max_eigenvalue = numpy.zeros((count_dimensions, 3))
    critical_values_trace = numpy.zeros((count_dimensions, 3))
    iota = numpy.ones(count_dimensions)
    t, junk = rkt.shape
    for i in range(0, count_dimensions):
        tmp = numpy.log(iota - sorted_eigenvalues)[i:]
        trace_statistics[i] = -t * numpy.sum(tmp, 0)
        eigenvalue_statistics[i] = -t * numpy.log(1 - sorted_eigenvalues[i])
        critical_values_max_eigenvalue[i, :] = get_critical_values_max_eigenvalue(count_dimensions - i, time_polynomial_order=0)
        critical_values_trace[i, :] = get_critical_values_trace(count_dimensions - i, time_polynomial_order=0)

    result = dict()
    result['rkt'] = rkt
    result['r0t'] = r0t
    result['eigenvalues'] = sorted_eigenvalues
    result['eigenvectors'] = sorted_eigenvectors
    result['trace_statistic'] = trace_statistics  # likelihood ratio trace statistic
    result['eigenvalue_statistics'] = eigenvalue_statistics  # maximum eigenvalue statistic
    result['critical_values_trace'] = critical_values_trace
    result['critical_values_max_eigenvalue'] = critical_values_max_eigenvalue
    result['order_decreasing'] = order_decreasing  # indices of eigenvalues in decreasing order
    return result
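A hedged usage sketch for cointegration_johansen; the `residuals` helper and the two get_critical_values_* lookups used inside it are assumed to be defined elsewhere in this project.

# hedged usage sketch; two synthetic series sharing a stochastic trend
import numpy
import pandas

numpy.random.seed(0)
trend = numpy.cumsum(numpy.random.randn(500))      # shared stochastic trend
df = pandas.DataFrame({'a': trend + numpy.random.randn(500),
                       'b': 0.5 * trend + numpy.random.randn(500)})
out = cointegration_johansen(df, lag=1)
if out is not None:
    print(out['trace_statistic'])                  # compare to critical values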
Example #51
Trans_M2 = curv(delt_M2,P_M2,r)
Trans_O1 = curv(delt_O1,P_O1,r)

tf=time.clock()
print '...Done!', tf-t0, 'seconds'
t0=time.clock()

###########################################################################
# Calculate BP Response Function
###########################################################################
ti=time.clock() # measure time of calculation
print 'Calculating BP Response function...',
t0=time.clock()

# create lag matrix for regression
bpmat = tools.lagmat(dbp, lag, original='in')
etmat = tools.lagmat(ddl, lag, original='in')
#lamat combines lag matrices of bp and et
lamat = numpy.column_stack([bpmat,etmat])
#for i in range(len(etmat)):
#    lagmat.append(bpmat[i]+etmat[i])
#transpose matrix to determine required length
#run least squared regression
sqrd = numpy.linalg.lstsq(bpmat,dwl)
#determine lag coefficients of the lag matrix lamat
sqrdlag = numpy.linalg.lstsq(lamat,dwl)

wlls = sqrd[0]
#lagls return the coefficients of the least squares of lamat
lagls = sqrdlag[0]
Example #52
                     [ 0.1, -0.1]]])

    a31 = np.r_[np.eye(3)[None,:,:], 0.8*np.eye(3)[None,:,:]]
    a32 = np.array([[[ 1. ,  0. ,  0. ],
                     [ 0. ,  1. ,  0. ],
                     [ 0. ,  0. ,  1. ]],

                    [[ 0.8,  0. ,  0. ],
                     [ 0.1,  0.6,  0. ],
                     [ 0. ,  0. ,  0.9]]])

    ########
    ut = np.random.randn(1000,2)
    ar2s = vargenerate(a22,ut)
    #res = np.linalg.lstsq(lagmat(ar2s,1)[:,1:], ar2s)
    res = np.linalg.lstsq(lagmat(ar2s,1), ar2s, rcond=-1)
    bhat = res[0].reshape(1,2,2)
    arhat = ar2full(bhat)
    #print(maxabs(arhat - a22))

    v = _Var(ar2s)
    v.fit(1)
    v.forecast()
    v.forecast(25)[-30:]

    ar23 = np.array([[[ 1. ,  0. ],
                     [ 0. ,  1. ]],

                    [[-0.6,  0. ],
                     [ 0.2, -0.6]],
Example #53
def tideproc(inpfile,bpdata,edata):

    delta = 1.1562
    p = 7.692E5

    ###########################################################################
    """
    INPUT FILES ARE PUT IN BELOW
    """

    lag = 100
    tol = 0.05  #fractional tolerance around each frequency of interest (0.05 = 5%)
    r = 1 #well radius in inches
    Be = 0.10 #barometric efficiency
    numb = 2000 # number of values to process
    spd = 24 #samples per day hourly sampling = 24
    lagt = -6.0 #hours different from UTC (negative indicates west); UT is -7

    """
    INPUT FILES END HERE
    """
    ###########################################################################

    #frequencies in cpd
    O1 = 0.9295 #principal lunar
    K1 = 1.0029 #Lunar Solar
    M2 = 1.9324 #principal lunar
    S2 = 2.00   #Principal Solar
    N2 = 1.8957 #Lunar elliptic

    #periods in days
    P_M2 = 0.5175
    P_O1 = 1.0758

    # amplitude factors from Merritt 2004
    b_O1 = 0.377
    b_P1 = 0.176
    b_K1 = 0.531
    b_N2 = 0.174
    b_M2 = 0.908
    b_S2 = 0.423
    b_K2 = 0.115

    #love numbers and other constants from Agnew 2007
    l = 0.0839
    k = 0.2980
    h = 0.6032
    Km = 1.7618 #general lunar coefficient
    pi = math.pi #pi

    #gravity and earth radius
    g = 9.821  #m/s**2
    a = 6.3707E6 #m
    g_ft = 32.23 #ft
    a_ft = 2.0902e7 #ft/s**2

    #values to determine porosity from Merritt 2004 pg 56
    Beta = 2.32E-8
    rho = 62.4

    impfile = inpfile
    outfile = 'c'+impfile
    data = csv.reader(open(impfile, 'rb'), delimiter=",")
    dy, u, l, nm, d, wl, t, vert =[], [], [], [], [], [], [], []
    yrdty, year, month, day, hours, minutes, seconds, julday = [], [], [], [], [], [], [], []
    yrdty2,year2, month2, day2, hours2, minutes2, seconds2, julday2 = [], [], [], [], [], [], [], []
    yrdty3,year3, month3, day3, hours3, minutes3, seconds3, julday3 = [], [], [], [], [], [], [], []

    # read in bp data
    bpdata = bpdata
    bdata = csv.reader(open(bpdata, 'rb'), delimiter=",")
    v, d2, bp=[], [], []
    d3, SG33WDD, PW19S2, PW19M2, MXSWDD = [],[],[],[],[]

    etdata = edata

    #assign data in csv to arrays
    for row in data:
        u.append(row)

    for row in bdata:
        v.append(row)

    #pick well name, lat., long., and elevation data out of header of wl file
    well_name = u[0][1]
    lon = [float(u[5][1])]
    latt = [round(float(u[4][1]),3)]
    el = [round(float(u[10][1])/3.2808,3)]

    #import the bp data
    with open(bpdata, 'rb') as tot:
        csvreader1 = csv.reader(tot)
        for row in skip_first(csvreader1, 3):
            d2.append(row[2])
            bp.append(float(row[3]))

    #import the wl data
    with open(impfile, 'rb') as total:
        csvreader = csv.reader(total)
        for row in skip_first(csvreader, 62):
            dy.append(row[0])
            nm.append(row[1])

    #import supplemental earth tide data
    with open(etdata, 'rb') as tos:
        csvreader2 = csv.reader(tos)
        for row in skip_first(csvreader2,2):
            d3.append(row[5])
            SG33WDD.append(float(row[6]))
            PW19S2.append(row[7])
            PW19M2.append(row[8])
            MXSWDD.append(row[9])

    #import a smaller part of the wl data
    for i in range(len(dy)-numb,len(dy)):
        d.append(dy[i])
        wl.append(nm[i])

    #fill in last line of wl data
    wl[-1]=wl[-2]
    for i in range(len(wl)):
        if wl[i] == '':
            wl[i]=wl[i-1]

    #create a list of latitude, longitude, elevation, and gmt for tidal calculation
    lat = latt*len(d)
    longit = lon*len(d)
    elev = el*len(d)
    gmtt = [float(lagt)]*len(d)

    # define the various components of the date, represented by d
    # dates for wl data
    for i in range(len(d)):
        yrdty.append(time.strptime(d[i],"%Y-%m-%d %H:%M:%S"))
        year.append(int(yrdty[i].tm_year))
        month.append(int(yrdty[i].tm_mon))
        day.append(int(yrdty[i].tm_mday))
        hours.append(int(yrdty[i].tm_hour))
        minutes.append(int(yrdty[i].tm_min))
        seconds.append(int(0)) #yrdty[i].tm_sec
    # dates for bp data
    for i in range(len(d2)):
        yrdty2.append(time.strptime(d2[i],"%Y-%m-%d %H:%M:%S"))
        year2.append(int(yrdty2[i].tm_year))
        month2.append(int(yrdty2[i].tm_mon))
        day2.append(int(yrdty2[i].tm_mday))
        hours2.append(int(yrdty2[i].tm_hour))
        minutes2.append(int(yrdty2[i].tm_min))
        seconds2.append(int(0)) #yrdty2[i].tm_sec
    # dates for bp data
    for i in range(len(d3)):
        yrdty3.append(time.strptime(d3[i],"%m/%d/%Y %H:%M"))
        year3.append(int(yrdty3[i].tm_year))
        month3.append(int(yrdty3[i].tm_mon))
        day3.append(int(yrdty3[i].tm_mday))
        hours3.append(int(yrdty3[i].tm_hour))
        minutes3.append(int(yrdty3[i].tm_min))
        seconds3.append(int(0)) #yrdty2[i].tm_sec

    #julian day calculation
    def calc_jday(Y, M, D, h, m, s):
      # Y is year, M is month, D is day
      # h is hour, m is minute, s is second
      # returns decimal day (float)
      Months = [0, 31, 61, 92, 122, 153, 184, 214, 245, 275, 306, 337]
      if M < 3:
        Y = Y-1
        M = M+12
      JD = math.floor((Y+4712)/4.0)*1461 + ((Y+4712)%4)*365
      JD = JD + Months[M-3] + D
      JD = JD + (h + (m/60.0) + (s/3600.0)) / 24.0
      # corrections-
      # 59 accounts for shift of year from 1 Jan to 1 Mar
      # -13 accounts for shift between Julian and Gregorian calendars
      # -0.5 accounts for shift between noon and prev. midnight
      JD = JD + 59 - 13.5
      return JD

    # create a list of julian dates
    for i in range(len(d)):
        julday.append(calc_jday(year[i],month[i],day[i],hours[i],minutes[i],seconds[i]))

    for i in range(len(d2)):
        julday2.append(calc_jday(year2[i],month2[i],day2[i],hours2[i],minutes2[i],seconds2[i]))

    for i in range(len(d3)):
        julday3.append(calc_jday(year3[i],month3[i],day3[i],hours3[i],minutes3[i],seconds3[i]))

    #run tidal function
    for i in range(len(d)):
        t.append(tamura.tide(int(year[i]), int(month[i]), int(day[i]), int(hours[i]), int(minutes[i]), int(seconds[i]), float(longit[i]), float(lat[i]), float(elev[i]), 0.0, lagt)) #float(gmtt[i])

    vert, Grav_tide, WDD_tam, areal, potential, dilation = [], [], [], [], [], []

    #determine vertical strain from Agnew 2007
    #units are in sec squared, meaning results in mm
    # areal determine areal strain from Agnew 2007, units in mm
    #dilation from relationship defined using Harrison's code
    #WDD is used to recreate output from TSoft
    for i in range(len(t)):
        areal.append(t[i]*p*1E-5)
        potential.append(-318.49681664*t[i] - 0.50889238)
        WDD_tam.append(t[i]*(-.99362956469)-7.8749754)
        dilation.append(0.381611837*t[i] - 0.000609517)
        vert.append(t[i] * 1.692)
        Grav_tide.append(-1*t[i])

    #convert to excel date-time numeric format
    xls_date = []
    for i in range(len(d)):
        xls_date.append(float(julday[i])-2415018.5)

    xls_date2 = []
    for i in range(len(d2)):
        xls_date2.append(float(julday2[i])-2415018.5)

    xls_date3 = []
    for i in range(len(d3)):
        xls_date3.append(float(julday3[i])-2415018.5)

    t_start = xls_date[0]
    t_end = xls_date[-1]
    t_len = (len(xls_date))

    #align bp data with wl data
    t1 = numpy.linspace(t_start, t_end, t_len)
    bpint = numpy.interp(t1, xls_date2, bp)
    etint = numpy.interp(t1, xls_date3, SG33WDD)

    xprep, yprep, zprep = [], [], []
    #convert text from csv to float values
    for i in range(len(julday)):
        xprep.append(float(julday[i]))
        yprep.append(float(dilation[i]))
        zprep.append(float(wl[i]))

    #put data into numpy arrays for analysis
    xdata = numpy.array(xprep)
    ydata = numpy.array(yprep)
    zdata = numpy.array(zprep)
    bpdata = numpy.array(bpint)
    etdata = numpy.array(etint)
    bp = bpdata
    z = zdata
    y = ydata
    #    tempdata = numpy.array(tempint)
    #standarize julian day to start at zero

    x0data = xdata - xdata[0]

    wl_z = []
    mn = numpy.mean(z)
    std = numpy.std(z)
    for i in range(len(z)):
        wl_z.append((z[i]-mn)/std)

    bp_z = []
    mn = numpy.mean(bp)
    std = numpy.std(bp)
    for i in range(len(bp)):
        bp_z.append((bp[i]-mn)/std)

    t_z = []
    mn = numpy.mean(t)
    std = numpy.std(t)
    for i in range(len(t)):
        t_z.append((t[i]-mn)/std)

    dbp = []
    for i in range(len(bp)-1):
        dbp.append(bp[i]-bp[i+1])

    dwl = []
    for i in range(len(z)-1):
        dwl.append(z[i]-z[i+1])

    dt = []
    for i in range(len(y)-1):
        dt.append(y[i]-y[i+1])

    dbp.append(0)

    dwl.append(0)

    dt.append(0)


    ###########################################################################
    #
    ############################################################ Filter Signals
    #
    ###########################################################################
    ''' these filtered data are not necessary,
    but are good for graphical comparison '''
    ### define filtering function
    def filt(frq,tol,data):
        #define frequency tolerance range
        lowcut = (frq-frq*tol)
        highcut = (frq+frq*tol)
        #conduct fft
        ffta = fft.fft(data)
        bp2 = ffta[:]
        fftb = fft.fftfreq(len(bp2))
        #make every amplitude value 0 that is not in the tolerance range of frequency of interest
        #24 adjusts the frequency to cpd
        for i in range(len(fftb)):
            #spd is samples per day (if hourly = 24)
            if (fftb[i]*spd)>highcut or (fftb[i]*spd)<lowcut:
                bp2[i]=0
        #conduct inverse fft to transpose the filtered frequencies back into time series
        crve = fft.ifft(bp2)
        yfilt = crve.real
        return yfilt



    #filter tidal data
    yfilt_O1 = filt(O1,tol,ydata)
    yfilt_M2 = filt(M2,tol,ydata)
    #filter wl data
    zfilt_O1 = filt(O1,tol,zdata)
    zfilt_M2 = filt(M2,tol,zdata)

    zffta = abs(fft.fft(zdata))
    zfftb = abs(fft.fftfreq(len(zdata))*spd)



    def phasefind(A,frq):
        spectrum = fft.fft(A)
        freq = fft.fftfreq(len(spectrum))
        r = []
        #filter = eliminate all values in the wl data fft except the frequencies in the range of interest
        for i in range(len(freq)):
            #spd is samples per day (if hourly = 24)
            if (freq[i]*spd)>(frq-frq*tol) and (freq[i]*spd)<(frq+frq*tol):
                r.append(freq[i]*spd)
            else:
                r.append(0)
        #find the place of the max complex value for the filtered frequencies and return the complex number
        p = max(enumerate(r), key=itemgetter(1))[0]
        pla = spectrum[p]
        T5 = cmath.phase(pla)*180/pi
        return T5

    yphsO1 = phasefind(ydata,O1)
    zphsO1 = phasefind(zdata,O1)
    phsO1 = zphsO1 - yphsO1
    yphsM2 = phasefind(ydata,M2)
    zphsM2 = phasefind(zdata,M2)
    phsM2 = zphsM2 - yphsM2


#    def phase_find(A,B,P):
#        period = P
#        tmax = len(xdata)*24
#        nsamples = len(A)
#        # calculate cross correlation of the two signals
#        t6 = numpy.linspace(0.0, tmax, nsamples, endpoint=False)
#        xcorr = numpy.correlate(A, B)
#        # The peak of the cross-correlation gives the shift between the two signals
#        # The xcorr array goes from -nsamples to nsamples
#        dt6 = numpy.linspace(-t6[-1], t6[-1], 2*nsamples-1)
#        recovered_time_shift = dt6[xcorr.argmax()]
#
#        # force the phase shift to be in [-pi:pi]
#        #recovered_phase_shift = 2*pi*(((0.5 + recovered_time_shift/(period*24)) % 1.0) - 0.5)
#        return recovered_time_shift
#
#
#    O1_ph= phase_find(ydata,zdata,P_O1)
#    M2_ph= phase_find(ydata,zdata,P_M2)




    ###########################################################################
    #
    ####################################################### Regression Analysis
    #
    ###########################################################################

    #define functions used for least squares fitting
    def f3(p, x):
        #a,b,c = p
        m = 2.0 * O1 * pi
        y = p[0] + p[1] * (numpy.cos(m*x)) + p[2] * (numpy.sin(m*x))
        return y

    def f4(p, x):
        #a,b,c = p
        m =2.0 * M2 * pi
        y  = p[0] + p[1] * (numpy.cos(m*x)) + p[2] * (numpy.sin(m*x))
        return y

    #define functions to minimize
    def err3(p,y,x):
        return y - f3(p,x)

    def err4(p,y,x):
        return y - f4(p,x)

    #conducts regression, then calculates amplitude and phase angle
    def lstsq(func, y, x):
        #define starting values with x0
        x0 = numpy.array([sum(y)/float(len(y)), 0.01, 0.01])
        fit, chks = optimization.leastsq(func, x0, args=(y, x))
        amp = math.sqrt((fit[1]**2)+(fit[2]**2))      #amplitude
        phi = numpy.arctan2(-1*fit[2], fit[1])*(180/pi)   #phase angle
        return amp, phi, fit

    #water level signal regression
    WLO1 = lstsq(err3,zdata,xdata)
    WLM2 = lstsq(err4,zdata,xdata)

    #tide signal regression
    TdO1 = lstsq(err3,ydata,xdata)
    TdM2 = lstsq(err4,ydata,xdata)

    #calculate phase shift
    phase_sft_O1 = WLO1[1] - TdO1[1]
    phase_sft_M2 = WLM2[1] - TdM2[1]

    delt_O1 = (phase_sft_O1/(O1*360))*24
    delt_M2 = (phase_sft_M2/(M2*360))*24

    #determine tidal potential Cutillo and Bredehoeft 2010 pg 5 eq 4
    f_O1 = math.sin(float(lat[1])*pi/180)*math.cos(float(lat[1])*pi/180)
    f_M2 = 0.5*math.cos(float(lat[1])*pi/180)**2

    A2_M2 = g_ft*Km*b_M2*f_M2
    A2_O1 = g_ft*Km*b_O1*f_O1

    #Calculate ratio of head change to change in potential
    dW2_M2 = A2_M2/(WLM2[0])
    dW2_O1 = A2_O1/(WLO1[0])

    #estimate specific storage Cutillo and Bredehoeft 2010
    def SS(rat):
        return 6.95690250E-10*rat

    Ss_M2 = SS(dW2_M2)
    Ss_O1 = SS(dW2_O1)

    def curv(Y,P,r):
        rc = (r/12.0)*(r/12.0)
        Y = Y
        X = -1421.15/(0.215746 + Y) - 13.3401 - 0.000000143487*Y**4 - 9.58311E-16*Y**8*math.cos(0.9895 + Y + 1421.08/(0.215746 + Y) + 0.000000143487*Y**4)
        T = (X*rc)/P
        return T

    Trans_M2 = curv(delt_M2,P_M2,r)
    Trans_O1 = curv(delt_O1,P_O1,r)


    ###########################################################################
    #
    ############################################ Calculate BP Response Function
    #
    ###########################################################################

    # create lag matrix for regression
    bpmat = tools.lagmat(dbp, lag, original='in')
    etmat = tools.lagmat(dt, lag, original='in')
    #lamat combines lag matrices of bp and et
    lamat = numpy.column_stack([bpmat,etmat])
    #for i in range(len(etmat)):
    #    lagmat.append(bpmat[i]+etmat[i])
    #transpose matrix to determine required length
    #run least squared regression
    sqrd = numpy.linalg.lstsq(bpmat,dwl)
    #determine lag coefficients of the lag matrix lamat
    sqrdlag = numpy.linalg.lstsq(lamat,dwl)

    wlls = sqrd[0]
    #lagls return the coefficients of the least squares of lamat
    lagls = sqrdlag[0]

    cumls = numpy.cumsum(wlls)
    #returns cumulative coefficients of et and bp (lamat)
    lagcumls =numpy.cumsum(lagls)

    ymod = numpy.dot(bpmat,wlls)
    lagmod = numpy.dot(lamat,lagls)

    #resid gives the residual of the bp
    resid=[]
    for i in range(len(dwl)):
        resid.append(dwl[i] - ymod[i])
    #alpha returns the lag coefficients associated with bp
    alpha = lagls[0:len(lagls)//2]
    alpha_cum = numpy.cumsum(alpha)
    #gamma returns the lag coefficients associated with ET
    gamma = lagls[len(lagls)//2:len(lagls)]
    gamma_cum = numpy.cumsum(gamma)

    lag_time = []
    for i in range(len(xls_date)):
        lag_time.append((xls_date[i] - xls_date[0])*24)


    ######################################### determine slope of late time data
    lag_trim1 = lag_time[0:len(cumls)]
    lag_time_trim = lag_trim1[len(lag_trim1)-(len(lag_trim1)//2):len(lag_trim1)]
    alpha_trim = alpha_cum[len(lag_trim1)-(len(lag_trim1)//2):len(lag_trim1)]
    #calculate slope of late-time data
    lag_len = len(lag_time_trim)
    tran = numpy.array([lag_time_trim, numpy.ones(lag_len)])

    reg_late = numpy.linalg.lstsq(tran.T,alpha_trim)[0]
    late_line=[]
    for i in range(len(lag_trim1)):
        late_line.append(reg_late[0] * lag_trim1[i] + reg_late[1]) #regression line


    ######################################## determine slope of early time data
    lag_time_trim2 = lag_trim1[0:len(lag_trim1)-int(round((len(lag_trim1)/1.5),0))]
    alpha_trim2 = alpha_cum[0:len(lag_trim1)-int(round((len(lag_trim1)/1.5),0))]

    lag_len1 = len(lag_time_trim2)
    tran2 = numpy.array([lag_time_trim2, numpy.ones(lag_len1)])

    reg_early = numpy.linalg.lstsq(tran2.T,alpha_trim2)[0]
    early_line= []
    for i in range(len(lag_trim1)):
        early_line.append(reg_early[0] * lag_trim1[i] + reg_early[1]) #regression line

    aquifer_type = ''
    if reg_early[0] > 0.001:
        aquifer_type = 'borehole storage'
    elif reg_early[0] < -0.001:
        aquifer_type = 'unconfined conditions'
    else:
        aquifer_type = 'confined conditions'


    ###########################################################################
    #
    ################################################################ Make Plots
    #
    ###########################################################################
    fig_1_lab = well_name + ' bp response function'
    fig_2_lab = well_name + ' signal processing'

    plt.figure(fig_1_lab)
    plt.suptitle(fig_1_lab, x= 0.2, y=.99, fontsize='small')
    plt.subplot(2,1,1)
    #plt.plot(lag_time[0:len(cumls)],cumls, label='b.p. alone')
    plt.plot(lag_time[0:len(cumls)],alpha_cum,"o", label='b.p. when \n considering e.t.')
    # plt.plot(lag_time[0:len(cumls)],gamma_cum, label='e.t.')
    plt.plot(lag_trim1, late_line, 'r-', label='late reg.')
    plt.plot(lag_trim1, early_line, 'g-', label='early reg.')
    plt.xlabel('lag (hr)')
    plt.ylabel('cumulative response function')
    plt.legend(loc=4,fontsize='small')
    plt.subplot(2,1,2)
    plt.plot(lag_time,dwl, label='wl', lw=2)
    plt.plot(lag_time,ymod, label='wl modeled w bp')
    plt.plot(lag_time,lagmod, 'r--', label='wl modeled w bp&et')
    plt.legend(loc=4,fontsize='small')
    plt.xlim(0,lag)
    plt.ylabel('change (ft)')
    plt.xlabel('time (hrs)')
    plt.tight_layout()
    plt.savefig('l'+ os.path.splitext(impfile)[0]+'.pdf')


    plt.figure(fig_2_lab)
    plt.suptitle(fig_2_lab, x=0.2, fontsize='small')
    plt.title(os.path.splitext(impfile)[0])
    plt.subplot(4,1,1)
    plt.xcorr(yfilt_O1,zfilt_O1,maxlags=10)
    plt.ylim(-1.1,1.1)
    plt.tick_params(labelsize=8)
    plt.xlabel('lag (hrs)',fontsize='small')
    plt.ylabel('lag (hrs)',fontsize='small')
    plt.title('Cross Correl O1',fontsize='small')
    plt.subplot(4,1,2)
    plt.xcorr(yfilt_M2,zfilt_M2,maxlags=10)
    plt.ylim(-1.1,1.1)
    plt.tick_params(labelsize=8)
    plt.xlabel('lag (hrs)',fontsize='small')
    plt.ylabel('lag (hrs)',fontsize='small')
    plt.title('Cross Correl M2',fontsize='small')
    plt.subplot(4,1,3)
    plt.plot(zfftb,zffta)
    plt.tick_params(labelsize=8)
    plt.xlabel('frequency (cpd)',fontsize='small')
    plt.ylabel('amplitude')
    plt.title('WL fft',fontsize='small')
    plt.xlim(0,4)
    plt.ylim(0,30)
    plt.subplot(4,1,4)
    plt.plot(x0data,zdata, 'b')
    plt.tick_params(labelsize=8)
    plt.xlabel('julian days',fontsize='small')
    plt.ylabel('water level (ft)',fontsize='small')
    plt.twinx()
    plt.plot(x0data,f3(WLO1[2],x0data), 'r')
    plt.plot(x0data,f4(WLM2[2],x0data), 'g')
    plt.tick_params(labelsize=8)
    plt.xlim(0,10)
    plt.ylabel('tidal strain (ppb)',fontsize='small')
    plt.tick_params(labelsize=8)
    plt.tight_layout()
    plt.title('Regression Fit',fontsize='small')
    plt.savefig('f'+ os.path.splitext(impfile)[0]+'.pdf')
    plt.close()

    ###########################################################################
    #Write output to files
    ###########################################################################

    # create row of data for compiled output file info.csv
    myCSVrow = [os.path.splitext(inpfile)[0],well_name, A2_O1, A2_M2, phase_sft_O1, phase_sft_M2, delt_O1,
                delt_M2, Trans_M2, Trans_O1, Ss_O1, Ss_M2, WLO1[1], TdO1[1], WLM2[1], TdM2[1],
                WLO1[0], TdO1[0], WLM2[0], TdM2[0], WLO1[2][1], TdO1[2][1], WLM2[2][1],
                TdM2[2][1], WLO1[2][2], TdO1[2][2], WLM2[2][2], TdM2[2][2], reg_late[1], reg_early[0], aquifer_type, phsO1, phsM2]
    # add data row to compiled output file
    compfile = open('info.csv', 'a')
    writer = csv.writer(compfile)
    writer.writerow(myCSVrow)
    compfile.close()


    #export tidal data to individual (well specific) output file
    with open(outfile, "wb") as f:
        filewriter = csv.writer(f, delimiter=',')
    #write header
        header = ['xl_time','date_time','V_ugal','vert_mm','areal_mm','WDD_tam','potential','dilation_ppb','wl_ft','dbp','dwl','resid','bp','Tsoft_SG23']
        filewriter.writerow(header)
        for i in range(len(d)):
            #you can add more columns here
            filewriter.writerow([xls_date[i],d[i],Grav_tide[i],vert[i],areal[i],WDD_tam[i],potential[i],
                                 dilation[i],wl[i],dbp[i],dwl[i],resid[i],bp[i],etint[i]])