Example #1
def _partial_regression(endog, exog_i, exog_others):
    """Partial regression.

    regress endog on exog_i conditional on exog_others

    uses OLS

    Parameters
    ----------
    endog : array_like
    exog_i : array_like
    exog_others : array_like

    Returns
    -------
    res1c : OLS results instance

    (res1a, res1b) : tuple of OLS results instances
         results from regression of endog on exog_others and of exog_i on
         exog_others

    """
    #FIXME: This function doesn't appear to be used.
    res1a = OLS(endog, exog_others).fit()
    res1b = OLS(exog_i, exog_others).fit()
    res1c = OLS(res1a.resid, res1b.resid).fit()

    return res1c, (res1a, res1b)
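
A minimal usage sketch with synthetic data (hypothetical; assumes `OLS` and `add_constant` are available, e.g. via `import statsmodels.api as sm`). By the Frisch-Waugh result, the slope from the residual-on-residual regression should match the coefficient on `exog_i` in the full model:

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
nobs = 100
exog_others = sm.add_constant(np.random.randn(nobs, 2))   # constant + 2 controls
exog_i = np.random.randn(nobs)                            # regressor of interest
endog = 0.5 * exog_i + exog_others.dot([1.0, 2.0, -1.0]) + np.random.randn(nobs)

res1c, (res1a, res1b) = _partial_regression(endog, exog_i, exog_others)
print(res1c.params)   # roughly [0.5], the partial slope on exog_i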
Example #2
def reset_ramsey(res, degree=5):
    '''Ramsey's RESET specification test for linear models

    This is a general specification test, for additional non-linear effects
    in a model.


    Notes
    -----
    The test fits an auxiliary OLS regression where the design matrix, exog,
    is augmented by powers 2 to degree of the fitted values. Then it performs
    an F-test of whether these additional terms are significant.

    If the p-value of the F-test is below a threshold, e.g. 0.1, then this
    indicates that there might be additional non-linear effects in the model
    and that the linear model is mis-specified.


    References
    ----------
    http://en.wikipedia.org/wiki/Ramsey_RESET_test

    '''
    order = degree + 1
    k_vars = res.model.exog.shape[1]
    #vander without constant and x:
    y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop constant and linear term
    exog = np.column_stack((res.model.exog, y_fitted_vander))
    res_aux = OLS(res.model.endog, exog).fit()
    #r_matrix = np.eye(degree, exog.shape[1], k_vars)
    r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
    #df1 = degree - 1
    #df2 = exog.shape[0] - degree - res.df_model  (without constant)
    return res_aux.f_test(r_matrix) #, r_matrix, res_aux
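
A hedged usage sketch (assuming `statsmodels.api as sm` provides `OLS` and `add_constant`): fit a deliberately misspecified linear model to quadratic data; the RESET F-test should then reject.

import numpy as np
import statsmodels.api as sm

np.random.seed(12345)
x = np.random.uniform(0, 10, size=100)
y = 1.0 + 2.0 * x + 0.3 * x**2 + np.random.randn(100)   # true relation is quadratic
res_lin = sm.OLS(y, sm.add_constant(x)).fit()           # fit only the linear part
print(reset_ramsey(res_lin, degree=3))                  # small p-value -> misspecification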
Example #3
def coint(y1, y2, regression="c"):
    """
    This is a simple cointegration test. It uses a unit-root test on the
    residuals to test for a cointegrating relationship.

    See Hamilton (1994) 19.2

    Parameters
    ----------
    y1 : array_like, 1d
        first element in cointegrating vector
    y2 : array_like
        remaining elements in cointegrating vector
    regression : str {'c'}
        Included in the regression
        * 'c' : Constant

    Returns
    -------
    coint_t : float
        t-statistic of unit-root test on residuals
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994)
    crit_value : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 % levels.


    Notes
    -----
    The null hypothesis is that there is no cointegration; the alternative
    hypothesis is that there is a cointegrating relationship. If the pvalue is
    small, below a critical size, then we can reject the hypothesis that there
    is no cointegrating relationship.

    P-values are obtained through regression surface approximation from
    MacKinnon 1994.

    References
    ----------
    MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
        unit-root and cointegration tests."  `Journal of Business and Economic
        Statistics` 12, 167-76.

    """
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
    y1 = np.asarray(y1)
    y2 = np.asarray(y2)
    if regression == 'c':
        y2 = add_constant(y2)
    st1_resid = OLS(y1, y2).fit().resid  #stage one residuals
    lgresid_cons = add_constant(st1_resid[0:-1])
    uroot_reg = OLS(st1_resid[1:], lgresid_cons).fit()
    coint_t = (uroot_reg.params[0] - 1) / uroot_reg.bse[0]
    pvalue = mackinnonp(coint_t, regression="c", N=2, lags=None)
    crit_value = mackinnoncrit(N=1, regression="c", nobs=len(y1))
    return coint_t, pvalue, crit_value
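
A sketch with simulated data (hypothetical): two random walks that share a common stochastic trend are cointegrated, so the test should reject the null of no cointegration.

import numpy as np

np.random.seed(98765)
common = np.cumsum(np.random.randn(500))     # shared I(1) trend
y1 = common + np.random.randn(500)
y2 = 0.5 * common + np.random.randn(500)
coint_t, pvalue, crit_value = coint(y1, y2)
print('t=%.3f, p=%.4f' % (coint_t, pvalue))  # small p-value -> reject "no cointegration"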
Example #4
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     res1 = OLS(data.endog, data.exog).fit()
     R = np.array([[0, 1, 1, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 0],
                   [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0],
                   [0, 0, 0, 0, 0, 1, 0]])
     q = np.array([0, 0, 0, 1, 0])
     cls.Ftest1 = res1.f_test(R, q)
Example #5
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     res1 = OLS(data.endog, data.exog).fit()
     R = np.array([[0,1,1,0,0,0,0],
           [0,1,0,1,0,0,0],
           [0,1,0,0,0,0,0],
           [0,0,0,0,1,0,0],
           [0,0,0,0,0,1,0]])
     q = np.array([0,0,0,1,0])
     cls.Ftest1 = res1.f_test(R,q)
Example #6
    def setupClass(cls):
        from results.results_regression import Longley
        data = longley.load()
        data.exog = add_constant(data.exog)
        res1 = OLS(data.endog, data.exog).fit()
        res2 = Longley()
        res2.wresid = res1.wresid  # workaround hack
        cls.res1 = res1
        cls.res2 = res2

        res_qr = OLS(data.endog, data.exog).fit(method="qr")
        cls.res_qr = res_qr
Example #7
def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'):
    """
    Plot a reference line for a qqplot.

    Parameters
    ----------
    ax : matplotlib axes instance
        The axes on which to plot the line
    line : str {'45','r','s','q'}
        Options for the reference line to which the data is compared.
        '45' - 45-degree line
        's'  - standardized line, the expected order statistics are scaled by the
               standard deviation of the given sample and have the mean added to them
        'r'  - A regression line is fit
        'q'  - A line is fit through the quartiles.
        None - By default no reference line is added to the plot.
    x : array
        X data for plot. Not needed if line is '45'.
    y : array
        Y data for plot. Not needed if line is '45'.
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if line is 'q'.

    Notes
    -----
    There is no return value. The line is plotted on the given `ax`.
    """
    if line == '45':
        end_pts = zip(ax.get_xlim(), ax.get_ylim())
        end_pts[0] = max(end_pts[0])
        end_pts[1] = min(end_pts[1])
        ax.plot(end_pts, end_pts, fmt)
        return # does this have any side effects?
    if x is None and y is None:
        raise ValueError("If line is not 45, x and y cannot be None.")
    elif line == 'r':
        # could use ax.lines[0].get_xdata(), get_ydata(),
        # but don't know axes are 'clean'
        y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x,y,fmt)
    elif line == 's':
        m,b = y.std(), y.mean()
        ref_line = x*m + b
        ax.plot(x, ref_line, fmt)
    elif line == 'q':
        q25 = stats.scoreatpercentile(y, 25)
        q75 = stats.scoreatpercentile(y, 75)
        theoretical_quartiles = dist.ppf([.25,.75])
        m = (q75 - q25) / np.diff(theoretical_quartiles)
        b = q25 - m*theoretical_quartiles[0]
        ax.plot(x, m*x + b, fmt)
Example #8
def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'):
    """
    Plot a reference line for a qqplot.

    Parameters
    ----------
    ax : matplotlib axes instance
        The axes on which to plot the line
    line : str {'45','r','s','q'}
        Options for the reference line to which the data is compared.
        '45' - 45-degree line
        's'  - standardized line, the expected order statistics are scaled by the
               standard deviation of the given sample and have the mean added to them
        'r'  - A regression line is fit
        'q'  - A line is fit through the quartiles.
        None - By default no reference line is added to the plot.
    x : array
        X data for plot. Not needed if line is '45'.
    y : array
        Y data for plot. Not needed if line is '45'.
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if line is 'q'.

    Notes
    -----
    There is no return value. The line is plotted on the given `ax`.
    """
    if line == '45':
        end_pts = zip(ax.get_xlim(), ax.get_ylim())
        end_pts[0] = max(end_pts[0])
        end_pts[1] = min(end_pts[1])
        ax.plot(end_pts, end_pts, fmt)
        return  # does this have any side effects?
    if x is None and y is None:
        raise ValueError("If line is not 45, x and y cannot be None.")
    elif line == 'r':
        # could use ax.lines[0].get_xdata(), get_ydata(),
        # but don't know axes are 'clean'
        y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x, y, fmt)
    elif line == 's':
        m, b = y.std(), y.mean()
        ref_line = x * m + b
        ax.plot(x, ref_line, fmt)
    elif line == 'q':
        q25 = stats.scoreatpercentile(y, 25)
        q75 = stats.scoreatpercentile(y, 75)
        theoretical_quartiles = dist.ppf([.25, .75])
        m = (q75 - q25) / np.diff(theoretical_quartiles)
        b = q25 - m * theoretical_quartiles[0]
        ax.plot(x, m * x + b, fmt)
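
A usage sketch (hypothetical data; assumes scipy and matplotlib are installed): build Q-Q coordinates for a normal sample by hand, then overlay a line through the quartiles.

import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

np.random.seed(2468)
sample = np.sort(np.random.randn(200))                   # ordered sample quantiles
theor = stats.norm.ppf((np.arange(200) + 0.5) / 200.0)   # theoretical quantiles

fig, ax = plt.subplots()
ax.plot(theor, sample, 'bo')
qqline(ax, 'q', x=theor, y=sample, dist=stats.norm)      # line through the quartiles
plt.show()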
Example #9
 def setupClass(cls):
     #        if skipR:
     #            raise SkipTest, "Rpy not installed"
     #        try:
     #            r.library('car')
     #        except RPyRException:
     #            raise SkipTest, "car library not installed for R"
     R = np.zeros(7)
     R[4:6] = [1, -1]
     #        self.R = R
     data = longley.load()
     data.exog = add_constant(data.exog)
     res1 = OLS(data.endog, data.exog).fit()
     cls.Ttest1 = res1.t_test(R)
Example #10
    def setupClass(cls):
#        if skipR:
#            raise SkipTest, "Rpy not installed"
#        try:
#            r.library('car')
#        except RPyRException:
#            raise SkipTest, "car library not installed for R"
        R = np.zeros(7)
        R[4:6] = [1,-1]
#        self.R = R
        data = longley.load()
        data.exog = add_constant(data.exog)
        res1 = OLS(data.endog, data.exog).fit()
        cls.Ttest1 = res1.t_test(R)
Example #11
    def __init__(self):
        super(self.__class__, self).__init__()  #initialize DGP

        nobs = self.nobs
        y_true, x, exog = self.y_true, self.x, self.exog

        np.random.seed(8765993)
        sigma_noise = 0.1
        y = y_true + sigma_noise * np.random.randn(nobs)

        m = AdditiveModel(x)
        m.fit(y)
        res_gam = m.results  #TODO: currently attached to class

        res_ols = OLS(y, exog).fit()

        #Note: there still are some naming inconsistencies
        self.res1 = res1 = Dummy()  #for gam model
        #res2 = Dummy() #for benchmark
        self.res2 = res2 = res_ols  #reuse existing ols results, will add additional

        res1.y_pred = res_gam.predict(x)
        res2.y_pred = res_ols.model.predict(res_ols.params, exog)
        res1.y_predshort = res_gam.predict(x[:10])

        slopes = [i for ss in m.smoothers for i in ss.params[1:]]

        const = res_gam.alpha + sum([ss.params[1] for ss in m.smoothers])
        #print const, slopes
        res1.params = np.array([const] + slopes)
Example #12
 def setupClass(cls):
     np.random.seed(54321)
     cls.endog_n_ = np.random.uniform(0, 20, size=30)
     cls.endog_n_one = cls.endog_n_[:, None]
     cls.exog_n_ = np.random.uniform(0, 20, size=30)
     cls.exog_n_one = cls.exog_n_[:, None]
     cls.degen_exog = cls.exog_n_one[:-1]
     cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
     cls.mod1.df_model += 1
     #cls.mod1.df_resid -= 1
     cls.res1 = cls.mod1.fit()
     # Note that these are created for every subclass..
     # A little extra overhead probably
     cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
     cls.mod2.df_model += 1
     cls.res2 = cls.mod2.fit()
Example #13
def pacf_ols(x, nlags=40):
    '''Calculate partial autocorrelations

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which the pacf is returned.  Lag 0 is included.

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags + 1 elements including lag 0

    Notes
    -----
    This solves a separate OLS estimation for each desired lag.
    '''
    #TODO: add warnings for Yule-Walker
    #NOTE: demeaning and not using a constant gave incorrect answers?
    #JP: demeaning should have a better estimate of the constant
    #maybe we can compare small sample properties with a MonteCarlo
    xlags, x0 = lagmat(x, nlags, original='sep')
    #xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
    xlags = add_constant(xlags, prepend=True)
    pacf = [1.]
    for k in range(1, nlags + 1):
        res = OLS(x0[k:], xlags[k:, :k + 1]).fit()
        #np.take(xlags[k:], range(1,k+1)+[-1],

        pacf.append(res.params[-1])
    return np.array(pacf)
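
A sketch on a simulated AR(1) series (hypothetical data): the partial autocorrelation should be large at lag 1 and near zero afterwards.

import numpy as np

np.random.seed(1234)
nobs = 500
eps = np.random.randn(nobs)
x = np.zeros(nobs)
for t in range(1, nobs):
    x[t] = 0.7 * x[t - 1] + eps[t]      # AR(1) with coefficient 0.7
print(pacf_ols(x, nlags=5))             # roughly [1.0, 0.7, ~0, ~0, ~0, ~0]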
Example #14
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     ols_res = OLS(data.endog, data.exog).fit()
     gls_res = GLS(data.endog, data.exog).fit()
     cls.res1 = gls_res
     cls.res2 = ols_res
Example #15
    def fitbygroups(self):
        '''Fit OLS regression for each group separately.

        Returns
        -------
        results are attached

        olsbygroup : dictionary of result instance
            the returned regression results for each group
        sigmabygroup : array (ngroups,) (this should be called sigma2group ??? check)
            mse_resid for each group
        weights : array (nobs,)
            standard deviation of group extended to the original observations. This can
            be used as weights in WLS for group-wise heteroscedasticity.



        '''
        olsbygroup = {}
        sigmabygroup = []
        for gi, group in enumerate(
                self.unique):  #np.arange(len(self.unique))):
            groupmask = self.groupsint == gi  #group index
            res = OLS(self.endog[groupmask], self.exog[groupmask]).fit()
            olsbygroup[group] = res
            sigmabygroup.append(res.mse_resid)
        self.olsbygroup = olsbygroup
        self.sigmabygroup = np.array(sigmabygroup)
        self.weights = np.sqrt(
            self.sigmabygroup[self.groupsint])  #TODO:chk sqrt
Example #16
    def spec_hausman(self, dof=None):
        '''Hausman's specification test


        See Also
        --------
        spec_hausman : generic function for Hausman's specification test

        '''
        #use normalized cov_params for OLS

        # endog and exog are undefined in this snippet; presumably they are
        # the data of the fitted model, e.g.:
        endog, exog = self._results.model.endog, self._results.model.exog
        resols = OLS(endog, exog).fit()
        normalized_cov_params_ols = resols.model.normalized_cov_params
        se2 = resols.mse_resid

        params_diff = self._results.params - resols.params

        cov_diff = np.linalg.pinv(self.xhatprod) - normalized_cov_params_ols
        #TODO: the following is very inefficient, solves problem (svd) twice
        #use linalg.lstsq or svd directly
        #cov_diff will very often be in-definite (singular)
        if not dof:
            dof = tools.rank(cov_diff)
        cov_diffpinv = np.linalg.pinv(cov_diff)
        H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))/se2
        pval = stats.chi2.sf(H, dof)

        return H, pval, dof
Example #17
def plot_partregress_ax(endog,
                        exog_i,
                        exog_others,
                        varname='',
                        title_fontsize=None,
                        ax=None):
    """Plot partial regression for a single regressor.

    Parameters
    ----------
    endog : ndarray
       endogenous or response variable
    exog_i : ndarray
        exogenous, explanatory variable
    exog_others : ndarray
        other exogenous, explanatory variables, the effect of these variables
        will be removed by OLS regression
    varname : str
        name of the variable used in the title
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    plot_partregress : Plot partial regression for a set of regressors.

    """
    fig, ax = utils.create_mpl_ax(ax)

    res1a = OLS(endog, exog_others).fit()
    res1b = OLS(exog_i, exog_others).fit()
    ax.plot(res1b.resid, res1a.resid, 'o')
    res1c = OLS(res1a.resid, res1b.resid).fit()
    ax.plot(res1b.resid, res1c.fittedvalues, '-', color='k')
    ax.set_title('Partial Regression plot %s' % varname,
                 fontsize=title_fontsize)  # + namestr)

    return fig
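
A minimal sketch (hypothetical data; assumes `statsmodels.api as sm`, matplotlib, and the module's plotting helpers are importable): plot the partial regression of `endog` on one regressor after removing two controls.

import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt

np.random.seed(3579)
nobs = 100
exog_others = sm.add_constant(np.random.randn(nobs, 2))
exog_i = np.random.randn(nobs)
endog = 0.5 * exog_i + exog_others.sum(1) + np.random.randn(nobs)

plot_partregress_ax(endog, exog_i, exog_others, varname='x1')
plt.show()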
Example #18
 def fitpooled(self):
     '''fit the pooled model, which assumes there are no differences across groups
     '''
     if self.het:
         if not hasattr(self, 'weights'):
             self.fitbygroups()
         weights = self.weights
         res = WLS(self.endog, self.exog, weights=weights).fit()
     else:
         res = OLS(self.endog, self.exog).fit()
     self.lspooled = res
Example #19
    def __init__(self):
        super(self.__class__, self).__init__() #initialize DGP

        y, x, exog = self.y, self.x, self.exog

        #use order = 2 in regression
        pmod = smoothers.PolySmoother(2, x)
        pmod.fit(y)  #no return

        self.res_ps = pmod
        self.res2 = OLS(y, exog[:,:2+1]).fit()
Example #20
    def __init__(self):
        super(self.__class__, self).__init__() #initialize DGP

        y, x, exog = self.y, self.x, self.exog

        #use order = 3 in regression
        pmod = smoothers.PolySmoother(3, x)
        #pmod.fit(y)  #no return
        pmod.smooth(y)  #no return, use alias for fit

        self.res_ps = pmod
        self.res2 = OLS(y, exog[:,:3+1]).fit()
Example #21
    def fit(self):
        alpha0 = 0.1  #startvalue
        func = self.fit_conditional
        fitres = optimize.fmin(func, alpha0)

        # fit_conditional only returns ssr for now
        alpha = fitres[0]
        y = self.ar1filter(self.endog, alpha)
        x = self.ar1filter(self.exog, alpha)
        reso = OLS(y, x).fit()

        return fitres, reso
Example #22
    def setupClass(cls):
        from results.results_regression import LongleyGls

        data = longley.load()
        exog = add_constant(np.column_stack(\
                (data.exog[:,1],data.exog[:,4])))
        tmp_results = OLS(data.endog, exog).fit()
        rho = np.corrcoef(tmp_results.resid[1:],
                          tmp_results.resid[:-1])[0][1]  # by assumption
        order = toeplitz(np.arange(16))
        sigma = rho**order
        GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
        cls.res1 = GLS_results
        cls.res2 = LongleyGls()
Example #23
 def setupClass(cls):
     super(TestNxNxOne, cls).setupClass()
     cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one)
     cls.mod2.df_model += 1
     cls.res2 = cls.mod2.fit()
Example #24
    def fit(self,
            maxlag=None,
            method='cmle',
            ic=None,
            trend='c',
            transparams=True,
            start_params=None,
            solver=None,
            maxiter=35,
            full_output=1,
            disp=1,
            callback=None,
            **kwargs):
        """
        Fit an AR(p) model by conditional (OLS) or exact maximum likelihood.

        Parameters
        ----------
        maxlag : int
            If `ic` is None, then maxlag is the lag length used in fit.  If
            `ic` is specified then maxlag is the highest lag order used to
            select the correct lag order.  If maxlag is None, the default is
            round(12*(nobs/100.)**(1/4.))
        method : str {'cmle', 'mle'}, optional
            cmle - Conditional maximum likelihood using OLS
            mle - Unconditional (exact) maximum likelihood.  See `solver`
            and the Notes.
        ic : str {'aic','bic','hqic','t-stat'}
            Criterion used for selecting the optimal lag length.
            aic - Akaike Information Criterion
            bic - Bayes Information Criterion
            hqic - Hannan-Quinn Information Criterion
            t-stat - Based on last lag
            If any of the information criteria are selected, the lag length
            which results in the lowest value is selected.  If t-stat, the
            model starts with maxlag and drops a lag until the highest lag
            has a t-stat that is significant at the 95 % level.
        trend : str {'c','nc'}
            Whether to include a constant or not. 'c' - include constant.
            'nc' - no constant.

        The below can be specified if method is 'mle'

        transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980).
        start_params : array-like, optional
            A first guess on the parameters.  Default is cmle estimates.
        solver : str or None, optional
            Solver to be used.  The default is 'l_bfgs' (limited memory Broyden-
            Fletcher-Goldfarb-Shanno).  Other choices are 'bfgs', 'newton'
            (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
            'ncg' (non-conjugate gradient), and 'powell'.
            The limited memory BFGS uses m=30 to approximate the Hessian,
            projected gradient tolerance of 1e-7 and factr = 1e3.  These
            cannot currently be changed for l_bfgs.  See notes for more
            information.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 35.
        tol : float
            The convergence tolerance.  Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute.  Output is dependent
            on the solver.  See Notes for more information.
        disp : bool, optional
            If True, convergence information is output.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        References
        ----------
        Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
            series with missing observations."  `Technometrics`.  22.3.
            389-95.

        See also
        --------
        scikits.statsmodels.model.LikelihoodModel.fit for more information
        on using the solvers.

        Notes
        -----
        For details on using the solvers, see the docstring of
        scikits.statsmodels.LikelihoodModel.fit.
        """
        self.transparams = transparams
        method = method.lower()
        if method not in ['cmle', 'yw', 'mle']:
            raise ValueError("Method %s not recognized" % method)
        self.method = method
        nobs = len(self.endog)  # overwritten if method is 'cmle'
        if maxlag is None:
            maxlag = int(round(12 * (nobs / 100.)**(1 / 4.)))

        endog = self.endog
        exog = self.exog
        k_ar = maxlag  # stays this if ic is None

        # select lag length
        if ic is not None:
            ic = ic.lower()
            if ic not in ['aic', 'bic', 'hqic', 't-stat']:
                raise ValueError("ic option %s not understood" % ic)
            # make Y and X with same nobs to compare ICs
            Y = endog[maxlag:]
            self.Y = Y  # attach to get correct fit stats
            X = self._stackX(maxlag, trend)  # sets k_trend
            self.X = X
            startlag = self.k_trend  # k_trend set in _stackX
            if exog is not None:
                startlag += exog.shape[1]  # add dim happens in super?
            startlag = max(1, startlag)  # handle if startlag is 0
            results = {}
            if ic != 't-stat':
                for lag in range(startlag, maxlag + 1):
                    # have to reinstantiate the model to keep comparable models
                    endog_tmp = endog[maxlag - lag:]
                    fit = AR(endog_tmp).fit(maxlag=lag,
                                            method=method,
                                            full_output=full_output,
                                            trend=trend,
                                            maxiter=maxiter,
                                            disp=disp)
                    results[lag] = eval('fit.' + ic)
                bestic, bestlag = min(
                    (res, k) for k, res in results.iteritems())
            else:  # choose by last t-stat.
                stop = 1.6448536269514722  # for t-stat, norm.ppf(.95)
                for lag in range(maxlag, startlag - 1, -1):
                    # have to reinstantiate the model to keep comparable models
                    endog_tmp = endog[maxlag - lag:]
                    fit = AR(endog_tmp).fit(maxlag=lag,
                                            method=method,
                                            full_output=full_output,
                                            trend=trend,
                                            maxiter=maxiter,
                                            disp=disp)
                    if np.abs(fit.tvalues[-1]) >= stop:
                        bestlag = lag
                        break
            # NOTE: in the t-stat branch, if no lag passes the threshold,
            # bestlag is never assigned and the next line raises a NameError
            k_ar = bestlag

        # change to what was chosen by fit method
        self.k_ar = k_ar

        # redo estimation for best lag
        # make LHS
        Y = endog[k_ar:, :]
        # make lagged RHS
        X = self._stackX(k_ar, trend)  # sets self.k_trend
        k_trend = self.k_trend
        self.Y = Y
        self.X = X

        if solver:
            solver = solver.lower()
        if method == "cmle":  # do OLS
            arfit = OLS(Y, X).fit()
            params = arfit.params
            self.nobs = nobs - k_ar
        if method == "mle":
            self.nobs = nobs
            if not start_params:
                start_params = OLS(Y, X).fit().params
                start_params = self._invtransparams(start_params)
            loglike = lambda params: -self.loglike(params)
            if solver is None:  # use limited memory bfgs
                bounds = [(None, ) * 2] * (k_ar + k_trend)
                mlefit = optimize.fmin_l_bfgs_b(loglike,
                                                start_params,
                                                approx_grad=True,
                                                m=30,
                                                pgtol=1e-7,
                                                factr=1e3,
                                                bounds=bounds,
                                                iprint=1)
                self.mlefit = mlefit
                params = mlefit[0]
            else:
                mlefit = super(AR, self).fit(start_params=start_params,
                                             method=solver,
                                             maxiter=maxiter,
                                             full_output=full_output,
                                             disp=disp,
                                             callback=callback,
                                             **kwargs)
                self.mlefit = mlefit
                params = mlefit.params
            if self.transparams:
                params = self._transparams(params)
                self.transparams = False  # turn off now for other results


# don't use yw, because we can't estimate the constant
#        elif method == "yw":
#            params, omega = yule_walker(endog, order=maxlag,
#                    method="mle", demean=False)
# how to handle inference after Yule-Walker?
#            self.params = params #TODO: don't attach here
#            self.omega = omega

        pinv_exog = np.linalg.pinv(X)
        normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
        arfit = ARResults(self, params, normalized_cov_params)
        self._results = arfit
        return arfit
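
A usage sketch (hypothetical, assuming `AR` is the model class this `fit` method belongs to): simulate an AR(1) process and recover the lag coefficient by conditional MLE with AIC lag selection.

import numpy as np

np.random.seed(777)
nobs = 250
eps = np.random.randn(nobs)
y = np.zeros(nobs)
for t in range(1, nobs):
    y[t] = 0.6 * y[t - 1] + eps[t]             # AR(1) with coefficient 0.6
ar_res = AR(y).fit(maxlag=6, ic='aic', method='cmle', trend='c')
print(ar_res.params)                           # constant near 0, first lag near 0.6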
Example #25

        #using GMM and IV2SLS classes
        #----------------------------

        mod = IVGMM(endog, exog, instrument, nmoms=instrument.shape[1])
        res = mod.fit()
        modgmmols = IVGMM(endog, exog, exog, nmoms=exog.shape[1])
        resgmmols = modgmmols.fit()
        #the next is the same as IV2SLS, (Z'Z)^{-1} as weighting matrix
        modgmmiv = IVGMM(endog, exog, instrument, nmoms=instrument.shape[1]) #same as mod
        resgmmiv = modgmmiv.fitgmm(np.ones(exog.shape[1], float),
                        weights=np.linalg.inv(np.dot(instrument.T, instrument)))
        modls = IV2SLS(endog, exog, instrument)
        resls = modls.fit()
        modols = OLS(endog, exog)
        resols = modols.fit()

        print '\nIV case'
        print 'params'
        print 'IV2SLS', resls.params
        print 'GMMIV ', resgmmiv # .params
        print 'GMM   ', res.params
        print 'diff  ', res.params - resls.params
        print 'OLS   ', resols.params
        print 'GMMOLS', resgmmols.params

        print '\nbse'
        print 'IV2SLS', resls.bse
        print 'GMM   ', mod.bse   #bse currently only attached to model not results
        print 'diff  ', mod.bse - resls.bse
Example #26
    def fitjoint(self):
        '''fit a joint fixed effects model to all observations

        The regression results are attached as `lsjoint`.

        The contrasts for overall and pairwise tests for equality of coefficients are
        attached as a dictionary `contrasts`. This also includes the contrasts for the test
        that the coefficients of a level are zero. ::

        >>> res.contrasts.keys()
        [(0, 1), 1, 'all', 3, (1, 2), 2, (1, 3), (2, 3), (0, 3), (0, 2)]

        The keys are based on the original names or labels of the groups.

        TODO: keys can be numpy scalars and then the keys cannot be sorted



        '''
        if not hasattr(self, 'weights'):
            self.fitbygroups()
        groupdummy = (self.groupsint[:, None] == self.uniqueint).astype(int)
        #order of dummy variables by variable - not used
        #dummyexog = self.exog[:,:,None]*groupdummy[:,None,1:]
        #order of dummy variables by groups - used
        dummyexog = self.exog[:, None, :] * groupdummy[:, 1:, None]
        exog = np.c_[self.exog,
                     dummyexog.reshape(self.exog.shape[0], -1)]  #self.nobs ??
        #Notes: I changed to drop first group from dummy
        #instead I want one full set dummies
        if self.het:
            weights = self.weights
            res = WLS(self.endog, exog, weights=weights).fit()
        else:
            res = OLS(self.endog, exog).fit()
        self.lsjoint = res
        contrasts = {}
        nvars = self.exog.shape[1]
        nparams = exog.shape[1]
        ndummies = nparams - nvars
        contrasts['all'] = np.c_[np.zeros((ndummies, nvars)), np.eye(ndummies)]
        for groupind, group in enumerate(
                self.unique[1:]):  #need enumerate if groups != groupsint
            groupind = groupind + 1
            contr = np.zeros((nvars, nparams))
            contr[:, nvars * groupind:nvars * (groupind + 1)] = np.eye(nvars)
            contrasts[group] = contr
            #save also for pairs, see next
            contrasts[(self.unique[0], group)] = contr

        #Note: I'm keeping some duplication for testing
        pairs = np.triu_indices(len(self.unique), 1)
        for ind1, ind2 in zip(
                *pairs):  #replace with group1, group2 in sorted(keys)
            if ind1 == 0:
                continue  # need comparison with benchmark/normalization group separate
            g1 = self.unique[ind1]
            g2 = self.unique[ind2]
            group = (g1, g2)
            contr = np.zeros((nvars, nparams))
            contr[:, nvars * ind1:nvars * (ind1 + 1)] = np.eye(nvars)
            contr[:, nvars * ind2:nvars * (ind2 + 1)] = -np.eye(nvars)
            contrasts[group] = contr

        self.contrasts = contrasts
Example #27
def adfuller(x,
             maxlag=None,
             regression="c",
             autolag='AIC',
             store=False,
             regresults=False):
    '''Augmented Dickey-Fuller unit root test

    The Augmented Dickey-Fuller test can be used to test for a unit root in a
    univariate process in the presence of serial correlation.

    Parameters
    ----------
    x : array_like, 1d
        data series
    maxlag : int
        Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
    regression : str {'c','ct','ctt','nc'}
        Constant and trend order to include in regression
        * 'c' : constant only
        * 'ct' : constant and trend
        * 'ctt' : constant, and linear and quadratic trend
        * 'nc' : no constant, no trend
    autolag : {'AIC', 'BIC', 't-stat', None}
        * if None, then maxlag lags are used
        * if 'AIC' or 'BIC', then the number of lags is chosen to minimize the
          corresponding information criterion
        * 't-stat' based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant at
          the 95 % level.
    store : bool
        If True, then a result instance is returned additionally to
        the adf statistic
    regresults : bool
        If True, the full regression results are returned.

    Returns
    -------
    adf : float
        Test statistic
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994)
    usedlag : int
        Number of lags used.
    nobs : int
        Number of observations used for the ADF regression and calculation of
        the critical values.
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 % levels.
        Based on MacKinnon (2010)
    icbest : float
        The maximized information criterion if autolag is not None.
    regresults : RegressionResults instance
        The full regression results of the ADF regression, returned only if
        `regresults` is True.
    resstore : (optional) instance of ResultStore
        an instance of a dummy class with results attached as attributes

    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
    root, with the alternative that there is no unit root. If the pvalue is
    above a critical size, then we cannot reject that there is a unit root.

    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables.
    If the p-value is close to significant, then the critical values should be
    used to judge whether to accept or reject the null.

    Examples
    --------
    see example script

    References
    ----------
    Greene
    Hamilton


    P-Values (regression surface approximation)
    MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
    unit-root and cointegration tests."  `Journal of Business and Economic
    Statistics` 12, 167-76.

    Critical values
    MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."  Queen's
    University, Dept of Economics, Working Papers.  Available at
    http://ideas.repec.org/p/qed/wpaper/1227.html
    '''
    trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
    if regression is None or isinstance(regression, int):
        regression = trenddict[regression]
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
    x = np.asarray(x)
    nobs = x.shape[0]

    if maxlag is None:
        #from Greene referencing Schwert 1989
        maxlag = int(round(12. * np.power(nobs / 100., 1 / 4.)))

    xdiff = np.diff(x)
    xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
    nobs = xdall.shape[0]

    xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]

    if store:
        resstore = ResultsStore()
    if autolag:
        if regression != 'nc':
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1  # 1 for level

        #search for lag length with highest information criteria
        #Note: use the same number of observations to have comparable IC
        icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag, maxlag,
                                   autolag)

        #rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
        nobs = xdall.shape[0]
        xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None
    if regression != 'nc':
        resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
                                        regression)).fit()
    else:
        resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
    adfstat = resols.tvalues[0]
    #    adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are integrated
    # for orders higher than I(1), ie., not ADF but cointegration tests.

    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {
        "1%": critvalues[0],
        "5%": critvalues[1],
        "10%": critvalues[2]
    }
    if store:
        resstore.resols = resols
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = "The coefficient on the lagged level equals 1"
        resstore.HA = "The coefficient on the lagged level < 1"
        resstore.icbest = icbest
        return adfstat, pvalue, critvalues, resstore
    else:
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
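
A sketch contrasting the two outcomes (hypothetical data): a random walk should fail to reject the unit-root null, while white noise should reject it decisively.

import numpy as np

np.random.seed(4321)
walk = np.cumsum(np.random.randn(300))    # I(1): unit root present
noise = np.random.randn(300)              # I(0): no unit root
for name, series in [('random walk', walk), ('white noise', noise)]:
    adfstat, pvalue = adfuller(series, regression='c')[:2]
    print('%s: adf=%.3f, p=%.4f' % (name, adfstat, pvalue))  # walk: large p; noise: tiny p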
Example #28
nobs = 100
lb, ub = -1, 2
x = np.linspace(lb, ub, nobs)
x = np.sin(x)
exog = x[:, None]**np.arange(order + 1)
y_true = exog.sum(1)
y = y_true + sigma_noise * np.random.randn(nobs)

#xind = np.argsort(x)
pmod = smoothers.PolySmoother(2, x)
pmod.fit(y)  #no return
y_pred = pmod.predict(x)
error = y - y_pred
mse = (error * error).mean()
print mse
res_ols = OLS(y, exog[:, :3]).fit()
print np.squeeze(pmod.coef) - res_ols.params

weights = np.ones(nobs)
weights[:nobs // 3] = 0.1
weights[-nobs // 5:] = 2

pmodw = smoothers.PolySmoother(2, x)
pmodw.fit(y, weights=weights)  #no return
y_predw = pmodw.predict(x)
error = y - y_predw
mse = (error * error).mean()
print mse
res_wls = WLS(y, exog[:, :3], weights=weights).fit()
print np.squeeze(pmodw.coef) - res_wls.params
Example #29
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     res1 = OLS(data.endog, data.exog).fit()
     R2 = [[0, 1, -1, 0, 0, 0, 0], [0, 0, 0, 0, 1, -1, 0]]
     cls.Ftest1 = res1.f_test(R2)
Example #30
 def fit_conditional(self, alpha):
     y = self.ar1filter(self.endog, alpha)
     x = self.ar1filter(self.exog, alpha)
     res = OLS(y, x).fit()
     return res.ssr  #res.llf
Example #31
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     res1 = OLS(data.endog, data.exog).fit()
     R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
     cls.Ftest1 = res1.f_test(R2)
Example #32
z = y_true  #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)

example = 1

if example == 1:
    m = AdditiveModel(d)
    m.fit(y)

    y_pred = m.results.predict(d)

for ss in m.smoothers:
    print ss.params

res_ols = OLS(y, exog_reduced).fit()
print res_ols.params

#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)

if example > 0:
    import matplotlib.pyplot as plt

    plt.figure()
    plt.plot(exog)

    y_pred = m.results.mu  # + m.results.alpha #m.results.predict(d)
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.plot(y, '.', alpha=0.25)
    plt.plot(y_true, 'k-', label='true')
Example #33
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
    '''four tests for granger causality of 2 timeseries

    all four tests give similar results
    `params_ftest` and `ssr_ftest` are equivalent, based on the F test, which
    is identical to lmtest:grangertest in R

    Parameters
    ----------
    x : array, 2d, (nobs,2)
        data for test whether the time series in the second column Granger
        causes the time series in the first column
    maxlag : integer
        the Granger causality test results are calculated for all lags up to
        maxlag
    verbose : bool
        print results if true

    Returns
    -------
    results : dictionary
        all test results; the dictionary keys are the number of lags. For
        each lag the value is a tuple: the first element is a dictionary
        with the test statistics, pvalues and degrees of freedom, and the
        second element is a list with the OLS estimation results for the
        restricted model, the unrestricted model, and the restriction
        (contrast) matrix for the parameter f_test.

    Notes
    -----
    TODO: convert to class and attach results properly

    The Null hypothesis for grangercausalitytests is that the time series in
    the second column, x2, Granger causes the time series in the first column,
    x1. This means that past values of x2 have a statistically significant
    effect on the current value of x1, taking also past values of x1 into
    account, as regressors. We reject the null hypothesis of x2 Granger
    causing x1 if the pvalues are below a desired size of the test.

    'params_ftest', 'ssr_ftest' are based on F test

    'ssr_chi2test', 'lrtest' are based on chi-square test

    '''
    from scipy import stats  # lazy import

    resli = {}

    for mlg in range(1, maxlag + 1):
        result = {}
        if verbose:
            print '\nGranger Causality'
            print 'number of lags (no zero)', mlg
        mxlg = mlg  #+ 1 # Note number of lags starting at zero in lagmat

        # create lagmat of both time series
        dta = lagmat2ds(x, mxlg, trim='both', dropex=1)

        #add constant
        if addconst:
            dtaown = add_constant(dta[:, 1:mxlg + 1])
            dtajoint = add_constant(dta[:, 1:])
        else:
            raise ValueError('Not Implemented')
            dtaown = dta[:, 1:mxlg]
            dtajoint = dta[:, 1:]

        #run ols on both models without and with lags of second variable
        res2down = OLS(dta[:, 0], dtaown).fit()
        res2djoint = OLS(dta[:, 0], dtajoint).fit()

        #print results
        #for ssr based tests see: http://support.sas.com/rnd/app/examples/ets/granger/index.htm
        #the other tests are made-up

        # Granger Causality test using ssr (F statistic)
        fgc1 = (res2down.ssr -
                res2djoint.ssr) / res2djoint.ssr / (mxlg) * res2djoint.df_resid
        if verbose:
            print 'ssr based F test:         F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % \
              (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)
        result['ssr_ftest'] = (fgc1, stats.f.sf(fgc1, mxlg,
                                                res2djoint.df_resid),
                               res2djoint.df_resid, mxlg)

        # Granger Causality test using ssr (ch2 statistic)
        fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
        if verbose:
            print 'ssr based chi2 test:   chi2=%-8.4f, p=%-8.4f, df=%d' %  \
              (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
        result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)

        #likelihood ratio test pvalue:
        lr = -2 * (res2down.llf - res2djoint.llf)
        if verbose:
            print 'likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %  \
              (lr, stats.chi2.sf(lr, mxlg), mxlg)
        result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)

        # F test that all lag coefficients of exog are zero
        # note: the first rconstr below is immediately overwritten; only the
        # second (full mxlg block) restriction matrix is used in the f_test
        rconstr = np.column_stack((np.zeros((mxlg-1,mxlg-1)), np.eye(mxlg-1, mxlg-1),\
                                   np.zeros((mxlg-1, 1))))
        rconstr = np.column_stack((np.zeros((mxlg,mxlg)), np.eye(mxlg, mxlg),\
                                   np.zeros((mxlg, 1))))
        ftres = res2djoint.f_test(rconstr)
        if verbose:
            print 'parameter F test:         F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % \
              (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)
        result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
                                  np.squeeze(ftres.pvalue)[()], ftres.df_denom,
                                  ftres.df_num)

        resli[mxlg] = (result, [res2down, res2djoint, rconstr])

    return resli
Example #34
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     cls.res1 = OLS(data.endog, data.exog).fit()
     cls.res2 = WLS(data.endog, data.exog).fit()
Example #35
 def setupClass(cls):
     data = longley.load()
     data.exog = add_constant(data.exog)
     cls.res1 = OLS(data.endog, data.exog).fit()
     R = np.identity(7)
     cls.Ttest = cls.res1.t_test(R)
Example #36
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
    '''four tests for granger causality of 2 timeseries

    all four tests give similar results
    `params_ftest` and `ssr_ftest` are equivalent, based on the F test, which
    is identical to lmtest:grangertest in R

    Parameters
    ----------
    x : array, 2d, (nobs,2)
        data for test whether the time series in the second column Granger
        causes the time series in the first column
    maxlag : integer
        the Granger causality test results are calculated for all lags up to
        maxlag
    verbose : bool
        print results if true

    Returns
    -------
    results : dictionary
        all test results; the dictionary keys are the number of lags. For
        each lag the value is a tuple: the first element is a dictionary
        with the test statistics, pvalues and degrees of freedom, and the
        second element is a list with the OLS estimation results for the
        restricted model, the unrestricted model, and the restriction
        (contrast) matrix for the parameter f_test.

    Notes
    -----
    TODO: convert to class and attach results properly

    'params_ftest', 'ssr_ftest' are based on F test

    'ssr_chi2test', 'lrtest' are based on chi-square test

    '''
    from scipy import stats # lazy import

    resli = {}

    for mlg in range(1, maxlag+1):
        result = {}
        if verbose:
            print '\nGranger Causality'
            print 'number of lags (no zero)', mlg
        mxlg = mlg #+ 1 # Note number of lags starting at zero in lagmat

        # create lagmat of both time series
        dta = lagmat2ds(x, mxlg, trim='both', dropex=1)

        #add constant
        if addconst:
            dtaown = add_constant(dta[:,1:mxlg+1])
            dtajoint = add_constant(dta[:,1:])
        else:
            raise ValueError('Not Implemented')
            dtaown = dta[:,1:mxlg]
            dtajoint = dta[:,1:]

        #run ols on both models without and with lags of second variable
        res2down = OLS(dta[:,0], dtaown).fit()
        res2djoint = OLS(dta[:,0], dtajoint).fit()

        #print results
        #for ssr based tests see: http://support.sas.com/rnd/app/examples/ets/granger/index.htm
        #the other tests are made-up

        # Granger Causality test using ssr (F statistic)
        fgc1 = (res2down.ssr-res2djoint.ssr)/res2djoint.ssr/(mxlg)*res2djoint.df_resid
        if verbose:
            print 'ssr based F test:         F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % \
              (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)
        result['ssr_ftest'] = (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)

        # Granger Causality test using ssr (ch2 statistic)
        fgc2 = res2down.nobs*(res2down.ssr-res2djoint.ssr)/res2djoint.ssr
        if verbose:
            print 'ssr based chi2 test:   chi2=%-8.4f, p=%-8.4f, df=%d' %  \
              (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
        result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)

        #likelihood ratio test pvalue:
        lr = -2*(res2down.llf-res2djoint.llf)
        if verbose:
            print 'likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %  \
              (lr, stats.chi2.sf(lr, mxlg), mxlg)
        result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)

        # F test that all lag coefficients of exog are zero
        # note: the first rconstr below is immediately overwritten; only the
        # second (full mxlg block) restriction matrix is used in the f_test
        rconstr = np.column_stack((np.zeros((mxlg-1,mxlg-1)), np.eye(mxlg-1, mxlg-1),\
                                   np.zeros((mxlg-1, 1))))
        rconstr = np.column_stack((np.zeros((mxlg,mxlg)), np.eye(mxlg, mxlg),\
                                   np.zeros((mxlg, 1))))
        ftres = res2djoint.f_test(rconstr)
        if verbose:
            print 'parameter F test:         F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % \
              (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)
        result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
                                  np.squeeze(ftres.pvalue)[()],
                                  ftres.df_denom, ftres.df_num)

        resli[mxlg] = (result, [res2down, res2djoint, rconstr])

    return resli
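
A usage sketch with simulated data (hypothetical): the second column is constructed to lead the first, so the tests should reject the null that it does not Granger-cause it.

import numpy as np

np.random.seed(555)
nobs = 200
x2 = np.random.randn(nobs)
x1 = np.r_[0.0, 0.8 * x2[:-1]] + 0.1 * np.random.randn(nobs)   # x1 follows lagged x2
data = np.column_stack((x1, x2))   # test: does column 2 Granger-cause column 1?
res = grangercausalitytests(data, maxlag=2, verbose=True)
print(sorted(res.keys()))          # one entry per lag: [1, 2]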