Example 1
    def _stackX(self, k_ar, trend):
        """
        Private method to build the RHS matrix for estimation.

        Columns are trend terms then lags.
        """
        endog = self.endog
        X = lagmat(endog, maxlag=k_ar, trim='both')
        k_trend = util.get_trendorder(trend)
        if k_trend:
            X = add_trend(X, prepend=True, trend=trend)
        self.k_trend = k_trend
        return X
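
For context, here is a minimal standalone sketch of what _stackX assembles, assuming the modern statsmodels.tsa.tsatools import path for lagmat and add_trend (the examples on this page use the older scikits.statsmodels namespace):

import numpy as np
from statsmodels.tsa.tsatools import add_trend, lagmat

y = np.arange(10.0)
# Lag matrix: one column per lag, rows trimmed so no padding values remain
X = lagmat(y, maxlag=2, trim='both')          # shape (8, 2)
# Prepend a constant and a linear time trend, as _stackX does for trend='ct'
Xc = add_trend(X, trend='ct', prepend=True)   # shape (8, 4)
print(Xc[:3])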
Example 2
def _make_arma_exog(endog, exog, trend):
    k_trend = 1  # overwritten if no constant
    if exog is None and trend == 'c':  # constant only
        exog = np.ones((len(endog), 1))
    elif exog is not None and trend == 'c':  # constant plus exogenous
        exog = add_trend(exog, trend='c', prepend=True)
    elif exog is not None and trend == 'nc':
        # make sure it's not holding constant from last run
        if exog.var() == 0:
            exog = None
        k_trend = 0
    if trend == 'nc':
        k_trend = 0
    return k_trend, exog
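
A quick sanity check of the branch logic, as a hedged sketch (np assumed to be NumPy, _make_arma_exog defined as above):

import numpy as np

endog = np.random.randn(100)

# constant only: a ones column is created for the exog matrix
k_trend, exog = _make_arma_exog(endog, None, 'c')
assert k_trend == 1 and exog.shape == (100, 1)

# no constant and no exogenous data: nothing to build
k_trend, exog = _make_arma_exog(endog, None, 'nc')
assert k_trend == 0 and exog is None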
Example 3
def get_var_endog(y, lags, trend='c'):
    """
    Make predictor matrix for VAR(p) process

    Z := (Z_0, ..., Z_T).T (T x Kp)
    Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)

    Ref: Lutkepohl p.70 (transposed)
    """
    nobs = len(y)
    # Ravel C order, need to put in descending order
    Z = np.array([y[t - lags:t][::-1].ravel() for t in range(lags, nobs)])

    # Add constant, trend, etc.
    if trend != 'nc':
        Z = tsa.add_trend(Z, prepend=True, trend=trend)

    return Z
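
As a hypothetical shape check (y assumed to be a 2-D NumPy array with the K variables in columns):

import numpy as np

K, T, p = 2, 100, 2
y = np.random.randn(T, K)
Z = get_var_endog(y, lags=p, trend='c')

# One row per usable observation; columns are the constant plus K*p lagged values
assert Z.shape == (T - p, 1 + K * p)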
Example 4
    def _stackX(self, k_ar, trend):
        """
        Private method to build the RHS matrix for estimation.

        Columns are trend terms, then exogenous, then lags.
        """
        endog = self.endog
        exog = self.exog
        X = lagmat(endog, maxlag=k_ar, trim='both')
        if exog is not None:
            X = np.column_stack((exog[k_ar:, :], X))
        # Handle trend terms
        if trend == 'c':
            k_trend = 1
        elif trend == 'nc':
            k_trend = 0
        elif trend == 'ct':
            k_trend = 2
        elif trend == 'ctt':
            k_trend = 3
        else:  # guard against an unbound k_trend below
            raise ValueError("trend '%s' not understood" % trend)
        if trend != 'nc':
            X = add_trend(X, prepend=True, trend=trend)
        self.k_trend = k_trend
        return X
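
The same stacking can be traced without the class wrapper; a minimal sketch assuming a one-column exog and trend='ct':

import numpy as np
from statsmodels.tsa.tsatools import add_trend, lagmat

endog = np.random.randn(50)
exog = np.random.randn(50, 1)
k_ar = 3

X = lagmat(endog, maxlag=k_ar, trim='both')   # (47, 3): the lag columns
X = np.column_stack((exog[k_ar:, :], X))      # exogenous goes before the lags
X = add_trend(X, prepend=True, trend='ct')    # constant and time trend go first
assert X.shape == (47, 2 + 1 + 3)             # k_trend + k_exog + k_ar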
Example 5
    def fit(
        self,
        order,
        start_params=None,
        trend="c",
        method="css-mle",
        transparams=True,
        solver=None,
        maxiter=35,
        full_output=1,
        disp=5,
        callback=None,
        **kwargs
    ):
        """
        Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.

        Parameters
        ----------
        start_params : array-like, optional
            Starting parameters for ARMA(p,q).  If None, the default is given
            by ARMA._fit_start_params.  See there for more information.
        transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
            Uses the transformation suggested in Jones (1980).  If False,
            no checking for stationarity or invertibility is done.
        method : str {'css-mle','mle','css'}
            This is the loglikelihood to maximize.  If "css-mle", the
            conditional sum of squares likelihood is maximized and its values
            are used as starting values for the computation of the exact
            likelihood via the Kalman filter.  If "mle", the exact likelihood
            is maximized via the Kalman Filter.  If "css" the conditional sum
            of squares likelihood is maximized.  All three methods use
            `start_params` as starting parameters.  See above for more
            information.
        trend : str {'c','nc'}
            Whether to include a constant or not.  'c' includes constant,
            'nc' no constant.
        solver : str or None, optional
            Solver to be used.  The default is 'l_bfgs' (limited memory
            Broyden-Fletcher-Goldfarb-Shanno).  Other choices are 'bfgs',
            'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' (conjugate
            gradient), 'ncg' (Newton conjugate gradient), and 'powell'.
            The limited memory BFGS uses m=12 to approximate the Hessian,
            a projected gradient tolerance of 1e-8, and factr = 1e2,
            matching the call in the body below.  These cannot currently be
            changed for l_bfgs.  See notes for more information.
        maxiter : int, optional
            The maximum number of function evaluations. Default is 35.
        tol : float
            The convergence tolerance.  Default is 1e-08.
        full_output : bool, optional
            If True, all output from solver will be available in
            the Results object's mle_retvals attribute.  Output is dependent
            on the solver.  See Notes for more information.
        disp : bool, optional
            If True, convergence information is printed.  For the default
            l_bfgs_b solver, disp controls the frequency of the output during
            the iterations. disp < 0 means no output in this case.
        callback : function, optional
            Called after each iteration as callback(xk) where xk is the current
            parameter vector.
        kwargs
            See Notes for keyword arguments that can be passed to fit.

        Returns
        -------
        `scikits.statsmodels.tsa.arima.ARMAResults` class

        See also
        --------
        scikits.statsmodels.model.LikelihoodModel.fit for more information
        on using the solvers.

        Notes
        -----
        If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
        P = dot(inv(identity(m**2) - kron(T, T)),
                dot(R, R.T).ravel('F')).reshape(r, r, order='F')

        The below is the docstring from
        `scikits.statsmodels.LikelihoodModel.fit`
        """
        # enforce invertibility
        self.transparams = transparams

        self.method = method.lower()

        # get model order
        k_ar, k_ma = map(int, order)
        k_lags = max(k_ar, k_ma + 1)
        self.k_ar = k_ar
        self.k_ma = k_ma
        self.k_lags = k_lags
        endog = self.endog
        exog = self.exog
        k_exog = self.k_exog
        self.nobs = len(endog)  # this is overwritten if method is 'css'

        # handle exogenous variables
        k_trend = 1  # overwritten if no constant
        if exog is None and trend == "c":  # constant only
            exog = np.ones((len(endog), 1))
        elif exog is not None and trend == "c":  # constant plus exogenous
            exog = add_trend(exog, trend="c", prepend=True)
        elif exog is not None and trend == "nc":
            # make sure it's not holding constant from last run
            if exog.var() == 0:
                exog = None
            k_trend = 0
        if trend == "nc":
            k_trend = 0
        self.k_trend = k_trend
        self.exog = exog  # overwrites original exog from __init__
        k = k_trend + k_exog

        # choose objective function
        if method.lower() in ["mle", "css-mle"]:
            loglike = lambda params: -self.loglike_kalman(params)
            self.loglike = self.loglike_kalman
        if method.lower() == "css":
            loglike = lambda params: -self.loglike_css(params)
            self.loglike = self.loglike_css
            self.nobs = len(endog) - k_ar  # nobs for CSS excludes pre-sample

        if start_params is not None:
            start_params = np.asarray(start_params)

        else:
            if method.lower() != "css-mle":  # use Hannan-Rissanen start_params
                start_params = self._fit_start_params((k_ar, k_ma, k))
            else:  # use Hannan-Rissanen to get CSS start_params
                func = lambda params: -self.loglike_css(params)
                # start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
                start_params = self._fit_start_params((k_ar, k_ma, k))
                if transparams:
                    start_params = self._invtransparams(start_params)
                bounds = [(None,) * 2] * (k_ar + k_ma + k)
                mlefit = optimize.fmin_l_bfgs_b(
                    func, start_params, approx_grad=True, m=12, pgtol=1e-7, factr=1e3, bounds=bounds, iprint=-1
                )
                start_params = self._transparams(mlefit[0])

        if transparams:  # transform initial parameters to ensure invertibility
            start_params = self._invtransparams(start_params)

        if solver is None:  # use default limited memory bfgs
            bounds = [(None,) * 2] * (k_ar + k_ma + k)
            mlefit = optimize.fmin_l_bfgs_b(
                loglike, start_params, approx_grad=True, m=12, pgtol=1e-8, factr=1e2, bounds=bounds, iprint=disp
            )
            self.mlefit = mlefit
            params = mlefit[0]

        else:  # call the solver from LikelihoodModel
            mlefit = super(ARMA, self).fit(
                start_params,
                method=solver,
                maxiter=maxiter,
                full_output=full_output,
                disp=disp,
                callback=callback,
                **kwargs
            )
            self.mlefit = mlefit
            params = mlefit.params

        if transparams:  # transform parameters back
            params = self._transparams(params)

        self.transparams = False  # set to false so methods don't expect transf.

        normalized_cov_params = None  # TODO: fix this

        return ARMAResults(self, params, normalized_cov_params)
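
End to end, the call pattern would look roughly like this. A hedged sketch: the import path follows the old scikits.statsmodels namespace referenced in the docstring (newer releases moved this class elsewhere), and the data are a simulated toy series:

import numpy as np
from scikits.statsmodels.tsa.arima import ARMA

np.random.seed(12345)
# Simulate a toy AR(1) series to fit
y = np.zeros(200)
for t in range(1, 200):
    y[t] = 0.6 * y[t - 1] + np.random.randn()

model = ARMA(y)
res = model.fit(order=(1, 0), trend='c', method='css-mle', disp=-1)
print(res.params)   # constant first, then the AR coefficient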