Example #1
def get_logdet(m):
    from scikits.statsmodels.tools.compatibility import np_slogdet
    sign, logdet = np_slogdet(m)

    if sign == -1:  # pragma: no cover
        raise ValueError("Matrix is not positive definite")
    elif sign == 0:  # pragma: no cover
        raise ValueError("Matrix is singular")

    return logdet
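
For reference, `np_slogdet` appears to follow the same contract as `numpy.linalg.slogdet`: it returns a `(sign, logabsdet)` pair, which is why the function checks the first element before returning the second. A minimal usage sketch (the test matrix below is illustrative, and assumes `scikits.statsmodels` is importable):

import numpy as np

# 2x2 symmetric positive definite matrix with det = 2*1 - 0.5*0.5 = 1.75
A = np.array([[2.0, 0.5],
              [0.5, 1.0]])

print(get_logdet(A))             # log(1.75), computed stably via slogdet
print(np.log(np.linalg.det(A)))  # same value, computed naively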
Example #2
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        Y = self.Y
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1./nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = np_slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1/2. * (nobs * (np.log(2*np.pi) + np.log(sigma2))
                           - logdet + diffpVpinv/sigma2 + ssr/sigma2)
        return loglike
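
The body above concentrates sigma2 out of the exact likelihood: the MLE of sigma2 is the pre-sample quadratic form plus the conditional SSR, divided by nobs, and substituting it back gives the final expression. A minimal sketch of the same computation for the AR(1) special case, where Hamilton 5.3.7 reduces to Vp^{-1} = 1 - phi**2 (the function name ar1_exact_loglike and the (const, phi) parameterization are illustrative, not from the source):

import numpy as np

def ar1_exact_loglike(y, const, phi):
    # Exact AR(1) log-likelihood with sigma2 concentrated out, mirroring
    # the structure of _loglike_mle above. For p = 1, Vp^{-1} = 1 - phi**2,
    # so logdet(Vp^{-1}) = log(1 - phi**2).
    nobs = len(y)
    mu = const / (1 - phi)                      # unconditional mean
    diffpVpinv = (1 - phi**2) * (y[0] - mu)**2  # pre-sample quadratic form
    resid = y[1:] - const - phi * y[:-1]        # conditional one-step errors
    ssr = np.dot(resid, resid)
    sigma2 = (diffpVpinv + ssr) / nobs          # concentrated variance MLE
    logdet = np.log(1 - phi**2)
    return -1/2. * (nobs * (np.log(2*np.pi) + np.log(sigma2))
                    - logdet + diffpVpinv/sigma2 + ssr/sigma2)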
Example #3
    def loglike(self, params):
        """
        The loglikelihood of an AR(p) process

        Parameters
        ----------
        params : array
            The fitted parameters of the AR model

        Returns
        -------
        llf : float
            The loglikelihood evaluated at `params`

        Notes
        -----
        Contains constant term.  If the model is fit by OLS then this returns
        the conditional maximum likelihood.

        .. math:: -\\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}

        If it is fit by MLE then the (exact) unconditional maximum likelihood
        is returned.

        .. math:: -\\frac{n}{2}\\log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\log\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{t}^{2}

        where

        :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
        mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
        variance-covariance matrix of the first `p` observations.
        """
        #TODO: Math is on Hamilton ~pp 124-5
        #will need to be amended for inclusion of exogenous variables
        nobs = self.nobs
        Y = self.Y
        X = self.X
        if self.method == "cmle":
            ssr = sumofsq(Y.squeeze() - np.dot(X, params))
            sigma2 = ssr / nobs
            return (-nobs/2 * (np.log(2*np.pi) + np.log(sigma2))
                    - ssr/(2*sigma2))
        endog = self.endog
        k_ar = self.k_ar

        if isinstance(params, tuple):
            # broyden (all optimize.nonlin return a tuple until rewrite commit)
            params = np.asarray(params)

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar]
        lagstart = self.k_trend
        exog = self.exog
        if exog is not None:
            lagstart += exog.shape[1]
            # xp = exog[:k_ar]
        if self.k_trend == 1 and lagstart == 1:
            c = [params[0]] * k_ar  # constant-only no exogenous variables
        else:  #TODO: this isn't right
            #NOTE: when handling exog just demean and proceed as usual.
            c = np.dot(X[:k_ar, :lagstart], params[:lagstart])
        mup = np.asarray(c / (1 - np.sum(params[lagstart:])))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params, lagstart)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1. / nobs * (diffpVpinv + ssr)
        logdet = np_slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1/2. * (nobs * (np.log(2*np.pi) + np.log(sigma2))
                           - logdet + diffpVpinv/sigma2 + ssr/sigma2)
        return loglike
        return loglike
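
A hedged usage sketch for the method above, assuming the AR model class from scikits.statsmodels.tsa.ar_model (in later statsmodels releases the import path is statsmodels.tsa.ar_model) and its fit(maxlag=..., method=...) signature; the simulated series and seed are illustrative:

import numpy as np
from scikits.statsmodels.tsa.ar_model import AR

# Simulate y_t = 1 + 0.5*y_{t-1} + e_t and fit by exact MLE, which
# exercises the unconditional (non-"cmle") branch of loglike above.
np.random.seed(12345)
y = np.zeros(250)
for t in range(1, 250):
    y[t] = 1.0 + 0.5 * y[t - 1] + np.random.randn()

model = AR(y)
res = model.fit(maxlag=1, method="mle")
print(model.loglike(res.params))  # should match res.llf on the fitted results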