Code Example #1
File: ar_model.py Project: JerWatson/statsmodels
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c) / (1 - np.sum(params[k_trend:]))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1.0 / nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = slogdet(Vpinv)[1]  # TODO: add check for singularity
        loglike = -1 / 2.0 * (nobs * (np.log(2 * np.pi) + np.log(sigma2))
                              - logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
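For context, this method is not called directly; it is the objective the optimizer climbs when the legacy AR model is fit with method="mle". A minimal usage sketch, assuming an older statsmodels release (0.11 or earlier) where the AR class in statsmodels.tsa.ar_model still exists:

import numpy as np
from statsmodels.tsa.ar_model import AR  # legacy class; removed in newer releases

# simulate a stationary AR(2) with a constant term
np.random.seed(0)
n, c, phi1, phi2 = 500, 1.0, 0.6, -0.2
y = np.zeros(n)
for t in range(2, n):
    y[t] = c + phi1 * y[t - 1] + phi2 * y[t - 2] + np.random.randn()

# method="mle" routes the optimizer through _loglike_mle;
# method="cmle" would use the conditional likelihood (_loglike_css) instead
res = AR(y).fit(maxlag=2, method="mle", disp=0)
print(res.params, res.sigma2)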
Code Example #2
    def sigma2(self):
        #TODO: allow for DOF correction if exog is included
        model = self.model
        if model.method == "cmle":  # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:  # we need to calculate the ssr for the pre-sample
            # see loglike for details
            lagstart = self.k_trend  #TODO: handle exog
            p = self.k_ar
            params = self.params
            meany = params[0] / (1 - params[lagstart:].sum())
            pre_resid = model.endog[:p] - meany
            # get presample var-cov
            Vpinv = model._presample_varcov(params, lagstart)
            diffpVpinv = np.dot(np.dot(pre_resid.T, Vpinv), pre_resid).item()
            ssr = sumofsq(self.resid[p:])  # in-sample ssr

            return 1. / self.nobs * (diffpVpinv + ssr)
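The pre-sample correction above is easiest to see in the AR(1) case, where Hamilton 5.3.7 reduces to the scalar Vp**-1 = 1 - phi**2. A self-contained sketch of the same computation (exact_sigma2_ar1 is a hypothetical helper, not part of statsmodels):

import numpy as np

def exact_sigma2_ar1(y, const, phi):
    # process mean implied by the constant: mu = const / (1 - phi)
    meany = const / (1 - phi)
    pre_resid = y[0] - meany
    # for p=1 the presample var-cov inverse is the scalar 1 - phi**2
    diffpVpinv = (1 - phi**2) * pre_resid**2
    resid = y[1:] - const - phi * y[:-1]   # in-sample residuals
    ssr = np.sum(resid**2)
    return (diffpVpinv + ssr) / len(y)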
Code Example #3
File: ar_model.py Project: JerWatson/statsmodels
    def _loglike_css(self, params):
        """
        Loglikelihood of AR(p) process using conditional sum of squares
        """
        nobs = self.nobs
        Y = self.Y
        X = self.X
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))
        sigma2 = ssr / nobs
        return -nobs / 2 * (np.log(2 * np.pi) + np.log(sigma2)) - ssr / (2 * sigma2)
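Here self.Y and self.X are the trimmed endogenous vector and the constant-plus-lags regressor matrix, so the conditional MLE coincides with OLS on the lag regression. A standalone sketch that builds both from scratch (css_loglike_ar is a hypothetical helper, not the statsmodels implementation):

import numpy as np

def css_loglike_ar(y, p):
    n = len(y)
    Y = y[p:]                                 # what self.Y holds
    X = np.column_stack([np.ones(n - p)] +    # constant, then lags 1..p
                        [y[p - k:n - k] for k in range(1, p + 1)])
    params, *_ = np.linalg.lstsq(X, Y, rcond=None)   # OLS = conditional MLE
    nobs = len(Y)
    ssr = np.sum((Y - X @ params) ** 2)
    sigma2 = ssr / nobs
    # at the concentrated optimum ssr/(2*sigma2) collapses to nobs/2
    return -nobs / 2 * (np.log(2 * np.pi) + np.log(sigma2)) - ssr / (2 * sigma2)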
Code Example #4
File: ar_model.py Project: TPLink32/spnk1
    def _loglike_css(self, params):
        """
        Loglikelihood of AR(p) process using conditional sum of squares
        """
        nobs = self.nobs
        Y = self.Y
        X = self.X
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))
        sigma2 = ssr / nobs
        return (-nobs / 2 * (np.log(2 * np.pi) + np.log(sigma2)) - ssr /
                (2 * sigma2))
Code Example #5
File: ar_model.py Project: zhisheng/statsmodels
    def _loglike_mle(self, params):
        """
        Loglikelihood of AR(p) process using exact maximum likelihood
        """
        nobs = self.nobs
        Y = self.Y
        X = self.X
        endog = self.endog
        k_ar = self.k_ar
        k_trend = self.k_trend

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar].copy()
        if k_trend:
            c = [params[0]] * k_ar
        else:
            c = [0]
        mup = np.asarray(c) / (1 - np.sum(params[k_trend:]))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1. / nobs * (diffpVpinv + ssr)
        self.sigma2 = sigma2
        logdet = np_slogdet(Vpinv)[1]  #TODO: add check for singularity
        loglike = -1 / 2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2))
                             - logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
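Both exact-MLE snippets first pass params through self._transparams when transparams is set. A sketch of that Jones (1980) reparameterization, assuming the Durbin-Levinson style recursion statsmodels uses in its _ar_transparams helper (written from memory here, so treat the details as an approximation):

import numpy as np

def ar_transparams(params):
    # squash unconstrained values into (-1, 1); these act as partial
    # autocorrelations, which guarantees a stationary AR process
    newparams = ((1 - np.exp(-params)) / (1 + np.exp(-params))).copy()
    tmp = newparams.copy()
    # Durbin-Levinson recursion maps partial autocorrelations to AR coefficients
    for j in range(1, len(params)):
        a = newparams[j]
        for k in range(j):
            tmp[k] -= a * newparams[j - k - 1]
        newparams[:j] = tmp[:j]
    return newparams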
Code Example #6
File: ar_model.py Project: TPLink32/spnk1
    def sigma2(self):
        model = self.model
        if model.method == "cmle":  # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:
            return self.model.sigma2
Code Example #7
File: ar_model.py Project: AnaMP/statsmodels
    def sigma2(self):
        model = self.model
        if model.method == "cmle":  # do DOF correction
            return 1. / self.nobs * sumofsq(self.resid)
        else:
            return self.model.sigma2
Code Example #8
    def loglike(self, params):
        """
        The loglikelihood of an AR(p) process

        Parameters
        ----------
        params : array
            The fitted parameters of the AR model

        Returns
        -------
        llf : float
            The loglikelihood evaluated at `params`

        Notes
        -----
        Contains constant term.  If the model is fit by OLS then this returns
        the conditional maximum likelihood.

        .. math:: -\\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}

        If it is fit by MLE then the (exact) unconditional maximum likelihood
        is returned.

        .. math:: -\\frac{n}{2}\\log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\log\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{t}^{2}

        where

        :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
        mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
        variance-covariance matrix of the first `p` observations.
        """
        #TODO: Math is on Hamilton ~pp 124-5
        #will need to be amended for inclusion of exogenous variables
        nobs = self.nobs
        Y = self.Y
        X = self.X
        if self.method == "cmle":
            ssr = sumofsq(Y.squeeze() - np.dot(X, params))
            sigma2 = ssr / nobs
            return -nobs/2 * (np.log(2*np.pi) + np.log(sigma2)) -\
                    ssr/(2*sigma2)
        endog = self.endog
        k_ar = self.k_ar

        if isinstance(params, tuple):
            # broyden (all optimize.nonlin return a tuple until rewrite commit)
            params = np.asarray(params)

        # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
        if self.transparams:
            params = self._transparams(params)

        # get mean and variance for pre-sample lags
        yp = endog[:k_ar]
        lagstart = self.k_trend
        exog = self.exog
        if exog is not None:
            lagstart += exog.shape[1]
#            xp = exog[:k_ar]
        if self.k_trend == 1 and lagstart == 1:
            c = [params[0]] * k_ar  # constant-only no exogenous variables
        else:  #TODO: this isn't right
            #NOTE: when handling exog just demean and proceed as usual.
            c = np.dot(X[:k_ar, :lagstart], params[:lagstart])
        mup = np.asarray(c) / (1 - np.sum(params[lagstart:]))
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        Vpinv = self._presample_varcov(params, lagstart)

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1. / nobs * (diffpVpinv + ssr)
        logdet = np_slogdet(Vpinv)[1]  #TODO: add check for singularity
        loglike = -1 / 2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2))
                             - logdet + diffpVpinv / sigma2 + ssr / sigma2)
        return loglike
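self._presample_varcov is not shown on this page, but the next example inlines the same computation. A standalone sketch of V_p**-1 per Hamilton (1994) eq. 5.3.7, reconstructed from that loop (presample_varcov_inv is a hypothetical name):

import numpy as np

def presample_varcov_inv(ar_params):
    params0 = np.r_[-1, ar_params]
    p = len(ar_params)
    p1 = p + 1
    Vpinv = np.zeros((p, p))
    for i in range(1, p1):
        for j in range(i, p1):   # upper triangle only
            part1 = np.sum(params0[:i] * params0[j - i:j])
            part2 = np.sum(params0[p1 - j:p1 + i - j] * params0[p1 - i:])
            Vpinv[i - 1, j - 1] = part1 - part2
    # mirror into the lower triangle
    return Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())

# sanity check: for AR(1), Vp**-1 reduces to the scalar 1 - phi**2
assert np.allclose(presample_varcov_inv(np.array([0.5])), [[0.75]])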
Code Example #9
    def loglike(self, params):
        """
        The unconditional loglikelihood of an AR(p) process

        Notes
        -----
        Contains constant term.
        """

        nobs = self.nobs
        avobs = self.avobs
        Y = self.Y
        X = self.X
        endog = self.endog
        penalty = self.penalty
        laglen = self.laglen

        # Try reparameterization:
        # just goes to the edge of the boundary for Newton
        # reparameterize to ensure stability -- Hamilton 5.9.1
        # if not np.all(params == 0):
        #     params = params / (1 + np.abs(params))

        if isinstance(params, tuple):
            # broyden (all optimize.nonlin return a tuple until rewrite commit)
            params = np.asarray(params)

        usepenalty = False
        # http://en.wikipedia.org/wiki/Autoregressive_model
        roots = np.roots(np.r_[1,-params[1:]])
        mask = np.abs(roots) >= 1
        if np.any(mask) and penalty:
            mask = np.r_[False, mask]
#            signs = np.sign(params)
#            np.putmask(params, mask, .9999)
#            params *= signs
            usepenalty = True

        yp = endog[:laglen]
        mup = np.asarray([params[0] / (1 - np.sum(params[1:]))] * laglen)
        #TODO: the above is only correct for constant-only case
        diffp = yp - mup[:, None]

        # get inv(Vp) Hamilton 5.3.7
        params0 = np.r_[-1, params[1:]]

        p = len(params) - 1  #TODO: change to trendorder? and above?
        p1 = p + 1
        Vpinv = np.zeros((p, p))
        for i in range(1, p1):
            for j in range(1, p1):
                if i <= j and j <= p:
                    part1 = np.sum(params0[:i] * params0[j - i:j])
                    part2 = np.sum(params0[p1 - j:p1 + i - j] * params0[p1 - i:])
                    Vpinv[i - 1, j - 1] = part1 - part2
        Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
        # this is correct to here

        diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
        ssr = sumofsq(Y.squeeze() - np.dot(X, params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1. / avobs * (diffpVpinv + ssr)
        logdet = np_slogdet(Vpinv)[1]  #TODO: add check for singularity
        loglike = -1 / 2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2))
                             - logdet + diffpVpinv / sigma2 + ssr / sigma2)

        if usepenalty:
            # subtract a quadratic penalty since we min the negative of loglike
            #NOTE: penalty coefficient should increase with iterations
            # this uses a static one of 1e3
            print("Penalized!")
            loglike -= 1000 * np.sum((mask * params)**2)
        return loglike
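The penalty branch above hinges on the usual stationarity condition: the AR(p) is stable only when every root of lambda**p - phi_1*lambda**(p-1) - ... - phi_p lies strictly inside the unit circle, which is exactly what the np.roots check at the top of the function tests. A quick illustration:

import numpy as np

phi = np.array([0.6, -0.2])            # AR(2) coefficients
lam = np.roots(np.r_[1, -phi])         # companion-polynomial roots
print(np.all(np.abs(lam) < 1))         # True -> stationary, no penalty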