示例#1
0
 def aic(self):
     """
     Akaike information criterion for the fitted VAR(p).

     Returns
     -------
     aic : float
         log|Omega| + (2/T) * (p*K**2 + trendorder*K), where T is the
         number of available observations (``self.avobs``), K the number
         of equations (``self.neqs``), and p the lag length
         (``self.laglen``).

     Raises
     ------
     ValueError
         If the Omega matrix is not positive definite or is singular,
         so its log-determinant is undefined.
     """
     logdet = np_slogdet(self.omega)
     if logdet[0] == -1:
         raise ValueError("Omega matrix is not positive definite")
     elif logdet[0] == 0:
         raise ValueError("Omega matrix is singular")
     else:
         logdet = logdet[1]
     neqs = self.neqs
     trendorder = self.trendorder
     # Use float division: under Python 2, 2/self.avobs truncates to 0 for
     # integer avobs and silently drops the penalty term.
     return logdet + (2. / self.avobs) * (self.laglen * neqs**2 +
                                          trendorder * neqs)
示例#2
0
 def bic(self):
     """
     Bayesian (Schwarz) information criterion for the fitted VAR(p).

     Returns
     -------
     bic : float
         log|Omega| + (log(T)/T) * (p*K**2 + trendorder*K), where T is
         the number of available observations (``self.avobs``), K the
         number of equations (``self.neqs``), and p the lag length
         (``self.laglen``).

     Raises
     ------
     ValueError
         If the Omega matrix is not positive definite or is singular,
         so its log-determinant is undefined.
     """
     logdet = np_slogdet(self.omega)
     if logdet[0] == -1:
         raise ValueError("Omega matrix is not positive definite")
     elif logdet[0] == 0:
         raise ValueError("Omega matrix is singular")
     else:
         logdet = logdet[1]
     avobs = self.avobs
     neqs = self.neqs
     trendorder = self.trendorder
     # np.log(avobs) is a float, so the division below is safe under
     # Python 2 integer inputs as well.
     return logdet + np.log(avobs)/avobs * (self.laglen*neqs**2 +
             neqs*trendorder)
示例#3
0
    def loglike(self, params, omega):
        r"""
        Returns the value of the VAR(p) log-likelihood.

        Parameters
        ----------
        params : array-like
            The parameter estimates
        omega : ndarray
            Sigma hat matrix.  Each element i,j is the average product of the
            OLS residual for variable i and the OLS residual for variable j or
            np.dot(resid.T,resid)/avobs.  There should be no correction for the
            degrees of freedom.

        Returns
        -------
        loglike : float
            The value of the loglikelihood function for a VAR(p) model

        Raises
        ------
        ValueError
            If `omega` is not positive definite or is singular, so its
            log-determinant is undefined.

        Notes
        -----
        The loglikelihood function for the VAR(p) is

        .. math:: -\left(\frac{T}{2}\right)\left(K\ln\left(2\pi\right)+\ln\left|\Omega\right|+K\right)

        The docstring is a raw string so the LaTeX escapes (e.g. ``\frac``,
        ``\left``) are not interpreted as string escape sequences.
        """
        params = np.asarray(params)
        omega = np.asarray(omega)
        logdet = np_slogdet(omega)
        if logdet[0] == -1:
            raise ValueError("Omega matrix is not positive definite")
        elif logdet[0] == 0:
            raise ValueError("Omega matrix is singular")
        else:
            logdet = logdet[1]
        avobs = self.avobs
        neqs = self.neqs
        return -(avobs/2.)*(neqs*np.log(2*np.pi)+logdet+neqs)
示例#4
0
    def loglike(self, params):
        """
        The unconditional loglikelihood of an AR(p) process

        Parameters
        ----------
        params : array-like or tuple
            params[0] is used as the constant and params[1:] as the AR
            coefficients; only the constant-only trend case is handled
            for the presample mean (see the TODO below).

        Returns
        -------
        loglike : float
            The exact loglikelihood with the error variance concentrated
            out (Hamilton, eqs. 5.3.7 and 5.9.1), minus a quadratic
            penalty when ``self.penalty`` is set and the AR polynomial
            has non-stationary roots.

        Notes
        -----
        Contains constant term.
        """

        nobs = self.nobs
        avobs = self.avobs
        Y = self.Y
        X = self.X
        endog = self.endog
        penalty = self.penalty
        laglen = self.laglen

# Try reparamaterization:
# just goes to the edge of the boundary for Newton
# reparameterize to ensure stability -- Hamilton 5.9.1
#        if not np.all(params==0):
#            params = params/(1+np.abs(params))

        if isinstance(params,tuple):
            # broyden (all optimize.nonlin return a tuple until rewrite commit)
            params = np.asarray(params)

        usepenalty = False
        # stationarity check: roots of 1 - a_1*z - ... - a_p*z^p; any root
        # with modulus >= 1 indicates a (near) unit root
        # http://en.wikipedia.org/wiki/Autoregressive_model
        roots = np.roots(np.r_[1,-params[1:]])
        mask = np.abs(roots) >= 1
        if np.any(mask) and penalty:
            # prepend False so the mask lines up with params (constant first)
            mask = np.r_[False, mask]
#            signs = np.sign(params)
#            np.putmask(params, mask, .9999)
#            params *= signs
            usepenalty = True

        # presample observations and their unconditional mean
        yp = endog[:laglen]
        mup = np.asarray([params[0]/(1-np.sum(params[1:]))]*laglen)
        #TODO: the above is only correct for constant-only case
        diffp = yp-mup[:,None]

        # get inv(Vp) Hamilton 5.3.7
        params0 = np.r_[-1, params[1:]]

        p = len(params) - 1 #TODO: change to trendorder? and above?
        p1 = p+1
        Vpinv = np.zeros((p,p))
        # fill the upper triangle from the closed form, then symmetrize below
        for i in range(1,p1):
            for j in range(1,p1):
                if i <= j and j <= p:
                    part1 = np.sum(params0[:i] * params0[j-i:j])
                    part2 = np.sum(params0[p1-j:p1+i-j]*params0[p1-i:])
                    Vpinv[i-1,j-1] = part1 - part2
        Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
        # this is correct to here

        # quadratic form for the presample term and the conditional SSR
        diffpVpinv = np.dot(np.dot(diffp.T,Vpinv),diffp).item()
        ssr = sumofsq(Y.squeeze() -np.dot(X,params))

        # concentrating the likelihood means that sigma2 is given by
        sigma2 = 1./avobs * (diffpVpinv + ssr)
        logdet = np_slogdet(Vpinv)[1] #TODO: add check for singularity
        loglike = -1/2.*(nobs*(np.log(2*np.pi) + np.log(sigma2)) - \
                logdet + diffpVpinv/sigma2 + ssr/sigma2)

        if usepenalty:
        # subtract a quadratic penalty since we min the negative of loglike
        #NOTE: penalty coefficient should increase with iterations
        # this uses a static one of 1e3
            print "Penalized!"
            loglike -= 1000 *np.sum((mask*params)**2)
        return loglike