Example #1
    # Assumes module-level imports of numpy as np and pymc as pm, plus a
    # gramschmidt() helper defined elsewhere in the same module.
    def step(self):
        self._index += 1
        if self._index % self.sleep_interval == 0:

            v = pm.value(self.v)
            m = pm.value(self.m)
            val = self.stochastic.value
            lp = pm.logp_of_set(self.other_children)

            # Choose a random unit direction along which to step.
            dirvec = np.random.normal(size=self.n)
            dirvec /= np.sqrt(np.sum(dirvec**2))

            # Orthogonalize the remaining basis against the chosen direction.
            orthoproj = gramschmidt(dirvec)
            scaled_orthoproj = v * orthoproj.T
            pck = np.dot(dirvec, scaled_orthoproj.T)
            kck = np.linalg.inv(np.dot(scaled_orthoproj, orthoproj))
            pckkck = np.dot(pck, kck)

            # Conditional variance of the prior along the chosen direction.
            condvar = np.dot(dirvec, dirvec * v) - np.dot(pck, pckkck)
            # condmean = np.dot(dirvec, m) + np.dot(pckkck, np.dot(orthoproj.T, (val-m)))

            # Evaluate a slice of the log-probability surface on a grid of
            # 501 points spanning +/- 4 conditional standard deviations.
            tries = np.linspace(-4 * np.sqrt(condvar), 4 * np.sqrt(condvar), 501)
            lps = 0 * tries

            for i in range(len(tries)):
                new_val = val + tries[i] * dirvec
                self.stochastic.value = new_val
                try:
                    lps[i] = self.f_fr.logp + self.stochastic.logp
                except pm.ZeroProbability:
                    lps[i] = -np.inf
            if np.all(np.isinf(lps)):
                raise ValueError('All -inf.')
            # Normalize on the log scale over the finite entries, then exponentiate.
            lps -= pm.flib.logsum(lps[~np.isinf(lps)])
            ps = np.exp(lps)

            # Draw a grid point from the discretized conditional and move there.
            index = pm.rcategorical(ps)
            new_val = val + tries[index] * dirvec
            self.stochastic.value = new_val

            # Metropolis accept/reject against the remaining children.
            try:
                lpp = pm.logp_of_set(self.other_children)
                if np.log(np.random.random()) < lpp - lp:
                    self.accepted += 1
                else:
                    self.stochastic.value = val
                    self.rejected += 1

            except pm.ZeroProbability:
                self.stochastic.value = val
                self.rejected += 1
        # Touch the joint log-probability so it is recomputed for the new state.
        self.logp_plus_loglike
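A rough usage sketch follows, assuming the step method above belongs to a pm.StepMethod subclass as in PyMC 2.x; the class name DirectionalStep, the model variable, and the constructor details below are illustrative placeholders, not taken from the original code.

import numpy as np
import pymc as pm

# Placeholder subclass standing in for the class that owns the step()
# method shown above; a real implementation would also set self.v, self.m,
# self.n, self.sleep_interval, etc. in __init__.
class DirectionalStep(pm.StepMethod):
    def step(self):
        pass  # the body shown above would go here

# Hypothetical latent vector the custom step method would update.
x = pm.MvNormal('x', mu=np.zeros(5), tau=np.eye(5))

M = pm.MCMC([x])
# Assign the custom step method to x instead of the default Metropolis step;
# extra keyword arguments are forwarded to the step method's __init__.
M.use_step_method(DirectionalStep, x)
M.sample(iter=200)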
Example #2
    def get_logp_plus_loglike(self):
        return pm.logp_of_set(self.markov_blanket_no_f)
Example #3
def pymc_logp():
    return logp_of_set(model)
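Most of these examples call logp_of_set on a collection of PyMC nodes. A minimal self-contained sketch of that pattern, assuming PyMC 2.x (the two-node model and the names below are illustrative, not from the original source):

import pymc as pm

# Illustrative model: a latent mean and one observed data point.
mu = pm.Normal('mu', mu=0.0, tau=1.0)
y = pm.Normal('y', mu=mu, tau=1.0, value=1.5, observed=True)
model = [mu, y]

# Sums the log-probabilities of the nodes in the collection; raises
# pm.ZeroProbability if any node's current value is outside its support.
print(pm.logp_of_set(model))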
Example #4
    def _get_logp_plus_loglike(self):
        return pm.logp_of_set(self.children)
Example #5
    def get_logp_plus_loglike(self):
        return pm.logp_of_set(self.mb_for_logp) + self.evidence.value
Example #6
def approximate_evidence(gmrf, Mc, ppc):
    r"The INLA approximation \pi(y|\theta)"
    # log p(y | x): likelihood terms depending on the GMRF
    pygx = pm.logp_of_set(gmrf.extended_children)
    # log p(x): the GMRF prior
    px = gmrf.logp
    # log of the multivariate-normal approximation to p(x | y), at the current value
    pxgy = gmrf.parents['backend'].mvn_logp(gmrf.value, Mc, **ppc)
    return pygx + px - pxgy
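For reference, the quantity returned above corresponds to the usual Laplace/INLA approximation of the evidence; in log form (notation chosen to mirror the variable names, not taken from the source):

    \log \pi(y \mid \theta) \approx \log p(y \mid x) + \log p(x) - \log \tilde{p}_G(x \mid y),

evaluated at the current value of the GMRF x, where \tilde{p}_G is the Gaussian approximation to the full conditional p(x \mid y); the three terms are pygx, px, and pxgy in the code.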