Example #1
    def hastings_factor(self):
        """
        Compute the Hastings factor for the asymmetric lognormal proposal.
        """
        # Precision parameter of the lognormal proposal; pm and np are the
        # usual module-level imports of pymc and numpy.
        tau = (self.adaptive_scale_factor * self.proposal_sd) ** 2
        cur_val = self.stochastic.value
        last_val = self.stochastic.last_value

        # Log-densities of the forward and backward proposals.
        lp_for = pm.lognormal_like(cur_val, mu=np.log(last_val), tau=tau)
        lp_bak = pm.lognormal_like(last_val, mu=np.log(cur_val), tau=tau)

        if self.verbose > 1:
            print(self._id + ': Hastings factor %f' % (lp_bak - lp_for))
        return lp_bak - lp_for
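The factor above is meant to be added to the log-acceptance ratio of a Metropolis step. A minimal sketch of that use, assuming the two model log-probabilities and the Hastings factor are computed elsewhere (the metropolis_accept helper is made up for illustration):

import numpy as np

def metropolis_accept(logp_new, logp_old, hastings_factor):
    # Accept with probability min(1, exp(logp_new - logp_old + hastings_factor)).
    log_alpha = logp_new - logp_old + hastings_factor
    return np.log(np.random.uniform()) < log_alpha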
Example #2
    def hastings_factor(self):
        """
        Compute the Hastings factor for the asymmetric lognormal proposal.
        """
        # Precision parameter of the lognormal proposal; pm and np are the
        # usual module-level imports of pymc and numpy.
        tau = (self.adaptive_scale_factor * self.proposal_sd) ** 2
        cur_val = self.stochastic.value
        last_val = self.stochastic.last_value

        # Log-densities of the forward and backward proposals.
        lp_for = pm.lognormal_like(cur_val, mu=np.log(last_val), tau=tau)
        lp_bak = pm.lognormal_like(last_val, mu=np.log(cur_val), tau=tau)

        if self.verbose > 1:
            print(self._id + ': Hastings factor %f' % (lp_bak - lp_for))
        return lp_bak - lp_for
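For reference, PyMC 2's pm.lognormal_like(x, mu, tau) returns the log-density of a lognormal variate with log-scale mean mu and precision tau. A quick standalone check (the numbers are illustrative):

import numpy as np
import pymc as pm

# Log-density of a lognormal at x = 2.0 with log-scale mean log(1.5)
# and precision tau = 4.0 (i.e. sd = 0.5 on the log scale).
lp = pm.lognormal_like(2.0, mu=np.log(1.5), tau=4.0)
print('log-probability: %f' % lp)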
Example #3
def get_likelihood_M0(map_M0, x, pwr, sigma, tau, obstype):
    A0 = get_variables_M0(map_M0)
    A0 = curve_fit_M0(x, pwr, A0, sigma)
    # Use a normal likelihood for log-transformed observations,
    # otherwise a lognormal likelihood on the raw power.
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M0(x, A0), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M0(x, A0), tau)
Example #4
def get_likelihood_M2(map_M2, x, pwr, sigma, tau, obstype):
    A2 = get_variables_M2(map_M2)
    A2 = curve_fit_M2(x, pwr, A2, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M2(x, A2), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M2(x, A2), tau)
Example #5
def get_likelihood_M1(map_M1, x, pwr, sigma, tau, obstype):
    A1 = get_variables_M1(map_M1)
    A1 = curve_fit_M1(x, pwr, A1, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M1(x, A1), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M1(x, A1), tau)
Example #6
    def integrand(y, x):
        # Density centered on (x0, y0); x and y are offsets around that
        # point. x0, y0, m, sig and cov come from the enclosing scope.
        e1 = np.exp(pymc.lognormal_like(y + y0, np.log(m * (x + x0)), 1. / sig ** 2))
        e2 = np.exp(pymc.mv_normal_cov_like(np.array([x, y]), np.array([0, 0]), cov))
        return e1 * e2
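Note that integrand(y, x) matches the argument order expected by scipy.integrate.dblquad. A sketch of how it might be evaluated numerically; the values of x0, y0, m, sig and cov below are made-up placeholders for the closure variables:

import numpy as np
import pymc
from scipy import integrate

# Made-up placeholders for the enclosing-scope variables.
x0, y0, m, sig = 1.0, 1.0, 2.0, 0.3
cov = np.array([[0.5, 0.1], [0.1, 0.5]])

def integrand(y, x):
    e1 = np.exp(pymc.lognormal_like(y + y0, np.log(m * (x + x0)), 1. / sig ** 2))
    e2 = np.exp(pymc.mv_normal_cov_like(np.array([x, y]), np.array([0., 0.]), cov))
    return e1 * e2

# Integrate over a small box around (0, 0); dblquad passes (y, x).
val, err = integrate.dblquad(integrand, -0.5, 0.5, lambda x: -0.5, lambda x: 0.5)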
Example #7
def get_likelihood_M0(map_M0, x, pwr, sigma, obstype):
    tau = 1.0 / (sigma ** 2)
    A0 = get_variables_M0(map_M0)[0:3]
    A0 = curve_fit_M0(x, pwr, A0, sigma)
    if obstype == '.logiobs':
        return pymc.normal_like(pwr, get_spectrum_M0(x, A0), tau)
    else:
        return pymc.lognormal_like(pwr, get_spectrum_M0(x, A0), tau)
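These likelihood helpers switch between a normal model for log-transformed power and a lognormal model for raw power. The two log-densities differ by the Jacobian of the log transform, which can be checked directly (the values are illustrative):

import numpy as np
import pymc

x, mu, tau = 3.0, 0.5, 2.0
# lognormal_like(x) equals normal_like(log x) minus the Jacobian term log(x).
assert np.allclose(pymc.lognormal_like(x, mu, tau),
                   pymc.normal_like(np.log(x), mu, tau) - np.log(x))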
Example #8
def SIR_simplex_sample(mu, tau, cutoff, sum_val, N, N_proposals=1000, N_samps=1000):
    """
    Returns raw log-weights, indices chosen and SIR samples for sets of N draws
    from a truncated lognormal distribution, conditioned so that their sum is
    equal to sum_val.
    
    This SIR algorithm will fail miserably unless sum_val is relatively likely
    given N and the parameters of the lognormal distribution.
    
    :Parameters:
      - mu : float
        The mean parameter.
      - tau : float
        The precision parameter.
      - cutoff : float
        The truncation value.
      - sum_val : float
        The sum that is being conditioned on.
      - N : integer
        The number of variates in each vector.
      - N_proposals : integer
        The number of vectors to propose.
      - N_samps : integer
        The number of vectors to return.
    """
    # Draw samples, compute the missing values, and evaluate log-weights.
    # geto_truncnorm and log_sum are helpers from the surrounding module.
    samps = np.exp(geto_truncnorm(mu, tau, np.log(cutoff), (N - 1, N_proposals)))
    last_vals = sum_val - np.sum(samps, axis=0)
    weights = np.array([pm.lognormal_like(last_val_now, mu, tau)
                        for last_val_now in last_vals])

    # Check that at least some log-weights are finite.
    where_pos = np.where(weights > -1e308)
    if len(where_pos[0]) == 0:
        raise RuntimeError('No weights are positive. sum_val is too unlikely '
                           'given N and the lognormal parameters.')

    # Mask proposals whose missing value violates the truncation, then
    # normalize the log-weights.
    weights[np.where(last_vals > cutoff)] = -np.inf
    weights -= log_sum(weights)

    # Append the missing values to the samples.
    samps = np.vstack((samps, last_vals))

    # Resample indices in proportion to the weights and return.
    ind = np.array(pm.rcategorical(p=np.exp(weights), size=N_samps), dtype=int)
    return weights, ind, samps[:, ind]
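A hedged usage sketch; the parameter values are illustrative, and geto_truncnorm and log_sum must be supplied by the surrounding module for the call to run:

# Draw 500 vectors of 10 lognormal(mu=0, tau=4) variates, truncated at 5
# and conditioned to sum to 12 (illustrative numbers only).
weights, ind, samps = SIR_simplex_sample(mu=0.0, tau=4.0, cutoff=5.0,
                                         sum_val=12.0, N=10,
                                         N_proposals=5000, N_samps=500)
assert samps.shape == (10, 500)
assert np.allclose(samps.sum(axis=0), 12.0)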
Example #9
def y(value=0.01, x=x):
    # Log-probability of a lognormal with log-scale mean x[0] and unit
    # precision; in PyMC 2 this is normally wrapped by @pymc.stochastic.
    return pymc.lognormal_like(value, mu=x[0], tau=1.)
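In PyMC 2 a function of this shape is usually turned into a stochastic node with the @pymc.stochastic decorator. A self-contained sketch, with a made-up two-element parent x:

import numpy as np
import pymc

x = np.array([0.0, 1.0])  # made-up parent values

@pymc.stochastic
def y(value=0.01, x=x):
    # Log-probability: lognormal with log-scale mean x[0] and tau = 1.
    return pymc.lognormal_like(value, mu=x[0], tau=1.)

print(y.logp)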
Example #10
def Y(x=X, z=z):
    # Lognormal log-density of the residual z - x with mu=1 and tau=0.4.
    return mc.lognormal_like(z - x, mu=1, tau=0.4)
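Here the lognormal log-density scores the residual z - x, the pattern used for a PyMC 2 potential. A minimal sketch with made-up parents X and z:

import pymc as mc

X = mc.Normal('X', mu=0., tau=1.)  # made-up parent
z = 2.5                            # made-up observed value

@mc.potential
def Y(x=X, z=z):
    # Lognormal log-density of the residual z - x (mu=1, tau=0.4).
    return mc.lognormal_like(z - x, mu=1, tau=0.4)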