Example #1
def infer_arrival(amplitude,
                  time,
                  arrival_prior_probs,
                  fac,
                  chainsize=50000,
                  burnin=10000,
                  thin=4,
                  useboxcox=True):
    n = len(amplitude)

    dt = time[1] - time[0]
    t0 = time[0]

    # Box-Cox transformation to bring the data closer to a normal distribution
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox.html
    # https://en.wikipedia.org/wiki/Power_transform#Box-Cox_transformation
    if useboxcox:
        where = np.isfinite(amplitude)

        eps = 0.1
        c = -np.nanmin(amplitude[where]) + eps
        aux, lmbd = boxcox(amplitude[where] + c)
        if lmbd != 0.0:
            amplitude = ((amplitude + c)**lmbd - 1.0) / lmbd
        else:
            amplitude = np.log(amplitude + c)

    a1, b1, a2, b2 = prior_parameters.gammas(amplitude, arrival_prior_probs,
                                             fac, 1.0 - fac)

    sigma2_1 = pm.InverseGamma("sigma2_1", a1, b1)
    sigma2_2 = pm.InverseGamma("sigma2_2", a2, b2)

    arrival = arrival_distribution.get_pymc_model(arrival_prior_probs)

    @pm.deterministic
    def precision(arrival=arrival, sigma2_1=sigma2_1, sigma2_2=sigma2_2):
        out = np.empty(n)
        out[:arrival] = sigma2_1
        out[arrival:] = sigma2_2
        return 1.0 / out

    observation = pm.Normal("obs",
                            0.0,
                            precision,
                            value=amplitude,
                            observed=True)
    model = pm.Model([observation, sigma2_1, sigma2_2, arrival], verbose=0)

    mcmc = pm.MCMC(model, verbose=0)
    mcmc.sample(chainsize, burnin, thin)

    arrival_samples = mcmc.trace('arrival')[:]

    return arrival_samples
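
The function above depends on the project-local prior_parameters and arrival_distribution modules and on the PyMC 2.x API (it also assumes numpy as np, pymc as pm, and scipy.stats.boxcox are imported). A minimal self-contained sketch of the same change-point variance idea in the current PyMC API, with illustrative hyperparameters standing in for prior_parameters.gammas, could look like this:

# Sketch only: synthetic data and fixed hyperparameters replace the
# project-specific helpers used in the original function.
import numpy as np
import pymc as pm

rng = np.random.default_rng(0)
amplitude = np.concatenate([rng.normal(0, 0.5, 300),   # quiet segment before arrival
                            rng.normal(0, 2.0, 200)])  # noisier segment after arrival
n = len(amplitude)

with pm.Model():
    sigma2_1 = pm.InverseGamma("sigma2_1", alpha=2.0, beta=1.0)
    sigma2_2 = pm.InverseGamma("sigma2_2", alpha=2.0, beta=1.0)
    arrival = pm.DiscreteUniform("arrival", lower=0, upper=n - 1)

    # variance switches from sigma2_1 to sigma2_2 at the arrival index
    var = pm.math.switch(arrival > np.arange(n), sigma2_1, sigma2_2)
    pm.Normal("obs", mu=0.0, sigma=pm.math.sqrt(var), observed=amplitude)

    idata = pm.sample(1000, tune=1000, chains=2, random_seed=0)

arrival_samples = idata.posterior["arrival"].values.ravel()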
Example #2
def fixture_model():
    with pm.Model() as model:
        n = 5
        dim = 4
        with pm.Model():
            cov = pm.InverseGamma("cov", alpha=1, beta=1)
            x = pm.Normal("x",
                          mu=np.ones((dim, )),
                          sigma=pm.math.sqrt(cov),
                          shape=(n, dim))
            eps = pm.HalfNormal("eps", np.ones((n, 1)), shape=(n, dim))
            mu = pm.Deterministic("mu", at.sum(x + eps, axis=-1))
            y = pm.Normal("y", mu=mu, sigma=1, shape=(n, ))
    return model, [cov, x, eps, y]
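
A minimal usage sketch for this fixture, assuming module-level imports of numpy as np, pymc as pm, and aesara.tensor as at (on newer PyMC releases the tensor module is pytensor.tensor instead); since nothing is observed, sampling simply explores the joint prior:

# Draw counts are illustrative.
model, (cov, x, eps, y) = fixture_model()
with model:
    idata = pm.sample(draws=200, tune=200, chains=2, random_seed=0)
print(idata.posterior["cov"].mean().item())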
Example #3
def gmm_model(data, K, mu_0=0.0, alpha_0=0.1, beta_0=0.1, alpha=1.0):
    """
    K: number of component
    n_samples: number of n_samples
    n_features: number of features

    mu_0: prior mean of mu_k 
    alpha_0: alpha of Inverse Gamma tau_k 
    beta_0: beta of Inverse Gamma tau_k
    alpha = prior of dirichlet distribution phi_0

    latent variable:
    phi_0: shape = (K-1, ), dirichlet distribution
    phi: shape = (K, ), add K-th value back to phi_0
    z: shape = (n_samples, ), Categorical distribution, z[k] is component indicator 
    mu_k: shape = (K, n_features), normal distribution, mu_k[k] is mean of k-th component
    tau_k : shape = (K, n_features), inverse-gamma distribution, tau_k[k] is variance of k-th component
    """

    n_samples, n_features = data.shape

    # latent variables
    tau_k = pm.InverseGamma('tau_k',
                            alpha_0 * np.ones((K, n_features)),
                            beta_0 * np.ones((K, n_features)),
                            value=beta_0 * np.ones((K, n_features)))
    mu_k = pm.Normal('mu_k',
                     np.ones((K, n_features)) * mu_0,
                     tau_k,
                     value=np.ones((K, n_features)) * mu_0)
    phi_0 = pm.Dirichlet('phi_0', theta=np.ones(K) * alpha)

    @pm.deterministic(dtype=float)
    def phi(value=np.ones(K) / K, phi_0=phi_0):
        val = np.hstack((phi_0, (1 - np.sum(phi_0))))
        return val

    z = pm.Categorical('z',
                       p=phi,
                       value=pm.rcategorical(np.ones(K) / K, size=n_samples))

    # observed variables
    x = pm.Normal('x', mu=mu_k[z], tau=tau_k[z], value=data, observed=True)

    return pm.Model([mu_k, tau_k, phi_0, phi, z, x])
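
A usage sketch under PyMC 2.x with synthetic two-feature data (cluster locations, sizes, and iteration counts are illustrative):

import numpy as np
import pymc as pm

# three well-separated 2-D clusters as toy data
rng = np.random.RandomState(0)
data = np.vstack([rng.normal(loc, 0.5, size=(100, 2)) for loc in (-4.0, 0.0, 4.0)])

model = gmm_model(data, K=3)
mcmc = pm.MCMC(model)
mcmc.sample(iter=20000, burn=5000, thin=5)

z_samples = mcmc.trace('z')[:]        # component assignments per draw
mu_samples = mcmc.trace('mu_k')[:]    # component means per draw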
Example #4
File: model.py  Project: hlxin/bayeschem
    dos_sub = interpolate_nrg(dos_sub, dos_sub_energy, energy)
    dos_ds.append(dos_sub)

    dos_ads = row.data['dos_ads'][1][0] + row.data['dos_ads'][1][1]
    dos_ads_energy = row.data['dos_ads'][0]
    dos_ads = interpolate_nrg(dos_ads, dos_ads_energy, energy)
    dos_adss.append(dos_ads)

# priors
dE_0 = pm.Normal('dE_0', -3.25, 1, value=-3.25)
eps_a = pm.Normal('eps_a', -5.0, 1, value=-5.0)
delta_0 = pm.Lognormal('delta_0', 1, 0.25, value=1.0)
alpha = pm.Uniform('alpha', 0, 1.0, value=0.036)
beta = pm.Lognormal('beta', 2, 1, value=2.1)

var_1 = pm.InverseGamma('var_1', 2.0, 0.05, value=0.05)
var_2 = pm.InverseGamma('var_2', 2.0, 0.1, value=0.1)

lamb = .01


@pm.stochastic(observed=True)
def custom_stochastic(eps_a=eps_a,
                      beta=beta,
                      delta_0=delta_0,
                      alpha=alpha,
                      dE_0=dE_0,
                      var_1=var_1,
                      var_2=var_2,
                      value=dos_adss):
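
The body of custom_stochastic is cut off in this extract. For reference, a PyMC 2.x observed stochastic returns the log-probability of its value given the parents passed as keyword defaults; a purely illustrative pattern (not the chemisorption likelihood used in this project) looks like:

@pm.stochastic(observed=True)
def example_likelihood(mu=0.0, tau=1.0, value=np.zeros(10)):
    # return the log-likelihood of `value` given the parent parameters
    return pm.normal_like(value, mu, tau)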
Example #5
        model.loglike(proposed**2) - model.loglike(trace[s-1]**2) +
        prior_obs.logpdf(proposed[0]) + prior_level.logpdf(proposed[1]) -
        prior_obs.logpdf(trace[s-1, 0]) - prior_level.logpdf(trace[s-1, 1]))

    if acceptance_probability > uniform.rvs():
        trace[s] = proposed
        trace_accepts[s-1] = 1
    else:
        trace[s] = trace[s-1]

np.random.seed(SEED)

import pymc as mc

# Priors as "stochastic" elements
prior_obs = mc.InverseGamma('obs', 3, 300)
prior_level = mc.InverseGamma('level', 3, 120)

# Create the model for likelihood evaluation
model = MLELocalLevel(nile)

# Create the "data" component (stochastic and observed)
@mc.stochastic(dtype=sm.tsa.statespace.MLEModel, observed=True)
def loglikelihood(value=model, obs_std=prior_obs, level_std=prior_level):
    return value.loglike([obs_std**2, level_std**2])

# Create the PyMC model
pymc_model = mc.Model((prior_obs, prior_level, loglikelihood))

# Create a PyMC sample and perform sampling
sampler = mc.MCMC(pymc_model)
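
From here, sampling and trace extraction would typically proceed as below (iteration counts are illustrative):

# Run the sampler and pull posterior draws for the two
# standard-deviation parameters defined above.
sampler.sample(iter=10000, burn=1000, thin=10)
obs_draws = sampler.trace('obs')[:]
level_draws = sampler.trace('level')[:]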
Example #6
def KellyModel(x, xerr, y, yerr, xycovar, parts, ngauss=3):
    #Implementation of Kelly07 model, but without nondetection support



    #Prior as defined in section 6.1 of Kelly07

    alpha = pymc.Uninformative('alpha', value = np.random.uniform(-1, 1))
    parts['alpha'] = alpha
    beta = pymc.Uninformative('beta', value = np.random.uniform(-np.pi/2, np.pi/2))
    parts['beta'] = beta

    sigint2 = pymc.Uniform('sigint2', 1e-4, 1.)
    parts['sigint2'] = sigint2
    
    piprior = pymc.Dirichlet('pi', np.ones(ngauss))
    parts['piprior'] = piprior

    @pymc.deterministic(trace=False)
    def pis(piprior = piprior):
        lastpi = 1. - np.sum(piprior)
        allpi = np.zeros(ngauss)
        allpi[:-1] = piprior
        allpi[-1] = lastpi
        return allpi
    parts['pis'] = pis

    mu0 = pymc.Uninformative('mu0', np.random.uniform(-1, 1))
    parts['mu0'] = mu0
    w2 = pymc.Uniform('w2', 1e-4, 1e4)
    parts['w2'] = w2


    xvars = pymc.InverseGamma('xvars', 0.5, w2, size=ngauss+1)  #dropping the 1/2 factor on w2, because I don't think it matters
    parts['xvars'] = xvars

    @pymc.deterministic(trace=False)
    def tauU2(xvars = xvars):
        return 1./xvars[-1]
    parts['tauU2'] = tauU2

    xmus = pymc.Normal('xmus', mu0, tauU2, size = ngauss)
    parts['xmus'] = xmus

    

    @pymc.observed
    def likelihood(value = 0., x = x, xerr2 = xerr**2, y = y, yerr2 = yerr**2, xycovar = xycovar, 
                   alpha = alpha, beta = beta, sigint2 = sigint2, pis = pis, xmus = xmus, xvars = xvars):

        return stats.kelly_like(x = x, 
                                xerr2 = xerr2,
                                y = y,
                                yerr2 = yerr2,
                                xycovar = xycovar,
                                alpha = alpha,
                                beta = beta,
                                sigint2 = sigint2,
                                pis = pis,
                                mus = xmus,
                                tau2 = xvars[:-1])

    parts['likelihood'] = likelihood
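
A hypothetical wiring of the resulting parts dict into a PyMC 2.x sampler; note that the likelihood calls the project-local stats.kelly_like, so this only runs inside the original project, and the toy arrays below are illustrative:

import numpy as np
import pymc

rng = np.random.RandomState(0)
x = rng.normal(size=50)
y = 1.5 * x + rng.normal(scale=0.3, size=50)
xerr = np.full(50, 0.1)
yerr = np.full(50, 0.3)
xycovar = np.zeros(50)

parts = {}
KellyModel(x, xerr, y, yerr, xycovar, parts, ngauss=3)
sampler = pymc.MCMC(parts)
sampler.sample(iter=20000, burn=5000, thin=5)
alpha_trace = sampler.trace('alpha')[:]   # posterior draws of the 'alpha' regression parameter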
Example #7
    def __init__(self, snobj, filters=None, inc_var=False, **args):
        '''Create an MCMC sampler based on a sn object. The specified filters
        are fit using the model that is currently selected. Uniform priors
        are assumed for the parameters unless overridden by assigning pymc
        Stochastics through **args.'''

        self.sn = snobj
        if filters is None:
            filters = list(self.sn.data.keys())

        self.model = snobj.model
        self.model.args = {}
        self.model._fbands = filters
        self.model.setup()
        params = []
        paramnames = list(self.model.parameters.keys())
        # First, setup stochastics for our parameters
        for param in paramnames:
            if param in args:
                params.append(args[param])
                del args[param]
                continue
            if param == 'dm15':
                params.append(pymc.Uniform('dm15', 0.7, 2.0))
            elif param == 'st':
                params.append(pymc.Uniform('st', 0.25, 1.22))
            elif param == 'Tmax':
                t0 = min([self.sn.data[f].MJD.min() for f in self.sn.data])
                t1 = max([self.sn.data[f].MJD.max() for f in self.sn.data])
                params.append(pymc.Uniform('Tmax', t0 - 30, t1 + 30))
            elif param == 'EBVhost':
                params.append(pymc.Uniform('EBVhost', 0, 10.))
            elif param == 'DM':
                params.append(pymc.Uniform('DM', 0, 100))
            elif param.find('max') > 0:
                params.append(pymc.Uniform(str(param), 10., 30.))
            else:
                raise AttributeError(
                    "Error, parameter %s not recognized. Update MCMC package" %
                    (param))
            if self.model.parameters[param] is None:
                params[-1].value = self.model.guess(param)
            else:
                params[-1].value = self.model.parameters[param]
        params = pymc.Container(params)

        # now setup intrinsic variances for each filter
        if inc_var:
            vars = pymc.InverseGamma('taus',
                                     alpha=0.5,
                                     beta=0.1**2,
                                     value=np.random.uniform(
                                         0, 0.1**2, size=len(filters)))
        else:
            vars = np.array([0.0] * len(filters))

        # The data stochastic that maps parameters to observations
        @pymc.data
        @pymc.stochastic
        def model(params=params,
                  vars=vars,
                  paramnames=paramnames,
                  filters=filters,
                  value=1.0):
            # Set the parameters in the model
            for i, param in enumerate(paramnames):
                if debug:
                    print("setting ", param, " to ", params[i])
                self.model.parameters[param] = params[i]

            logp = 0
            numpts = 0
            for i, f in enumerate(filters):
                mod, err, mask = self.model(f, self.sn.data[f].MJD)
                m = mask * self.sn.data[f].mask
                if not np.sometrue(m):
                    continue
                numpts += np.sum(m)
                tau = np.power(vars[i] + np.power(self.sn.data[f].e_mag, 2),
                               -1)
                logp += pymc.normal_like(self.sn.data[f].mag[m], mod[m],
                                         tau[m])
            #if numpts < len(paramnames):
            #   return -np.inf
            return logp

        pymc.MCMC.__init__(self, locals(), **args)

        # Setup the step methods
        # 1) params will be AdaptiveMetropolis, so we need to setup initial
        #    scales. If the model has been fit, use error, otherwise guess.
        def_scales = {
            'Tmax': 0.5**2,
            'st': 0.001**2,
            'dm15': 0.001**2,
            'max': 0.01**2,
            'DM': 0.01**2,
            'EBVhost': 0.01**2
        }
        scales = {}
        for i, par in enumerate(self.paramnames):
            if par in self.model.errors and self.model.errors[par] > 0:
                scales[self.params[i]] = self.model.errors[par]
            else:
                if par in def_scales:
                    scales[self.params[i]] = def_scales[par]
                elif par[0] == "T" and par[-3:] == "max":
                    scales[self.params[i]] = def_scales['Tmax']
                elif par[-3:] == "max":
                    scales[self.params[i]] = def_scales['max']
                else:
                    scales[self.params[i]] = self.params[i].value / 10.
        self.use_step_method(pymc.AdaptiveMetropolis,
                             self.params,
                             scales=scales,
                             delay=1000,
                             interval=1000)

        if inc_var:
            self.use_step_method(
                pymc.AdaptiveMetropolis, [self.vars],
                scales={self.vars: self.vars.value * 0 + 0.005**2})
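
A hypothetical usage sketch: the enclosing class name is not shown in this extract, so snpy_sampler below is a stand-in for whatever class defines this __init__ (it must subclass pymc.MCMC, since it calls pymc.MCMC.__init__), and sn is assumed to be a loaded SNooPy supernova object with a selected model:

# Illustrative only: class name, filter choices, and iteration counts are assumptions.
sampler = snpy_sampler(sn, filters=['B', 'V'], inc_var=True)
sampler.sample(iter=20000, burn=5000, thin=5)   # standard PyMC 2.x sampling call
Tmax_draws = sampler.trace('Tmax')[:]           # posterior samples of the time of maximum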