Example #1
    def _build_model(self, data):
        data = _data_df2dict(data)
        with pm.Model() as model:
            # Priors
            # NOTE: we need another variable if we deal with losses, which goes
            # to the value function
            β = pm.Bound(pm.Normal, lower=0)('beta', mu=1, sd=1000)
            γ = pm.Bound(pm.Normal, lower=0)('gamma', mu=0, sd=1000)
            τ = pm.Bound(pm.Normal, lower=0)('tau', mu=0, sd=1000)

            # TODO: pay attention to the choice function & its params
            α = pm.Exponential('alpha', lam=1)
            ϵ = 0.01

            value_diff = (self._value_function(γ, data['B']) -
                          self._value_function(γ, data['A']))
            time_diff = (self._time_weighing_function(τ, data['DB']) -
                         self._time_weighing_function(τ, data['DA']))
            diff = value_diff - β * time_diff

            # Choice function: psychometric
            P_chooseB = pm.Deterministic('P_chooseB',
                                         choice_func_psychometric2(α, ϵ, diff))
            # Likelihood of observations
            r_likelihood = pm.Bernoulli('r_likelihood',
                                        p=P_chooseB,
                                        observed=data['R'])

        return model
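
Note: every example on this page uses the same pm.Bound idiom: pm.Bound(Dist, lower=..., upper=...) returns a bounded-distribution factory, and calling that factory with a variable name plus the distribution's usual parameters creates the truncated random variable in the current model context. A minimal sketch (values illustrative, not taken from any example below):

import pymc3 as pm

with pm.Model() as model:
    # Two-step form: build the factory, then instantiate the variable
    PositiveNormal = pm.Bound(pm.Normal, lower=0.0)
    x = PositiveNormal('x', mu=1.0, sd=10.0)

    # One-step form, as in Example #1 above
    y = pm.Bound(pm.Normal, lower=0.0, upper=5.0)('y', mu=1.0, sd=10.0)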
Example #2
def build_model(model_name, data_set, property_types):

    # Build the likelihood operator.
    surrogate_model = StollWerthSurrogate(data_set.molecular_weight)
    log_likelihood = StollWerthOp(data_set, property_types, surrogate_model)

    # Define the potentially 'fixed' model constants.
    bond_length = data_set.bond_length.to(unit.nanometer).magnitude
    quadrupole = 0.0

    # Build the model
    with pymc3.Model() as model:

        epsilon = pymc3.Bound(pymc3.Exponential, 0.0)("epsilon",
                                                      lam=1.0 / 400.0)
        sigma = pymc3.Bound(pymc3.Exponential, 0.0)("sigma", lam=1.0 / 5.0)

        if model_name == "AUA" or model_name == "AUA+Q":
            bond_length = pymc3.Bound(pymc3.Exponential, 0.0)("bond_length",
                                                              lam=1.0 / 3.0)
        if model_name == "AUA+Q":
            quadrupole = pymc3.Bound(pymc3.Exponential, 0.0)("quadrupole",
                                                             lam=1.0)

        theta = tt.as_tensor_variable(
            [epsilon, sigma, bond_length, quadrupole])

        pymc3.DensityDist(
            "likelihood",
            lambda v: log_likelihood(v),
            observed={"v": theta},
        )

    return model
Example #3
        def submodel2(x, y, yerr, parent):
            with pm.Model(name="granulation", model=parent) as submodel:
                # The parameters of the SHOTerm kernel for non-periodic granulation
                mean2 = pm.Normal("mean2", mu=0.0, sd=10.0)
                #logz = pm.Uniform("logz", lower=np.log(2 * np.pi / 4), upper=np.log(2*np.pi/vgp.min_period))
                #sigma = pm.HalfCauchy("sigma", 3.0)
                #logw0 = pm.Normal("logw0", mu=logz, sd=2.0)
                logw0 = pm.Bound(pm.Normal,
                                 lower=np.log(2 * np.pi / 2.5),
                                 upper=np.log(2 * np.pi / self.min_period))(
                                     "logw0", mu=np.log(2 * np.pi / 0.8), sd=1)
                logSw4 = pm.Normal("logSw4",
                                   mu=np.log(np.var(y) * (2 * np.pi / 2)**4),
                                   sd=5)
                logs22 = pm.Normal("logs22", mu=np.log(np.mean(yerr)), sd=2.0)
                logQ = pm.Bound(pm.Normal,
                                lower=np.log(1 / 2),
                                upper=np.log(2))("logQ",
                                                 mu=np.log(1 / np.sqrt(2)),
                                                 sd=1)

                kernel2 = xo.gp.terms.SHOTerm(log_Sw4=logSw4,
                                              log_w0=logw0,
                                              log_Q=logQ)
                gp2 = xo.gp.GP(kernel2, x, yerr**2 + tt.exp(logs22))

                loglike2 = gp2.log_likelihood(y - mean2)

            return logw0, logQ, gp2, loglike2
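Example #4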
    def set_prior(self, param, abund=False, name_param=None):

        # Read distribution configuration
        dist_name = self.priorDict[param][0]
        dist_loc, dist_scale = self.priorDict[param][1], self.priorDict[param][2]
        dist_norm, dist_reLoc = self.priorDict[param][3], self.priorDict[param][4]

        # Load the corresponding probability distribution
        probDist = getattr(pymc3, dist_name)

        if abund:
            priorFunc = probDist(name_param, dist_loc, dist_scale) * dist_norm + dist_reLoc

        elif probDist.__name__ in ['HalfCauchy']:  # These distributions only have one parameter
            priorFunc = probDist(param, dist_loc, shape=self.total_regions) * dist_norm + dist_reLoc

        elif probDist.__name__ == 'Uniform':
            # priorFunc = probDist(param, lower=dist_loc, upper=dist_scale) * dist_norm + dist_reLoc
            if param == 'logOH':
                priorFunc = pymc3.Bound(pymc3.Normal, lower=7.1, upper=9.1)('logOH', mu=8.0, sigma=1.0, testval=8.1)
            elif param == 'logU':
                priorFunc = pymc3.Bound(pymc3.Normal, lower=-4.0, upper=-1.5)('logU', mu=-2.75, sigma=1.5, testval=-2.75)
            elif param == 'logNO':
                priorFunc = pymc3.Bound(pymc3.Normal, lower=-2.0, upper=0.0)('logNO', mu=-1.0, sigma=0.5, testval=-1.0)
        else:
            priorFunc = probDist(param, dist_loc, dist_scale, shape=self.total_regions) * dist_norm + dist_reLoc

        self.paramDict[param] = priorFunc

        return
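Example #5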
def v2_model(observations,
             nulls,
             null_sd,
             null_b,
             null_dispersed_prob,
             iter_count=2000,
             tune_iters=2000):
    with pm.Model() as model:
        # Probability of being a DE gene
        de_prob = pm.Beta('de_prob', alpha=1., beta=5.)

        # Probability of being downregulated
        down_prob = pm.Beta('down_prob', alpha=1., beta=1.)

        dispersed_prob = null_dispersed_prob

        mu_pos = pm.Lognormal('mu_pos', mu=-3, sd=1.)
        mu_neg = pm.Lognormal('mu_neg', mu=-3, sd=1.)
        sd_pos = pm.Gamma('sd_pos', alpha=0.01, beta=1.)
        sd_neg = pm.Gamma('sd_neg', alpha=0.01, beta=1.)
        nu_pos = pm.Gamma('nu_pos', alpha=5., beta=1.)
        nu_neg = pm.Gamma('nu_neg', alpha=5., beta=1.)

        spike_component = pm.Normal.dist(mu=0., sd=null_sd)
        slab_component = pm.Laplace.dist(mu=0., b=null_b)

        # Sample from Gaussian-Laplace mixture for null (spike-and-slab mixture)
        pm.Mixture('null',
                   comp_dists=[spike_component, slab_component],
                   w=tt.as_tensor([1. - dispersed_prob, dispersed_prob]),
                   observed=nulls)

        pos_component = pm.Bound(pm.StudentT, lower=0.).dist(mu=mu_pos,
                                                             sd=sd_pos,
                                                             nu=nu_pos)
        neg_component = pm.Bound(pm.StudentT, upper=0.).dist(mu=-mu_neg,
                                                             sd=sd_neg,
                                                             nu=nu_neg)

        pm.Mixture('obs',
                   w=tt.as_tensor([(1. - de_prob) * (1. - dispersed_prob),
                                   (1. - de_prob) * dispersed_prob,
                                   de_prob * (1. - down_prob),
                                   de_prob * down_prob]),
                   comp_dists=[
                       spike_component, slab_component, pos_component,
                       neg_component
                   ],
                   observed=observations)

        pm.Deterministic('log_prob', model.logpt)

        for RV in model.basic_RVs:
            print(RV.name, RV.logp(model.test_point))

        trace = pm.sample(iter_count, tune=tune_iters, chains=4)
        ppc = pm.sample_ppc(trace, samples=iter_count, model=model)

    return {'trace': trace, 'ppc': ppc}
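
A detail worth noting in v2_model above: pm.Bound(...).dist(...) returns an unnamed distribution object rather than a named model variable, which is exactly what pm.Mixture expects in comp_dists. A condensed sketch of the same pattern (hyperparameters made up):

import numpy as np
import pymc3 as pm

with pm.Model():
    w = pm.Dirichlet('w', a=np.ones(2))
    # Unnamed truncated components with support (0, inf) and (-inf, 0)
    pos = pm.Bound(pm.StudentT, lower=0.).dist(mu=1., sd=1., nu=5.)
    neg = pm.Bound(pm.StudentT, upper=0.).dist(mu=-1., sd=1., nu=5.)
    pm.Mixture('obs', w=w, comp_dists=[pos, neg],
               observed=np.array([-1.2, 0.7, 2.1]))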
Example #6
def case_count_model_us_states(df):

    # Normalize inputs in a way that is sensible:

    # People per test: normalize to South Korea
    # assuming S.K. testing is "saturated"
    ppt_sk = np.log10(51500000. / 250000)
    df['people_per_test_normalized'] = (
        np.log10(df['people_per_test_7_days_ago']) - ppt_sk)

    n = len(df)

    # For each state, let:
    # c_obs = number of observed cases
    c_obs = df['num_pos_7_days_ago'].values
    # c_star = number of true cases

    # d_obs = number of observed deaths
    d_obs = df[['death', 'num_pos_7_days_ago']].min(axis=1).values
    # people per test
    people_per_test = df['people_per_test_normalized'].values

    covid_case_count_model = pm.Model()

    with covid_case_count_model:

        # Priors:
        mu_0 = pm.Beta('mu_0', alpha=1, beta=100, testval=0.01)
        # sig_0 = pm.Uniform('sig_0', lower=0.0, upper=mu_0 * (1 - mu_0))
        alpha = pm.Bound(pm.Normal, lower=0.0)(
            'alpha', mu=8, sigma=3, shape=1)
        beta = pm.Bound(pm.Normal, upper=0.0)(
            'beta', mu=-1, sigma=1, shape=1)
        # beta = pm.Normal('beta', mu=0, sigma=1, shape=3)
        sigma = pm.HalfNormal('sigma', sigma=0.5, testval=0.1)
        # sigma_1 = pm.HalfNormal('sigma_1', sigma=2, testval=0.1)

        # Model probability of case under-reporting as logistic regression:
        mu_model_logit = alpha + beta * people_per_test
        tau_logit = pm.Normal('tau_logit',
                              mu=mu_model_logit,
                              sigma=sigma,
                              shape=n)
        tau = pm.math.invlogit(tau_logit)  # symbolic logistic transform (np.exp does not apply to Theano tensors)

        c_star = c_obs / tau

        # Binomial likelihood:
        d = pm.Binomial('d',
                        n=c_star,
                        p=mu_0,
                        observed=d_obs)

    return covid_case_count_model
Example #7
    def get_model(self, vfield0, sigma_vfield0, rlim=1 * u.kpc):
        # Number of prior mixture components:
        with pm.Model() as model:

            # True distance:
            BoundedR = pm.Bound(UniformSpaceDensity,
                                lower=0,
                                upper=rlim.to_value(u.pc))
            r = BoundedR("r", rlim.to_value(u.pc), shape=(self.N, 1))

            # Milky Way velocity distribution
            K = vfield0.shape[0]
            w = pm.Dirichlet('w', a=np.ones(K))

            # Set up means and variances:
            meanvs = []
            sigvs = []
            for k in range(K):
                vtmp = pm.Normal(f'vmean{k}', vfield0[k], 10., shape=3)  # HACK

                BoundedNormal = pm.Bound(pm.Normal, lower=1.5, upper=5.3)
                lnstmp = BoundedNormal(f'lns{k}', np.log(sigma_vfield0[k]),
                                       0.2)
                stmp = pm.Deterministic(f'vsig{k}', tt.exp(lnstmp))

                meanvs.append(vtmp)
                sigvs.append(stmp)

            pvdists = []
            for k in range(K):
                pvtmp = pm.MvNormal.dist(meanvs[k],
                                         tau=np.eye(3) * 1 / sigvs[k]**2,
                                         shape=3)
                pvdists.append(pvtmp)
            vxyz = pm.Mixture('vxyz',
                              w=w,
                              comp_dists=pvdists,
                              shape=(self.N, 3))

            # Velocity in tangent plane coordinates
            vtan = tt.batched_dot(self.Ms, vxyz)

            model_pm = vtan[:, :2] / r * pc_mas_yr_per_km_s
            model_rv = vtan[:, 2:3]
            model_y = tt.concatenate((1000 / r, model_pm, model_rv), axis=1)

            pm.Deterministic('model_y', model_y)
            # val = pm.MvNormal('like', mu=model_y, tau=Cinv, observed=y)
            dy = self.ys - model_y
            pm.Potential(
                'chisq',
                -0.5 * tt.batched_dot(dy, tt.batched_dot(self.Cinvs, dy)))

        return model
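Example #8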
def hierarchical_model(**kwargs):
    '''
     Hierarchical model for an ROI (region of interest)
    '''

    data = load_dataset(group=kwargs['group'],
                        roi=kwargs['roi'],
                        task=kwargs['task'],
                        condition=kwargs['condition'],
                        path=kwargs['path'])

    subject_ids = data.subject.unique()
    subject_idx = np.array([idx - 1 for idx in data.subject.values])
    subject_idx = handle_irregular_idx(subject_idx)
    n_subjects = len(subject_ids)

    with pm.Model() as hierarchical_model:

        # Hyperpriors for group nodes
        mu_a = pm.Normal('mu_alpha', mu=0., sd=1)
        mu_b = pm.Normal('mu_beta', mu=0., sd=1)

        #sigma_a = pm.HalfCauchy('sigma_alpha', 1)
        #sigma_b = pm.HalfCauchy('sigma_b', 1)

        sigma_a = pm.Uniform('sigma_alpha', 0, 100)
        sigma_b = pm.Uniform('sigma_beta', 0, 100)

        # Parameters for each subject
        BoundedNormalAlpha = pm.Bound(pm.Normal, lower=0, upper=5)
        BoundedNormalBeta = pm.Bound(pm.Normal, lower=0, upper=1)

        a = BoundedNormalAlpha('alpha', mu=mu_a, sd=sigma_a, shape=n_subjects)
        b = BoundedNormalBeta('beta', mu=mu_b, sd=sigma_b, shape=n_subjects)

        # Model error
        #eps = pm.HalfCauchy('eps', 1)

        eps = pm.Uniform('eps', 0, 100)

        # Model prediction of voxel activation
        x_est = data.thresh_x.values * a[
            subject_idx] - data.n_activity.values * b[
                subject_idx] + data.noise.values

        # Data likelihood
        y = pm.Normal('y', mu=x_est, sd=eps, observed=data.raw_x.values)

    return hierarchical_model
Example #9
def make_model(spec: ModelSpec, dat, basis, unfold, aPosterior):
    with pm.Model() as model:
        # Prior for alpha
        if isinstance(spec.alphaPrior, float):
            alpha = spec.alphaPrior
        elif isinstance(spec.alphaPrior, AlphaLognormal):
            alphaLN = aPosterior.lognormal(scale=spec.alphaPrior.scale)
            alpha = pm.Lognormal('alpha', mu=alphaLN.mu, sd=alphaLN.sig)
        elif spec.alphaPrior is None:
            alpha = pm.HalfFlat('alpha')
        else:
            raise Exception("Unknown prior for alpha")
        # Prior for phi
        nPhi = len(basis)
        om = unfold.omegas[0].mat
        chol = np.linalg.cholesky(np.linalg.inv(om))
        low = np.repeat(0, nPhi)
        if spec.phiPrior == "positive":
            phiDistr = pm.Bound(pm.MvNormal, lower=low)
        elif spec.phiPrior == "any":
            phiDistr = pm.MvNormal
        else:
            raise Exception("Unknown prior for phi")
        phi = phiDistr('phi',
                       mu=np.zeros(nPhi),
                       chol=chol / np.sqrt(alpha),
                       shape=nPhi)
        # Experimental data
        f = pm.Normal(
            'f',
            mu=pm.math.dot(unfold.K, phi),
            sd=dat['err'].values,
            shape=len(dat),
            observed=dat['cnt'].values,
        )
    return model
Example #10
    def singleband_gp(self, lower=5, upper=50, seed=42):
        # x, y, yerr = make_data_nice(x, y, yerr)
        np.random.seed(seed)

        with pm.Model() as model:

            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2",
                              mu=2 * np.log(np.mean(self.yerr)),
                              sd=2.0)

            # A term to describe the non-periodic variability
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(self.y)), sd=5.0)
            logw0 = pm.Normal("logw0", mu=np.log(2 * np.pi / 10), sd=5.0)

            # The parameters of the RotationTerm kernel
            logamp = pm.Normal("logamp", mu=np.log(np.var(self.y)), sd=5.0)
            BoundedNormal = pm.Bound(pm.Normal,
                                     lower=np.log(lower),
                                     upper=np.log(upper))
            logperiod = BoundedNormal("logperiod",
                                      mu=np.log(self.init_period),
                                      sd=5.0)
            logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
            logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
            mix = xo.distributions.UnitUniform("mix")

            # Track the period as a deterministic
            period = pm.Deterministic("period", tt.exp(logperiod))

            # Set up the Gaussian Process model
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4,
                                         log_w0=logw0,
                                         Q=1 / np.sqrt(2))
            kernel += xo.gp.terms.RotationTerm(log_amp=logamp,
                                               period=period,
                                               log_Q0=logQ0,
                                               log_deltaQ=logdeltaQ,
                                               mix=mix)
            gp = xo.gp.GP(kernel,
                          self.x,
                          self.yerr**2 + tt.exp(logs2),
                          mean=mean)

            # Compute the Gaussian Process likelihood and add it into
            # the PyMC3 model as a "potential"
            gp.marginal("gp", observed=self.y)

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)

        self.model = model
        self.map_soln = map_soln
        return map_soln, model
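Example #11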
def get_bayesian_model(model_type,
                       Y,
                       shots,
                       m_gates,
                       mu_AB,
                       cov_AB,
                       alpha_ref,
                       alpha_lower=0.5,
                       alpha_upper=0.999,
                       alpha_testval=0.9,
                       p_lower=0.9,
                       p_upper=0.999,
                       p_testval=0.95,
                       RvsI=None,
                       IvsR=None,
                       sigma_theta=0.004):
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html

    RB_model = pm.Model()
    with RB_model:
        total_shots = np.full(Y.shape, shots)

        # Priors for unknown model parameters
        alpha = pm.Uniform("alpha",
                           lower=alpha_lower,
                           upper=alpha_upper,
                           testval=alpha_ref)

        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)

        AB = BoundedMvNormal("AB",
                             mu=mu_AB,
                             testval=mu_AB,
                             cov=np.diag(cov_AB),
                             shape=(2))

        if model_type == "hierarchical":
            GSP = AB[0] * alpha**m_gates + AB[1]
            theta = pm.Beta("GSP", mu=GSP, sigma=sigma_theta, shape=Y.shape[1])
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_h", p=theta, observed=Y, n=total_shots)

        elif model_type == "tilde":
            p_tilde = pm.Uniform("p_tilde",
                                 lower=p_lower,
                                 upper=p_upper,
                                 testval=p_testval)
            GSP = AB[0] * (RvsI * alpha**m_gates + IvsR *
                           (alpha * p_tilde)**m_gates) + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_t", p=GSP, observed=Y, n=total_shots)

        else:  # default model "pooled"
            GSP = AB[0] * alpha**m_gates + AB[1]
            # Likelihood (sampling distribution) of observations
            p = pm.Binomial("Counts_p", p=GSP, observed=Y, n=total_shots)

    return RB_model
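Example #12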
def get_bayesian_model_hierarchical(
        model_type,
        Y,
        # sampling/prior arguments, as in get_bayesian_model above
        shots,
        m_gates,
        mu_AB,
        cov_AB,
        alpha_ref,
        alpha_lower=0.5,
        alpha_upper=0.999,
        sigma_theta=0.004):  # modified for accelerated BM with EPCest as extra parameter
    # Bayesian model
    # from https://iopscience.iop.org/article/10.1088/1367-2630/17/1/013042/pdf
    # see https://docs.pymc.io/api/model.html

    RBH_model = pm.Model()
    with RBH_model:

        # Priors for unknown model parameters
        alpha = pm.Uniform("alpha",
                           lower=alpha_lower,
                           upper=alpha_upper,
                           testval=alpha_ref)

        BoundedMvNormal = pm.Bound(pm.MvNormal, lower=0.0)

        AB = BoundedMvNormal("AB",
                             mu=mu_AB,
                             testval=mu_AB,
                             cov=np.diag(cov_AB),
                             shape=(2))

        # Expected value of outcome

        GSP = AB[0] * alpha**m_gates + AB[1]

        total_shots = np.full(Y.shape, shots)
        theta = pm.Beta("GSP", mu=GSP, sigma=sigma_theta, shape=Y.shape[1])

        # Likelihood (sampling distribution) of observations
        p = pm.Binomial("Counts", p=theta, observed=Y, n=total_shots)

    return RBH_model
Example #13
        def _gen_d_vars_pm(tmñ=(), fmt_nmbrs='{}'):
            egr = {}
            norm = {}
            for p, líms in líms_paráms.items():
                nmbr = fmt_nmbrs.format(p)
                if aprioris is None:
                    if líms[0] is líms[1] is None:
                        final = pm.Normal(name=nmbr, mu=0, sd=100 ** 2, shape=tmñ)
                    else:
                        dist_norm = pm.Normal(name='transf' + nmbr, mu=0, sd=1, shape=tmñ)
                        if líms[0] is None:
                            final = pm.Deterministic(nmbr, líms[1] - pm.math.exp(dist_norm))
                        elif líms[1] is None:
                            final = pm.Deterministic(nmbr, líms[0] + pm.math.exp(dist_norm))
                        else:
                            final = pm.Deterministic(
                                nmbr, (pm.math.tanh(dist_norm) / 2 + 0.5) * (líms[1] - líms[0]) + líms[0]
                            )
                        norm[p] = dist_norm
                else:
                    dist, prms = aprioris[p]
                    if (líms[0] is not None or líms[1] is not None) and dist != pm.Uniform:
                        acotada = pm.Bound(dist, lower=líms[0], upper=líms[1])
                        final = acotada(nmbr, shape=tmñ, **prms)
                    else:
                        if dist == pm.Uniform:
                            prms['lower'] = max(prms['lower'], líms[0])
                            prms['upper'] = min(prms['upper'], líms[1])
                        final = dist(nmbr, shape=tmñ, **prms)

                egr[p] = final
            for p, v in egr.items():
                if p not in norm:
                    norm[p] = v
            return {'final': egr, 'norm': norm}
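
For reference, the three reparameterizations in _gen_d_vars_pm map an unconstrained draw z ~ Normal(0, 1) onto the required support: upper - exp(z) for an upper bound only, lower + exp(z) for a lower bound only, and (tanh(z)/2 + 0.5) * (upper - lower) + lower when both bounds are given, which squashes the real line onto the open interval (lower, upper). A quick numpy check of the two-sided transform:

import numpy as np

z = np.random.randn(100000)
lower, upper = -2.0, 3.0
x = (np.tanh(z) / 2 + 0.5) * (upper - lower) + lower
assert lower < x.min() and x.max() < upper  # every draw lands strictly inside (lower, upper)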
Example #14
def sample_explore(n=20, ntrials=25, steepness_alpha=1., steepness_beta=1., x_midpoint_prec=4., yscale_alpha=1., yscale_beta=.5):
    
    trials = np.arange(ntrials)
    steepness_dist = pm.Gamma.dist(steepness_alpha, steepness_beta)
    BoundNormal = pm.Bound(pm.Normal, lower=0, upper=ntrials)
    x_midpoint_dist = BoundNormal.dist(mu=ntrials/2., sd=ntrials/x_midpoint_prec)
    yscale_dist = pm.Gamma.dist(yscale_alpha, yscale_beta)
    
    steepness = steepness_dist.random(size=n)
    x_midpoint = x_midpoint_dist.random(size=n)
    yscale = yscale_dist.random(size=n)
    
    position = trials[:,None] - x_midpoint[None,:]
    growth = np.exp(-steepness * position)
    denom = 1 + growth * yscale
    explore_param = 1 - (1. / denom)
    
    x = np.linspace(0,30)
    steepness_pdf = np.exp(steepness_dist.logp(x).eval())
    x_midpoint_pdf = np.exp(x_midpoint_dist.logp(x).eval())
    yscale_pdf = np.exp(yscale_dist.logp(x).eval())
    
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8,5))
    ax1.plot(x, steepness_pdf)
    ax1.plot(x, x_midpoint_pdf)
    ax1.plot(x, yscale_pdf)
    ax2.plot(explore_param)
Example #15
    def granulation_model(self):
        peak = self.period_prior()
        x = self.lc.lcf.time
        y = self.lc.lcf.flux
        yerr = self.lc.lcf.flux_err
        with pm.Model() as model:
            # The mean flux of the time series
            mean = pm.Normal("mean", mu=0.0, sd=10.0)

            # A jitter term describing excess white noise
            logs2 = pm.Normal("logs2", mu=2*np.log(np.min(sigmaclip(yerr)[0])), sd=1.0)

            logw0 = pm.Bound(pm.Normal, lower=-0.5, upper=np.log(2 * np.pi / self.min_period))("logw0", mu=0.0, sd=5)
            logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y)), sd=5)
            kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2))

            #GP model
            gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2))

            # Compute the Gaussian Process likelihood and add it into
            # the PyMC3 model as a "potential"
            pm.Potential("loglike", gp.log_likelihood(y - mean))

            # Compute the mean model prediction for plotting purposes
            pm.Deterministic("pred", gp.predict())

            # Optimize to find the maximum a posteriori parameters
            map_soln = xo.optimize(start=model.test_point)
        return model, map_soln
Example #16
        def submodel1(x, y, yerr, parent):
            with pm.Model(name="rotation_", model=parent) as submodel:
                # The mean flux of the time series
                mean1 = pm.Normal("mean1", mu=0.0, sd=10.0)

                # A jitter term describing excess white noise
                logs21 = pm.Normal("logs21", mu=np.log(np.mean(yerr)), sd=2.0)

                # The parameters of the RotationTerm kernel
                logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
                #logperiod = pm.Uniform("logperiod", lower=np.log(vgp.min_period), upper=np.log(vgp.max_period))
                logperiod = pm.Bound(pm.Normal,
                                     lower=np.log(self.min_period),
                                     upper=np.log(self.max_period))(
                                         "logperiod", mu=np.log(peak["period"]), sd=1.0)
                logQ0 = pm.Uniform("logQ0", lower=-15, upper=5)
                logdeltaQ = pm.Uniform("logdeltaQ", lower=-15, upper=5)
                mix = pm.Uniform("mix", lower=0, upper=1.0)

                # Track the period as a deterministic
                period = pm.Deterministic("period", tt.exp(logperiod))
    
                kernel1 = xo.gp.terms.RotationTerm(
                    log_amp=logamp,
                    period=period,
                    log_Q0=logQ0,
                    log_deltaQ=logdeltaQ,
                    mix=mix)
            
                gp1 = xo.gp.GP(kernel1, x, yerr**2 + tt.exp(logs21))

                # Compute the Gaussian Process likelihood and add it into
                # the PyMC3 model as a "potential"
                loglike1 = gp1.log_likelihood(y - mean1)
                #pred1  = pm.Deterministic("pred1", gp1.predict())
        
            return logperiod, logQ0, gp1, loglike1
Example #17
    def test_bounded(self):
        # A bit crude...
        BoundedNormal = pm.Bound(pm.Normal, upper=0)

        def ref_rand(size, tau):
            return -st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
        pymc3_random(BoundedNormal, {'tau': Rplus}, ref_rand=ref_rand)
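
The reference distribution in test_bounded relies on a standard fact: a zero-mean normal with precision tau, truncated to the negative half-line, is the mirror image of a half-normal with scale tau ** -0.5. A quick sanity check of that equivalence (st is scipy.stats, as in the test above):

import numpy as np
import pymc3 as pm
import scipy.stats as st

BoundedNormal = pm.Bound(pm.Normal, upper=0)
draws = BoundedNormal.dist(tau=1.0).random(size=100000)
ref = -st.halfnorm.rvs(size=100000, scale=1.0)
print(draws.mean(), ref.mean())  # both should be close to -sqrt(2/pi) ~ -0.80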
Example #18
def bayes_model(training_samples):
    """
    Solve for posterior distributions using pymc3; returns the model and trace.
    """
    with pm.Model() as model:

        # Priors for the mu parameter of the
        # Poisson distribution P.
        # Note: mu = mean(P)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 0, 5*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 0, 5*60
        )
        mu_no_goal = pm.Uniform(
            'mu_no_goal', 0, 5*60
        )
        
        # Observations to train the model on
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=mu_goal_for,
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=mu_goal_against,
            observed=training_samples[1],
        )
        obs_no_goal = pm.Poisson(
            'obs_no_goal',
            mu=mu_no_goal,
            observed=training_samples[2],
        )
        
        # Outcome probabilities
        p_goal_for = pm.Bound(pm.Poisson, upper=5*60)('p_goal_for', mu=mu_goal_for)
        p_goal_against = pm.Bound(pm.Poisson, upper=5*60)('p_goal_against', mu=mu_goal_against)
        p_no_goal = pm.Bound(pm.Poisson, upper=5*60)('p_no_goal', mu=mu_no_goal)
        
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
        
    return model, trace
Example #19
    def test_bounded_dist(self):
        with pm.Model() as model:
            BoundedNormal = pm.Bound(pm.Normal, lower=0.0)
            x = BoundedNormal("x", mu=aet.zeros((3, 1)), sd=1 * aet.ones((3, 1)), shape=(3, 1))

        with model:
            prior_trace = pm.sample_prior_predictive(5)
            assert prior_trace["x"].shape == (5, 3, 1)
Example #20
        def _gen_d_vars_pm_jer():
            dists_base = _gen_d_vars_pm(tmñ=(len(set(nv_jerarquía[0])), ),
                                        fmt_nmbrs='mu_{}_nv_' +
                                        str(len(nv_jerarquía) - 1))

            egr = {}

            for p, líms in líms_paráms.items():
                mu = dists_base[p]
                sg = pm.HalfNormal(name='sg_{}'.format(p), sd=10,
                                   shape=(1, ))  # type: pm.model.TransformedRV
                if líms[0] is líms[1] is None:
                    for í, nv in enumerate(nv_jerarquía[:-1]):
                        últ_niv = í == (len(nv_jerarquía) - 2)
                        tmñ_nv = nv.shape

                        nmbr_mu = p if últ_niv else 'mu_{}_nv_{}'.format(
                            p,
                            len(nv_jerarquía) - 2 - í)
                        nmbr_sg = 'sg_{}_nv_{}'.format(
                            p,
                            len(nv_jerarquía) - 2 - í)

                        mu = pm.Normal(name=nmbr_mu,
                                       mu=mu[nv],
                                       sd=sg[nv],
                                       shape=tmñ_nv)
                        if not últ_niv:
                            sg = pm.HalfNormal(name=nmbr_sg,
                                               sd=obs_y.values.ptp(),
                                               shape=tmñ_nv)
                else:
                    for í, nv in enumerate(nv_jerarquía[:-1]):
                        tmñ_nv = nv.shape
                        últ_niv = í == (len(nv_jerarquía) - 2)
                        nmbr_mu = p if últ_niv else 'mu_{}_nv_{}'.format(
                            p,
                            len(nv_jerarquía) - 2 - í)
                        nmbr_sg = 'sg_{}_nv_{}'.format(
                            p,
                            len(nv_jerarquía) - 2 - í)

                        acotada = pm.Bound(pm.Normal,
                                           lower=líms[0],
                                           upper=líms[1])
                        mu = acotada(nmbr_mu,
                                     mu=mu[nv],
                                     sd=sg[nv],
                                     shape=tmñ_nv)
                        if not últ_niv:
                            sg = pm.HalfNormal(name=nmbr_sg,
                                               sd=obs_y.values.ptp(),
                                               shape=tmñ_nv)

                egr[p] = mu

            return egr
Example #21
def harmonic_fit_mcmc(time, X, frq, mask=None, axis=0, basetime=None, **kwargs):
    """
    Harmonic fitting using Bayesian inference
    """
    tday = 86400.
    
    # Convert the time to days
    dtime = SecondsSince(time, basetime=basetime )
    dtime /= tday
    
    # Convert the frequencies to radians / day
    omega = [ff*tday for ff in frq]
    
    # Number of parameters
    n_params = 2*len(omega) + 1
    nomega = len(omega)
    
    print('Number of Parameters: %d\n' % n_params, omega)

    with pm.Model() as my_model:
        ###
        # Create priors for each of our variables
        BoundNormal = pm.Bound(pm.Normal, lower=0.0)

        # Mean
        beta_mean = pm.Normal('beta_mean', mu=0, sd=1)
        
        
        beta_re = pm.Normal('beta_re', mu=1., sd=5., shape=nomega)
        beta_im = pm.Normal('beta_im', mu=1., sd=5., shape=nomega)

        #beta_s=[beta_mean]

        # Harmonics
        #for n in range(0,2*len(omega),2):
        #    beta_s.append(pm.Normal('beta_%d_re'%(n//2), mu=1., sd = 5.))
        #    beta_s.append(pm.Normal('beta_%d_im'%(n//2), mu=1., sd = 5.))
        
        # The mean function
        mu_x = sine_model(beta_mean, beta_re, beta_im, omega, dtime)
        
        ###
        # Generate the likelihood function using the deterministic variable as the mean
        sigma = pm.HalfNormal('sigma',5.)
        X_obs = pm.Normal('X_obs', mu=mu_x, sd=sigma, observed=X)
        
        mp = pm.find_MAP()
        print(mp)
        
        # Inference step...
        step = None
        start = None
        trace = pm.sample(500, tune=1000, start=start, step=step, cores=2)
        # nuts_kwargs=dict(target_accept=0.95, max_treedepth=16, k=0.5)
    
    # Return the trace and the parameter stats
    return trace, my_model, omega, dtime
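Example #22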
def harmonic_fit_mcmc_arn(time, X, frq, arn=1, mask=None, axis=0, basetime=None, **kwargs):
    """
    Harmonic fitting using Bayesian inference
    
    Model the errors using an auto-regressive model
    """
    tday = 86400.
    # Convert the time to days
    dtime = SecondsSince(time, basetime=basetime )
    nt = dtime.shape[0]
    dtime /= tday
    
    # Convert the frequencies to radians / day
    omega = [ff*tday for ff in frq]
    #omega = frq
    
    # Number of parameters
    n_params = 2*len(omega) + 1
    
    print('Number of Parameters: %d\n' % n_params, omega)

    with pm.Model() as my_model:
        ###
        # Create priors for each of our variables
        BoundNormal = pm.Bound(pm.Normal, lower=0.0)

        # Mean
        beta_mean = pm.Normal('beta_mean', mu=0, sd=1)
        beta_s=[beta_mean]

        # Harmonics
        for n in range(0,2*len(omega),2):
            beta_s.append(pm.Normal('beta_%d_re' % (n//2), mu=1., sd=5.))
            beta_s.append(pm.Normal('beta_%d_im' % (n//2), mu=1., sd=5.))

        ###
        # Generate the likelihood function using the deterministic variable as the mean
        mu_x = sine_model_notrend(beta_s, omega, dtime)

        
        # Use an autoregressive model for the error term
        beta = pm.Normal('beta', mu=0, sigma=1., shape=arn)
        #sigma = pm.InverseGamma('sigma',1,1)
        sigma = pm.HalfNormal('sigma',1)

        X_obs = pm.AR('X_obs', beta, sigma=sigma, observed=X - mu_x)
        

        
        # Inference step...
        step = None
        start = None
        trace = pm.sample(500, tune=1000, start=start, step=step, cores=2,
                          return_inferencedata=False)
        # nuts_kwargs=dict(target_accept=0.95, max_treedepth=16, k=0.5)

    # Return the trace and the parameter stats
    return trace, my_model, omega, dtime
Example #23
    def _powerlaw_prior(self, key):
        """
        Map the bilby PowerLaw prior to a PyMC3 style function
        """

        # check prior is a PowerLaw
        pymc3, STEP_METHODS, floatX = self._import_external_sampler()
        theano, tt, as_op = self._import_theano()
        if isinstance(self.priors[key], PowerLaw):

            # check power law is set
            if not hasattr(self.priors[key], 'alpha'):
                raise AttributeError("No 'alpha' attribute set for PowerLaw prior")

            if self.priors[key].alpha < -1.:
                # use Pareto distribution
                palpha = -(1. + self.priors[key].alpha)

                return pymc3.Bound(
                    pymc3.Pareto, upper=self.priors[key].minimum)(
                    key, alpha=palpha, m=self.priors[key].maximum)
            else:
                class Pymc3PowerLaw(pymc3.Continuous):
                    def __init__(self, lower, upper, alpha, testval=1):
                        falpha = alpha
                        self.lower = lower = tt.as_tensor_variable(floatX(lower))
                        self.upper = upper = tt.as_tensor_variable(floatX(upper))
                        self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))

                        if falpha == -1:
                            self.norm = 1. / (tt.log(self.upper / self.lower))
                        else:
                            beta = (1. + self.alpha)
                            self.norm = 1. / (beta * (tt.pow(self.upper, beta) -
                                                      tt.pow(self.lower, beta)))

                        transform = pymc3.distributions.transforms.interval(
                            lower, upper)

                        super(Pymc3PowerLaw, self).__init__(
                            transform=transform, testval=testval)

                    def logp(self, value):
                        upper = self.upper
                        lower = self.lower
                        alpha = self.alpha

                        return pymc3.distributions.dist_math.bound(
                            alpha * tt.log(value) + tt.log(self.norm),
                            lower <= value, value <= upper)

                return Pymc3PowerLaw(key, lower=self.priors[key].minimum,
                                     upper=self.priors[key].maximum,
                                     alpha=self.priors[key].alpha)
        else:
            raise ValueError("Prior for '{}' is not a Power Law".format(key))
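
Example #23 also shows the usual fallback when pm.Bound cannot express the target density: subclass pymc3.Continuous, register an interval transform so the sampler works on an unconstrained space, and implement logp with pymc3.distributions.dist_math.bound, which assigns -inf log-probability outside [lower, upper].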
Example #24
    def __init__(self, light_curve, rotation_period, n_spots, contrast,
                 latitude_cutoff=10, partition_lon=True):
        pm.gp.mean.Mean.__init__(self)

        if contrast is None:
            contrast = pm.TruncatedNormal("contrast", lower=0.01, upper=0.99,
                                          testval=0.4, mu=0.5, sigma=0.5)

        self.f0 = pm.TruncatedNormal("f0", mu=1, sigma=1,
                                     testval=light_curve.flux.max(),
                                     lower=-1, upper=2)

        self.eq_period = pm.TruncatedNormal("P_eq",
                                            lower=0.8 * rotation_period,
                                            upper=1.2 * rotation_period,
                                            mu=rotation_period,
                                            sigma=0.2 * rotation_period,
                                            testval=rotation_period)

        BoundedHalfNormal = pm.Bound(pm.HalfNormal, lower=1e-6, upper=0.99)
        self.shear = BoundedHalfNormal("shear", sigma=0.2, testval=0.01)

        self.comp_inclination = pm.Uniform("comp_inc",
                                           lower=np.radians(0),
                                           upper=np.radians(90),
                                           testval=np.radians(1))

        if partition_lon:
            lon_lims = 2 * np.pi * np.arange(n_spots + 1) / n_spots
            lower = lon_lims[:-1]
            upper = lon_lims[1:]
        else:
            lower = 0
            upper = 2 * np.pi

        self.lon = pm.Uniform("lon",
                              lower=lower,
                              upper=upper,
                              shape=(1, n_spots))
        self.lat = pm.TruncatedNormal("lat",
                                      lower=np.radians(latitude_cutoff),
                                      upper=np.radians(180 - latitude_cutoff),
                                      mu=np.pi / 2,
                                      sigma=np.pi / 2,
                                      shape=(1, n_spots))
        self.rspot = BoundedHalfNormal("R_spot",
                                       sigma=0.2,
                                       shape=(1, n_spots),
                                       testval=0.3)
        self.contrast = contrast
        self.spot_period = self.eq_period / (1 - self.shear *
                                             pm.math.sin(self.lat - np.pi / 2) ** 2)
        self.sin_lat = pm.math.sin(self.lat)
        self.cos_lat = pm.math.cos(self.lat)
        self.sin_c_inc = pm.math.sin(self.comp_inclination)
        self.cos_c_inc = pm.math.cos(self.comp_inclination)
Example #25
def graded_response_model(dataset, n_categories):
    """Defines the mcmc model for the graded response model.
    
    Args:
        dataset: [n_items, n_participants] 2d array of measured responses
        n_categories: number of polytomous values (i.e. Number of Likert Levels)

    Returns:
        model: PyMC3 model to run
    """
    n_items, n_people = dataset.shape
    n_levels = n_categories - 1

    # Need small deviation in offset to
    # fit into pymc framework
    mu_value = linspace(-0.1, 0.1, n_levels)

    # Run through 0, K - 1
    observed = dataset - dataset.min()

    graded_mcmc_model = pm.Model()

    with graded_mcmc_model:
        # Ability Parameters
        ability = pm.Normal("Ability", mu=0, sigma=1, shape=n_people)

        # Discrimination multilevel prior
        rayleigh_scale = pm.Lognormal("Rayleigh_Scale",
                                      mu=0,
                                      sigma=1 / 4,
                                      shape=1)
        discrimination = pm.Bound(Rayleigh, lower=0.25)(name='Discrimination',
                                                        beta=rayleigh_scale,
                                                        offset=0.25,
                                                        shape=n_items)

        # Threshold multilevel prior
        sigma_difficulty = pm.HalfNormal('Difficulty_SD', sigma=1, shape=1)
        for ndx in range(n_items):
            thresholds = pm.Normal(
                f"Thresholds{ndx}",
                mu=mu_value,
                sigma=sigma_difficulty,
                shape=n_levels,
                transform=pm.distributions.transforms.ordered)

            # Compute the log likelihood
            kernel = discrimination[ndx] * ability
            probabilities = pm.OrderedLogistic(f'Log_Likelihood{ndx}',
                                               cutpoints=thresholds,
                                               eta=kernel,
                                               observed=observed[ndx])

    return graded_mcmc_model
Example #26
    def _build_model(self, data):
        data = _data_df2dict(data)
        with pm.Model() as model:
            # Priors
            k = pm.Bound(pm.Normal, lower=-0.005)('k', mu=0.001, sd=0.5)
            s = pm.Bound(pm.Normal, lower=0)('s', mu=1, sd=2)
            α = pm.Exponential('alpha', lam=1)
            ϵ = 0.01
            # Value functions
            VA = pm.Deterministic('VA', data['A'] * self._df(k, s, data['DA']))
            VB = pm.Deterministic('VB', data['B'] * self._df(k, s, data['DB']))
            # Choice function: psychometric
            P_chooseB = pm.Deterministic(
                'P_chooseB', choice_func_psychometric(α, ϵ, VA, VB))
            # Likelihood of observations
            r_likelihood = pm.Bernoulli('r_likelihood',
                                        p=P_chooseB,
                                        observed=data['R'])

        return model
Example #27
    def SIR_training(self, sequence, totalpopulation):
        self.popu = totalpopulation
        self.data = sequence[:]
        acc_infect = sequence[:, 0] / totalpopulation
        basic_model = pm.Model()
        n = len(acc_infect)
        I = acc_infect[0]
        R = 0
        S = 1 - I
        with basic_model:
            BoundedNormal = pm.Bound(pm.Normal, lower=0.0, upper=1.0)
            BoundedNormal2 = pm.Bound(pm.Normal, lower=1.0, upper=10.0)
            theta = []
            r0 = BoundedNormal2('R_0', mu=self.r0, sigma=0.72)
            gamma = BoundedNormal('gamma', mu=self.gamma, sigma=0.02)
            beta = pm.Deterministic('beta', r0 * gamma)
            ka = pm.Gamma('ka', 2, 0.0001)
            Lambda1 = pm.Gamma('Lambda1', 2, 0.0001)
            qu = pm.Uniform('qu', lower=0.1, upper=1.0)

            theta.append(
                pm.Deterministic('theta_' + str(0), pm.math.stack([S, I, R])))
            for i in range(1, n):
                states = theta[i - 1]
                solve_theta = pm.Deterministic(
                    'solve_theta_' + str(i),
                    ka * pm.math.stack([
                        states[0] - qu * beta * states[0] * states[1],
                        states[1] + qu * beta * states[0] * states[1] -
                        gamma * states[1], states[2] + gamma * states[1]
                    ]))
                theta.append(
                    pm.Dirichlet('theta_' + str(i), a=solve_theta, shape=(3)))
                real_infect = pm.Beta('real_infect_' + str(i),
                                      Lambda1 * theta[i][1],
                                      Lambda1 * (1 - theta[i][1]),
                                      observed=acc_infect[i])

            step = pm.Metropolis()
            Trace = pm.sample(2000, cores=16, chains=1, init='auto', step=step)
            self.trace = Trace
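Example #28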
def pymc_train(X_train, Y_train, X_test, Y_test, penalty='L2'):

    fs = np.sign(np.asarray([pearsonr(x, Y_train)[0] for x in X_train.T]))

    with pm.Model() as logistic_model:

        if penalty == 'L2':
            b = pm.Bound(pm.Normal, lower=0)('b',
                                             sd=1.,
                                             shape=X_train.shape[1])  # one coefficient per feature, matching fs
        else:
            b = pm.Bound(pm.Laplace, lower=0)('b',
                                              b=1.,
                                              shape=X_train.shape[1])

        u = pm.Normal('u', 0, sd=10)
        bs = pm.Deterministic('bs', tt.mul(fs, b))

        p = pm.math.invlogit(u + tt.dot(X_train, bs))

        likelihood = pm.Bernoulli('likelihood', p, observed=Y_train)

        return pm.sample(10000)
Example #29
    def mock(self,
             muB,
             sigmaB,
             muC,
             sigmaC,
             noise,
             n_galaxies,
             phi_limit=np.pi / 2.0):
        """
        Generate a mock dataset
        """

        # Define number of galaxies in the mock dataset and the uncertainty in the observation of q
        self.n_galaxies = n_galaxies
        self.sigmaq = noise

        # Galaxy orientation is isotropic in space. mu=cos(theta)
        self.mu = np.random.rand(self.n_galaxies)
        self.phi = phi_limit * np.random.rand(self.n_galaxies)

        # Generate truncated normal distributions using PYMC3
        tmpB = pm.Bound(pm.Normal, lower=0.0, upper=1.0)
        tmpC = pm.Bound(pm.Normal, lower=0.0, upper=1.0)

        # Sample from these distributions
        B = tmpB.dist(mu=muB, sd=sigmaB).random(size=self.n_galaxies)
        C = tmpC.dist(mu=muC, sd=sigmaC).random(size=self.n_galaxies)

        BC = np.vstack([B, C])
        BC = np.sort(BC, axis=0)
        self.C, self.B = BC[0, :], BC[1, :]

        # Compute axial ratios
        q = self.axial_ratio(self.B, self.C, self.mu, self.phi)

        # Generate fake observations by adding Gaussian noise
        self.qobs = q + self.sigmaq * np.random.randn(self.n_galaxies)
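
Example #29 uses pm.Bound purely as a random-number generator for truncated normals; no model context or inference is involved. The trick in isolation (values illustrative):

import pymc3 as pm

# 1000 draws from a Normal(0.5, 0.2) truncated to [0, 1], outside any pm.Model
TruncatedNormal = pm.Bound(pm.Normal, lower=0.0, upper=1.0)
samples = TruncatedNormal.dist(mu=0.5, sd=0.2).random(size=1000)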
Example #30
def analyze_robust1(data):
    with pm.Model() as model:
        # priors might be adapted here to be less flat
        mu = pm.Normal('mu',
                       mu=0.,
                       sd=100.,
                       shape=2,
                       testval=np.median(data.T, axis=1))
        bound_sigma = pm.Bound(pm.Normal, lower=0.)
        sigma = bound_sigma('sigma',
                            mu=0.,
                            sd=100.,
                            shape=2,
                            testval=mad(data, axis=0))
        rho = pm.Uniform('r', lower=-1., upper=1., testval=0)
        cov = pm.Deterministic('cov', covariance(sigma, rho))
        bound_nu = pm.Bound(pm.Gamma, lower=1.)
        nu = bound_nu('nu', alpha=2, beta=10)
        mult_t = pm.MvStudentT('mult_t',
                               nu=nu,
                               mu=mu,
                               Sigma=cov,
                               observed=data)
    return model