Example #1
    def track_1var_2par_ode_ess(self):
        def freefall(y, t, p):
            return 2.0 * p[1] - p[0] * y[0]

        # Times for observation
        times = np.arange(0, 10, 0.5)
        y = np.array([
            -2.01, 9.49, 15.58, 16.57, 27.58, 32.26, 35.13, 38.07, 37.36,
            38.83, 44.86, 43.58, 44.59, 42.75, 46.9, 49.32, 44.06, 49.86,
            46.48, 48.18
        ]).reshape(-1, 1)

        ode_model = pm.ode.DifferentialEquation(func=freefall,
                                                times=times,
                                                n_states=1,
                                                n_theta=2,
                                                t0=0)
        with pm.Model() as model:
            # Specify prior distributions for some of our model parameters
            sigma = pm.HalfCauchy("sigma", 1)
            gamma = pm.Lognormal("gamma", 0, 1)
            # If we know one of the parameter values, we can simply pass the value.
            ode_solution = ode_model(y0=[0], theta=[gamma, 9.8])
            # The ode_solution has a shape of (n_times, n_states)
            Y = pm.Normal("Y", mu=ode_solution, sd=sigma, observed=y)

            t0 = time.time()
            trace = pm.sample(500, tune=1000, chains=2, cores=2, random_seed=0)
            tot = time.time() - t0
        ess = pm.ess(trace)
        return np.mean([ess.sigma, ess.gamma]) / tot
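All of these track_* benchmark methods report the same metric: effective sample size divided by wall-clock sampling time. A minimal, self-contained sketch of that pattern; the toy model and the names in it are illustrative, not taken from the benchmark suite above:

import time
import numpy as np
import pymc3 as pm

# Toy data: noisy observations of a single mean (illustrative only).
data = np.random.default_rng(0).normal(1.0, 2.0, size=100)

with pm.Model():
    mu = pm.Normal("mu", 0.0, 10.0)
    pm.Normal("obs", mu=mu, sd=2.0, observed=data)

    t0 = time.time()
    trace = pm.sample(500, tune=1000, chains=2, cores=2, random_seed=0)
    tot = time.time() - t0

# pm.ess returns an xarray Dataset with one entry per model variable.
print(float(pm.ess(trace)["mu"].values) / tot, "effective samples per second")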
Example #2
    def track_glm_hierarchical_ess(self, init):
        with glm_hierarchical_model():
            start, step = pm.init_nuts(init=init, chains=self.chains,
                                       progressbar=False, random_seed=123)
            t0 = time.time()
            trace = pm.sample(draws=self.draws, step=step, cores=4, chains=self.chains,
                              start=start, random_seed=100, progressbar=False,
                              compute_convergence_checks=False)
            tot = time.time() - t0
        ess = float(pm.ess(trace, var_names=['mu_a'])['mu_a'].values)
        return ess / tot
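Examples #2 and #3 differ only in what they parameterize: the NUTS initializer versus the step method. pm.init_nuts returns a starting point and a tuned NUTS step that pm.sample can reuse, which keeps the warm-up cost out of the timed region. A minimal sketch with an illustrative model:

with pm.Model():
    mu = pm.Normal("mu", 0.0, 1.0)
    # Initialize outside the timed region, then hand the step to pm.sample.
    start, step = pm.init_nuts(init="advi", chains=2, progressbar=False)
    trace = pm.sample(draws=500, step=step, start=start, chains=2,
                      progressbar=False)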
Example #3
    def track_glm_hierarchical_ess(self, step):
        with glm_hierarchical_model():
            if step is not None:
                step = step()
            t0 = time.time()
            trace = pm.sample(draws=self.draws, step=step, cores=4, chains=4,
                              random_seed=100, progressbar=False,
                              compute_convergence_checks=False)
            tot = time.time() - t0
        ess = float(pm.ess(trace, var_names=['mu_a'])['mu_a'].values)
        return ess / tot
Example #4
def extract_mcmc_chain_from_pymc3_trace(trace, var_names, ndraws, nburn,
                                        njobs):
    nvars = len(var_names)
    samples = np.empty((nvars, (ndraws - nburn) * njobs))
    effective_sample_size = np.empty(nvars)
    for ii in range(nvars):
        samples[ii, :] = trace.get_values(var_names[ii],
                                          burn=nburn,
                                          chains=np.arange(njobs))
        effective_sample_size[ii] = pm.ess(trace)[var_names[ii]].values

    return samples, effective_sample_size
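A hypothetical call site for the helper above; the variable names, draw counts, and chain count are made up for illustration:

# Assumes a trace sampled as: trace = pm.sample(draws=1000, chains=4, ...)
samples, ess = extract_mcmc_chain_from_pymc3_trace(
    trace, var_names=["a", "b"], ndraws=1000, nburn=200, njobs=4)
# samples.shape == (2, (1000 - 200) * 4); ess holds one estimate per variable.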
Example #5
    def track_marginal_mixture_model_ess(self, init):
        model, start = mixture_model()
        with model:
            _, step = pm.init_nuts(init=init, chains=self.chains,
                                   progressbar=False, random_seed=123)
            # One independent copy of the start dict per chain.
            start = [dict(start) for _ in range(self.chains)]
            t0 = time.time()
            trace = pm.sample(draws=self.draws, step=step, cores=4, chains=self.chains,
                              start=start, random_seed=100, progressbar=False,
                              compute_convergence_checks=False)
            tot = time.time() - t0
        ess = pm.ess(trace, var_names=['mu'])['mu'].values.min()  # worst case
        return ess / tot
Example #6
    def ess(self, dim=0, burn=50, clip=0, is_ess=True):
        ess = pymc3.ess(self.fetch_samples()[burn:len(self.samples) - clip,
                                             dim].detach().numpy())
        if self.shadow and is_ess:
            # Importance-sampling correction: scale the autocorrelation ESS
            # by the efficiency of the Radon-Nikodym weights.
            weights = torch.cat(self.radon_nikodym[burn:len(self.samples) -
                                                   clip],
                                dim=0).reshape(-1, 1).detach()
            normed_weights = weights / weights.sum()
            is_ess = torch.sum(normed_weights**2).reciprocal()
            adjusted_ess = ess * is_ess / weights.shape[0]
            return adjusted_ess
        else:
            return ess
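The correction in Example #6 is Kish's effective sample size for importance weights: with normalized weights w_i, ESS_IS = 1 / sum(w_i**2), and the autocorrelation ESS is then scaled by ESS_IS / N. A standalone NumPy sketch of the same quantity (the function name is ours, not from the code above):

import numpy as np

def kish_ess(weights):
    # (sum w)^2 / sum(w^2); equals 1 / sum(w_norm^2) for normalized weights.
    w = np.asarray(weights, dtype=float)
    return w.sum() ** 2 / np.sum(w ** 2)

w = np.random.default_rng(0).exponential(size=1000)
print(kish_ess(w) / len(w))  # fraction of the nominal sample retained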
Example #7
def extract_mcmc_chain_from_pymc3_trace(trace, var_names, ndraws, nburn, njobs):
    nvars = len(var_names)
    samples = np.empty((nvars, (ndraws - nburn) * njobs))
    effective_sample_size = -np.ones(nvars)
    for ii in range(nvars):
        samples[ii, :] = trace.get_values(
            var_names[ii], burn=nburn, chains=np.arange(njobs))
        try:
            effective_sample_size[ii] = pm.ess(trace)[var_names[ii]].values
        except Exception:
            print('could not compute ess; likely an issue with theano')

    return samples, effective_sample_size
Example #8
def run(steppers, p):
    steppers = set(steppers)
    traces = {}
    effn = {}
    runtimes = {}

    with pm.Model() as model:
        if USE_XY:
            x = pm.Flat("x")
            y = pm.Flat("y")
            mu = np.array([0.0, 0.0])
            cov = np.array([[1.0, p], [p, 1.0]])
            z = pm.MvNormal.dist(mu=mu, cov=cov,
                                 shape=(2, )).logp(tt.stack([x, y]))
            pot = pm.Potential("logp_xy", z)
            start = {"x": 0, "y": 0}
        else:
            mu = np.array([0.0, 0.0])
            cov = np.array([[1.0, p], [p, 1.0]])
            z = pm.MvNormal("z", mu=mu, cov=cov, shape=(2, ))
            start = {"z": [0, 0]}

        for step_cls in steppers:
            name = step_cls.__name__
            t_start = time.time()
            mt = pm.sample(draws=10000,
                           chains=16,
                           cores=1,  # run the chains sequentially
                           step=step_cls(),
                           start=start)
            runtimes[name] = time.time() - t_start
            print("{} samples across {} chains".format(
                len(mt) * mt.nchains, mt.nchains))
            traces[name] = mt
            en = pm.ess(mt)
            print(f"effective: {en}\r\n")
            if USE_XY:
                effn[name] = np.mean(en["x"]) / len(mt) / mt.nchains
            else:
                effn[name] = np.mean(en["z"]) / len(mt) / mt.nchains
    return traces, effn, runtimes
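Example #8 (and its near-duplicate #9) normalizes mean ESS by the total number of kept draws, giving a per-draw efficiency in (0, 1] where 1 means every draw is effectively independent. The normalization in isolation, assuming an existing MultiTrace mt with a variable "z":

en = pm.ess(mt)
# 16 chains of 10000 draws -> len(mt) * mt.nchains total kept draws.
efficiency = float(np.mean(en["z"])) / (len(mt) * mt.nchains)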
Example #9
def run(steppers, p):
    steppers = set(steppers)
    traces = {}
    effn = {}
    runtimes = {}

    with pm.Model() as model:
        if USE_XY:
            x = pm.Flat('x')
            y = pm.Flat('y')
            mu = np.array([0., 0.])
            cov = np.array([[1., p], [p, 1.]])
            z = pm.MvNormal.dist(mu=mu, cov=cov,
                                 shape=(2, )).logp(tt.stack([x, y]))
            pot = pm.Potential('logp_xy', z)
            start = {'x': 0, 'y': 0}
        else:
            mu = np.array([0., 0.])
            cov = np.array([[1., p], [p, 1.]])
            z = pm.MvNormal('z', mu=mu, cov=cov, shape=(2, ))
            start = {'z': [0, 0]}

        for step_cls in steppers:
            name = step_cls.__name__
            t_start = time.time()
            mt = pm.sample(draws=10000,
                           chains=16,
                           cores=1,  # run the chains sequentially
                           step=step_cls(),
                           start=start)
            runtimes[name] = time.time() - t_start
            print('{} samples across {} chains'.format(
                len(mt) * mt.nchains, mt.nchains))
            traces[name] = mt
            en = pm.ess(mt)
            print('effective: {}\r\n'.format(en))
            if USE_XY:
                effn[name] = np.mean(en['x']) / len(mt) / mt.nchains
            else:
                effn[name] = np.mean(en['z']) / len(mt) / mt.nchains
    return traces, effn, runtimes
Example #10
    def test_neff(self):
        if hasattr(self, "min_n_eff"):
            n_eff = pm.ess(self.trace[self.burn:])
            for var in n_eff:
                npt.assert_array_less(self.min_n_eff, n_eff[var])
Example #11
    def _run_convergence_checks(self, trace, model):
        if trace.nchains == 1:
            msg = ("Only one chain was sampled, this makes it impossible to "
                   "run some convergence checks")
            warn = SamplerWarning(WarningType.BAD_PARAMS, msg, 'info', None,
                                  None, None)
            self._add_warnings([warn])
            return

        from pymc3 import rhat, ess

        valid_name = [rv.name for rv in model.free_RVs + model.deterministics]
        varnames = []
        for rv in model.free_RVs:
            rv_name = rv.name
            if is_transformed_name(rv_name):
                rv_name2 = get_untransformed_name(rv_name)
                rv_name = rv_name2 if rv_name2 in valid_name else rv_name
            if rv_name in trace.varnames:
                varnames.append(rv_name)

        # The calls evaluate before the names are rebound, so shadowing the
        # imported ess/rhat functions here is safe (if inelegant).
        self._ess = ess = ess(trace, var_names=varnames)
        self._rhat = rhat = rhat(trace, var_names=varnames)

        warnings = []
        rhat_max = max(val.max() for val in rhat.values())
        if rhat_max > 1.4:
            msg = ("The rhat statistic is larger than 1.4 for some "
                   "parameters. The sampler did not converge.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'error', None,
                                  None, rhat)
            warnings.append(warn)
        elif rhat_max > 1.2:
            msg = ("The rhat statistic is larger than 1.2 for some "
                   "parameters.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'warn', None,
                                  None, rhat)
            warnings.append(warn)
        elif rhat_max > 1.05:
            msg = ("The rhat statistic is larger than 1.05 for some "
                   "parameters. This indicates slight problems during "
                   "sampling.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'info', None,
                                  None, rhat)
            warnings.append(warn)

        eff_min = min(val.min() for val in ess.values())
        n_samples = len(trace) * trace.nchains
        if eff_min < 200 and n_samples >= 500:
            msg = ("The estimated number of effective samples is smaller than "
                   "200 for some parameters.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'error', None,
                                  None, ess)
            warnings.append(warn)
        elif eff_min / n_samples < 0.1:
            msg = ("The number of effective samples is smaller than "
                   "10% for some parameters.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'warn', None,
                                  None, ess)
            warnings.append(warn)
        elif eff_min / n_samples < 0.25:
            msg = ("The number of effective samples is smaller than "
                   "25% for some parameters.")
            warn = SamplerWarning(WarningType.CONVERGENCE, msg, 'info', None,
                                  None, ess)
            warnings.append(warn)

        self._add_warnings(warnings)
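The thresholds hard-coded above (r-hat at 1.05/1.2/1.4; ESS below 200 absolute, or below 10%/25% of the total draws) can be reproduced outside the sampler. A minimal sketch using pm.rhat and pm.ess directly, assuming an existing multi-chain trace:

rhat = pm.rhat(trace)
ess = pm.ess(trace)
rhat_max = max(float(val.max()) for val in rhat.values())
eff_min = min(float(val.min()) for val in ess.values())
n_samples = len(trace) * trace.nchains

if rhat_max > 1.05:
    print("r-hat above 1.05 for some parameter:", rhat_max)
if eff_min < 200 or eff_min / n_samples < 0.25:
    print("low effective sample size:", eff_min)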