Code Example #1
 def run_dynesty(self, kwargs=None):
     # run dynesty's dynamic nested sampler over the active parameters;
     # guard against the mutable-default-argument pitfall
     if kwargs is None:
         kwargs = {}
     solver = DynamicNestedSampler(self._core_likelihood, self.prior,
                                   len(self._activelist))
     solver.run_nested(**kwargs)
     results = solver.results
     # rescale unit-cube samples onto each parameter's physical range
     names = sorted(self._activelist)
     for i, name in enumerate(names):
         low, high = self.paramrange[name]
         results.samples[:, i] = umap(results.samples[:, i], [low, high])
     return results
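Note: `umap` is defined elsewhere in this project and is not shown in the excerpt. Judging from the call site, it linearly rescales unit-interval samples onto `[low, high]`; a minimal stand-in under that assumption:

# Hypothetical stand-in for the project's umap helper (an assumption
# inferred from the call site above): linearly map unit-interval
# values onto [low, high].
def umap(u, bounds):
    low, high = bounds
    return low + (high - low) * u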
Code Example #2
 def inference(self, nlive, nworkers=1):
     """
     Use dynamic nested sampling to infer the posterior.
     """
     with Pool(nworkers) as pool:
         sampler = DynamicNestedSampler(self.loglikelihood,
                                        self.prior_transform,
                                        len(self.prior),
                                        pool=pool,
                                        nlive=nlive)
         sampler.run_nested()
         res = sampler.results
     return res
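Note: when a `multiprocessing.Pool` is handed to dynesty, the sampler generally also needs `queue_size` to know how many likelihood calls to dispatch in parallel (without it, dynesty tries to read a `.size` attribute that `multiprocessing.Pool` does not provide). Also, for the dynamic sampler the live-point count is normally a run-time argument (`nlive_init`) rather than a constructor argument. A sketch of the same method with both points made explicit:

 def inference(self, nlive, nworkers=1):
     # sketch: explicit queue_size and run-time nlive_init
     with Pool(nworkers) as pool:
         sampler = DynamicNestedSampler(self.loglikelihood,
                                        self.prior_transform,
                                        len(self.prior),
                                        pool=pool,
                                        queue_size=nworkers)
         sampler.run_nested(nlive_init=nlive)
         res = sampler.results
     return res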
Code Example #3
 def posterior_sampler(self, t, sample_idx, ndims=2):
     nlive = 1024  # number of (initial) live points
     bound = 'multi'  # use MultiNest-style multi-ellipsoid bounding
     sample = 'rwalk'  # draw new points via random walks
     self.X = np.zeros(shape=(self.N, len(sample_idx)))
     self.S = np.zeros(shape=(self.N, len(sample_idx)))
     for i in range(self.N):
         _, sample_trajectories = self.simulate(t, self.action, sample_idx)
         self.X[i, :] = sample_trajectories[:, 0]
         self.S[i, :] = sample_trajectories[:, 1]
     dsampler = DynamicNestedSampler(self.loglikelihood,
                                     self.prior_transform,
                                     ndims,
                                     bound=bound,
                                     sample=sample)
     dsampler.run_nested(nlive_init=nlive)
     res = dsampler.results
     # equal-weight posterior draws via the importance weights
     weights = np.exp(res['logwt'] - res['logz'][-1])
     self.samples = resample_equal(res.samples, weights)
     return dsampler
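The `weights` line implements the standard nested-sampling importance weights, exp(logwt - logz[-1]). Bundling the weight computation and resampling into one small helper makes the pattern reusable across these examples (a sketch, assuming dynesty's `resample_equal` utility):

# Equal-weight posterior draws from a dynesty Results object.
import numpy as np
from dynesty.utils import resample_equal

def equal_weight_samples(res):
    # importance weights: exp(log-weight minus final log-evidence)
    weights = np.exp(res['logwt'] - res['logz'][-1])
    return resample_equal(res.samples, weights)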
Code Example #4
    def execute(self):
        from dynesty import NestedSampler, DynamicNestedSampler

        ndim = self.pipeline.nvaried

        sampling_method = None if self.sample == "auto" else self.sample

        if self.mode == "static":
            sampler = NestedSampler(log_probability_function,
                                    prior_transform,
                                    ndim,
                                    nlive=self.nlive,
                                    bound=self.bound,
                                    sample=sampling_method,
                                    update_interval=self.update_interval,
                                    first_update={
                                        'min_ncall': self.min_ncall,
                                        'min_eff': self.min_eff
                                    },
                                    queue_size=self.queue_size,
                                    pool=self.pool)

            sampler.run_nested(dlogz=self.dlogz)

        else:
            sampler = DynamicNestedSampler(
                log_probability_function,
                prior_transform,
                ndim,
                bound=self.bound,
                sample=sampling_method,
                # update_interval = self.update_interval,
                queue_size=self.queue_size,
                pool=self.pool)
            sampler.run_nested(dlogz_init=self.dlogz)

        results = sampler.results
        results.summary()

        for sample, logwt, logl in zip(results['samples'], results['logwt'],
                                       results['logl']):
            self.output.parameters(sample, logwt, logl)

        self.output.final("ncall", results['ncall'])
        self.output.final("efficiency", results['eff'])
        self.output.final("log_z", results['logz'])
        self.output.final("log_z_err", results['logzerr'])

        self.converged = True
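`log_probability_function` and `prior_transform` are module-level callables that this excerpt assumes: dynesty needs a log-likelihood over the physical parameters and a map from the unit hypercube onto them. An illustrative sketch only; the `pipeline` method names below are hypothetical, not the project's actual API:

# Illustrative sketch; the pipeline method names are hypothetical.
def prior_transform(u):
    # map unit-cube coordinates onto each varied parameter's prior range
    return pipeline.denormalize_vector(u)

def log_probability_function(p):
    # log-likelihood of the physical parameter vector
    return pipeline.log_likelihood(p)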
Code Example #5
File: fit21cm.py Project: guillochon/fit-21cm
    ('b0', (0, 4000)),
    ('b1', (-2, 2)),
    ('b2', (-2, 2)),
    ('b3', (-2, 2)),
    ('b4', (-1500, 1500)),
    ('A', (0, 10)),
    ('nu0', (min_nu, max_nu)),
    ('w', (0, 200)),
    ('tau', (0, 100)),
    ('sigma', (0, 10))))

ndim = len(free_vars)

nu_c = (max_nu + min_nu) / 2.0

dsampler = DynamicNestedSampler(log_like, ptform, ndim, sample='rwalk')
dsampler.run_nested(dlogz_init=0.001, nlive_init=1000)

res = dsampler.results

weights = res['logwt'] - np.max(res['logwt'])  # shift without mutating res in place

# plt.plot(xdata, ydata, color='black', lw=1.5)
corner_weights = []
corner_vars = []
rms = []
for si, samp in enumerate(res['samples']):
    if weights[si] < -7:
        continue
    corner_weights.append(np.exp(weights[si]))
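The loop keeps only samples whose log-weight is within 7 nats of the maximum, i.e. a relative weight above exp(-7) ≈ 1e-3. The same cut can be written in vectorized form (a sketch using the arrays defined above):

# Vectorized form of the same weight cut.
mask = weights >= -7
corner_weights = np.exp(weights[mask])
kept_samples = res['samples'][mask]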
Code Example #6
def main(args):  # expects a parsed argparse Namespace
    if args.results == "none":
        ndim = len(JOINT_PRIOR)
        date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        image_positions = pd.read_csv(args.image_positions)
        pprint(image_positions)
        # A=0, B=1, C=2, D=3
        x_image = image_positions["theta_x"].to_numpy()
        y_image = image_positions["theta_y"].to_numpy()
        quad_model = QuadPseudoNIELensModel(x_image, y_image, args.plate_scale)
        time_delays = pd.read_csv(args.time_delays)
        pprint(time_delays)

        # Expected: DA=0, DB=1, DC=2 (index)
        sigma_t = time_delays["sigma"].to_numpy()  # units of days
        delta_t = time_delays["delta_t"].to_numpy()
        joint_loglikelihood = joint_loglikelihood_func(quad_model, delta_t,
                                                       sigma_t)

        with Pool(args.nworkers) as pool:
            sampler = DynamicNestedSampler(joint_loglikelihood,
                                           joint_prior_transform,
                                           ndim,
                                           pool=pool,
                                           nlive=args.nlive,
                                           queue_size=args.nworkers)
            sampler.run_nested()
            res = sampler.results

        # save results
        with open(f"../results/joint_result_{date}.p", "wb") as f:
            pickle.dump(res, f)
    else:  # we just want to plot the result from an older run
        with open(args.results, "rb") as f:
            res = pickle.load(f)
        ndim = res.samples.shape[1]

    if args.plot_results:
        # trace plot
        fig, axs = dyplot.traceplot(
            res,
            show_titles=True,
            trace_cmap='plasma',
            connect=True,
            connect_highlight=range(5),
            labels=LABELS,
        )
        fig.tight_layout(pad=2.0)
        fig.savefig("../figures/joint_inference_trace_plot.png",
                    bbox_inches="tight")

        # corner points plot
        fig, axes = plt.subplots(ndim - 1, ndim - 1, figsize=(15, 15))
        axes = axes.reshape([ndim - 1, ndim - 1])
        fg, ax = dyplot.cornerpoints(res,
                                     cmap='plasma',
                                     kde=False,
                                     fig=(fig, axes),
                                     labels=LABELS)
        fg.savefig("../figures/joint_inference_cornerpoints.png",
                   bbox_inches="tight")

        # corner plot
        fig, axes = plt.subplots(ndim, ndim, figsize=(15, 15))
        axes = axes.reshape([ndim, ndim])
        fg, ax = dyplot.cornerplot(res,
                                   fig=(fig, axes),
                                   color="b",
                                   labels=LABELS,
                                   show_titles=True)
        fg.savefig("../figures/joint_inference_corner_plot.png",
                   bbox_inches="tight")

        #### marginalized posterior #####
        Ddt = res.samples[:, 0]  # histogram of Ddt
        weights = np.exp(
            res['logwt'] -
            res['logz'][-1])  # importance weights (posterior mass per sample)

        # remove extreme outliers and estimate a confidence interval
        low, fifth, median, ninety_fifth, high = weighted_quantile(
            Ddt, [0.0001, 0.05, 0.5, 0.95, 0.9999], weights)
        error_plus = ninety_fifth - median  # upper error, from the 95th percentile
        error_minus = median - fifth  # lower error, from the 5th percentile
        good = (Ddt > low) & (Ddt < high)  # remove outliers
        Ddt = Ddt[good]
        weights = weights[good]

        plt.figure(figsize=(8, 8))
        plt.hist(Ddt, bins=100, weights=weights)
        plt.title(r"$D_{\Delta t}$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$D_{\Delta t}$")
        plt.savefig("../figures/marginalized_posterior_Ddt.png")

        # assume a flat LambdaCDM model (with negligible radiation)

        # We need to model kappa_ext for this step
        def integrand(z):
            return 1 / np.sqrt(args.omega_m * (1 + z)**3 + args.omega_l)

        Dd = quad(integrand, 0, args.z_lens)[0] / (1 + args.z_lens)
        Ds = quad(integrand, 0, args.z_source)[0] / (1 + args.z_source)
        Dds = quad(integrand, args.z_lens,
                   args.z_source)[0] / (1 + args.z_source)
        factor = (1 + args.z_lens) * Ds * Dd / Dds
        H0 = (c * factor / Ddt / u.Mpc).to(u.km / u.s / u.Mpc).value

        plt.figure(figsize=(8, 8))
        fifth, median, ninety_fifth = weighted_quantile(
            H0, [0.05, 0.5, 0.95], weights)
        error_plus = ninety_fifth - median
        error_minus = median - fifth
        plt.hist(H0, bins=100, weights=weights)
        plt.title(r"$H_0$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$H_0$ [km s$^{-1}$ Mpc$^{-1}$]")
        plt.savefig("../figures/marginalized_posterior_H0.png")
Code Example #7
    def sample(self, quiet=False):
        """
        Sample using dynesty's dynamic nested sampler.

        :returns: the posterior samples
        """
        if not self._is_setup:

            log.info("You forgot to setup the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        ndim = len(param_names)

        self._kwargs["ndim"] = ndim

        loglike, dynesty_prior = self._construct_unitcube_posterior(return_copy=True)

        # check if we are going to do things in parallel

        if threeML_config["parallel"]["use_parallel"]:

            c = ParallelClient()
            view = c[:]

            self._kwargs["pool"] = view
            self._kwargs["queue_size"] = len(view)

        sampler = DynamicNestedSampler(loglike, dynesty_prior, **self._kwargs)

        self._sampler_kwargs["print_progress"] = loud

        with use_astromodels_memoization(False):
            log.debug("Start dynestsy run")
            sampler.run_nested(**self._sampler_kwargs)
            log.debug("Dynesty run done")

        self._sampler = sampler

        results = self._sampler.results

        # draw posterior samples
        weights = np.exp(results["logwt"] - results["logz"][-1])

        SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))

        rstate = np.random

        if abs(np.sum(weights) - 1.0) > SQRTEPS:  # same tol as in np.random.choice.
            raise ValueError("Weights do not sum to 1.")

        # Make N subdivisions and choose positions with a consistent random offset.
        nsamples = len(weights)
        positions = (rstate.random() + np.arange(nsamples)) / nsamples

        # Resample the data.
        idx = np.zeros(nsamples, dtype=int)
        cumulative_sum = np.cumsum(weights)
        i, j = 0, 0
        while i < nsamples:
            if positions[i] < cumulative_sum[j]:
                idx[i] = j
                i += 1
            else:
                j += 1

        samples_dynesty = results["samples"][idx]

        self._raw_samples = samples_dynesty

        # now do the same for the log likes

        logl_dynesty = results["logl"][idx]

        self._log_like_values = logl_dynesty

        self._log_probability_values = self._log_like_values + np.array(
            [self._log_prior(samples) for samples in self._raw_samples]
        )

        self._marginal_likelihood = self._sampler.results["logz"][-1] / np.log(10.0)

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        return self.samples
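The hand-rolled systematic-resampling loop in the middle of this method reproduces what `dynesty.utils.resample_equal` already does; a sketch of the equivalent call on the same `results` object:

# Equivalent to the manual systematic-resampling loop above.
import numpy as np
from dynesty.utils import resample_equal

weights = np.exp(results["logwt"] - results["logz"][-1])
samples_dynesty = resample_equal(results["samples"], weights)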
Code Example #8
    def run(self, walker_data):
        """Use nested sampling to determine posteriors."""
        from dynesty import DynamicNestedSampler
        from dynesty.dynamicsampler import stopping_function, weight_function
        from mosfit.fitter import ln_likelihood, draw_from_icdf

        prt = self._printer

        if len(walker_data):
            prt.message('nester_not_use_walkers', warning=True)

        ndim = self._model._num_free_parameters

        if self._num_walkers:
            self._nwalkers = self._num_walkers
        else:
            self._nwalkers = 2 * ndim

        self._nlive = 20 * ndim

        self._lnprob = None
        self._lnlike = None

        prt.message('nmeas_nfree', [self._model._num_measurements, ndim])

        nested_dlogz_init = self._cc
        post_thresh = self._cc

        max_iter = self._iterations if self._ct is None else np.inf
        if max_iter <= 0:
            return

        s_exception = None
        iter_denom = None if self._ct is not None else self._iterations

        # Save a few things from the dynesty run for diagnostic purposes.
        scales = []

        try:
            sampler = DynamicNestedSampler(ln_likelihood,
                                           draw_from_icdf,
                                           ndim,
                                           pool=self._pool,
                                           sample='rwalk',
                                           queue_size=max(self._pool.size, 1))
            # Perform initial sample.
            ncall = sampler.ncall
            self._niter = sampler.it - 1
            for li, res in enumerate(
                    sampler.sample_initial(dlogz=nested_dlogz_init,
                                           nlive=self._nlive)):
                ncall0 = ncall
                (worst, ustar, vstar, loglstar, logvol, logwt, self._logz,
                 logzvar, h, nc, worst_it, propidx, propiter, eff,
                 delta_logz) = res

                ncall += nc
                self._niter += 1
                max_iter -= 1

                if max_iter < 0:
                    break

                if (self._fitter._maximum_walltime is not False and
                        self.time_running() > self._fitter._maximum_walltime):
                    prt.message('exceeded_walltime', warning=True)
                    break

                self._results = sampler.results

                scales.append(sampler.results.scale)

                kmat = self._get_best_kmat()
                # The above added 1 call.
                ncall += 1

                self._e_logz = np.sqrt(logzvar)
                prt.status(self,
                           'baseline',
                           kmat=kmat,
                           iterations=[self._niter, iter_denom],
                           nc=ncall - ncall0,
                           ncall=ncall,
                           eff=eff,
                           logz=[
                               self._logz, self._e_logz, delta_logz,
                               nested_dlogz_init
                           ],
                           loglstar=[loglstar],
                           time_running=self.time_running(),
                           maximum_walltime=self._fitter._maximum_walltime)

            if max_iter >= 0:
                prt.status(self,
                           'starting_batches',
                           kmat=kmat,
                           iterations=[self._niter, iter_denom],
                           nc=ncall - ncall0,
                           ncall=ncall,
                           eff=eff,
                           logz=[
                               self._logz, self._e_logz, delta_logz,
                               nested_dlogz_init
                           ],
                           loglstar=[loglstar],
                           time_running=self.time_running(),
                           maximum_walltime=self._fitter._maximum_walltime)

            n = 0
            while max_iter >= 0:
                n += 1
                if (self._fitter._maximum_walltime is not False and
                        self.time_running() > self._fitter._maximum_walltime):
                    prt.message('exceeded_walltime', warning=True)
                    break

                self._results = sampler.results

                scales.append(sampler.results.scale)

                stop, stop_vals = stopping_function(
                    self._results,
                    return_vals=True,
                    args={'post_thresh': post_thresh})
                stop_post, stop_evid, stop_val = stop_vals
                if not stop:
                    logl_bounds = weight_function(self._results)
                    self._logz, self._e_logz = self._results.logz[
                        -1], self._results.logzerr[-1]
                    for res in sampler.sample_batch(
                            logl_bounds=logl_bounds,
                            nlive_new=int(np.ceil(self._nlive / 2))):
                        (worst, ustar, vstar, loglstar, nc, worst_it, propidx,
                         propiter, eff) = res
                        ncall0 = ncall

                        ncall += nc
                        self._niter += 1
                        max_iter -= 1

                        self._results = sampler.results

                        kmat = self._get_best_kmat()
                        # The above added 1 call.
                        ncall += 1

                        prt.status(
                            self,
                            'batching',
                            kmat=kmat,
                            iterations=[self._niter, iter_denom],
                            batch=n,
                            nc=ncall - ncall0,
                            ncall=ncall,
                            eff=eff,
                            logz=[self._logz, self._e_logz],
                            loglstar=[
                                logl_bounds[0], loglstar, logl_bounds[1]
                            ],
                            stop=stop_val,
                            time_running=self.time_running(),
                            maximum_walltime=self._fitter._maximum_walltime)

                        if max_iter < 0:
                            break
                    sampler.combine_runs()
                else:
                    break

                # self._results.summary()
                # prt.nester_status(self, desc='sampling')

        except (KeyboardInterrupt, SystemExit):
            prt.message('ctrl_c', error=True, prefix=False, color='!r')
            s_exception = sys.exc_info()
        except Exception:
            print('Scale history:')
            print(scales)
            pickle.dump(
                sampler.results,
                open(self._fitter._event_name + '-dynesty.pickle', 'wb'))
            self._pool.close()
            raise

        if max_iter < 0:
            prt.message('max_iter')

        if s_exception is not None:
            self._pool.close()
            if (not prt.prompt('mc_interrupted')):
                sys.exit()

        sampler.reset()
        gc.collect()
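For reference, the manual `sample_initial`/`sample_batch` loop above expands what `run_nested` does internally; it is written out by hand here to interleave status printing, walltime checks, and iteration caps. A rough high-level sketch of the same run without those hooks, reusing the names defined in the method:

# Rough equivalent of the manual loop, letting dynesty drive batching
# and stopping itself; post_thresh plays the same role as the argument
# passed to stopping_function above.
sampler = DynamicNestedSampler(ln_likelihood, draw_from_icdf, ndim,
                               pool=self._pool, sample='rwalk',
                               queue_size=max(self._pool.size, 1))
sampler.run_nested(dlogz_init=nested_dlogz_init,
                   nlive_init=self._nlive,
                   nlive_batch=int(np.ceil(self._nlive / 2)),
                   stop_kwargs={'post_thresh': post_thresh})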
Code Example #9
    norm = -0.5 * M * LN2PI - M * LNSIGMA

    # chi-squared (data, sigma and x are global variables defined early on in this notebook)
    chisq = np.sum(((data - straight_line(x, m, c)) / sigma)**2)

    return norm - 0.5 * chisq


nlive = 1024  # number of (initial) live points
bound = 'multi'  # use MultiNest algorithm
sample = 'rwalk'  # use the random walk to draw new samples
ndims = 2  # two parameters

dsampler = DynamicNestedSampler(loglikelihood_dynesty,
                                prior_transform,
                                ndims,
                                bound=bound,
                                sample=sample)
dsampler.run_nested(nlive_init=nlive)
dres = dsampler.results

dlogZdynesty = dres.logz[-1]  # value of logZ
dlogZerrdynesty = dres.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using dynamic sampler) is {} ± {}'.format(
    dlogZdynesty, dlogZerrdynesty))

# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
Code Example #10
File: samplers.py Project: qbaghi/bayesdawn
    def extended_dynamic_nested_sampler(*args, **kwargs):

        return DynamicNestedSampler(*args, **kwargs)
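A usage sketch for this thin wrapper (the positional arguments follow dynesty's `DynamicNestedSampler` signature; `loglike` and `prior_transform` here are placeholders):

# Sketch: the wrapper simply forwards to dynesty's constructor.
sampler = extended_dynamic_nested_sampler(loglike, prior_transform, ndim,
                                          bound='multi', sample='rwalk')
sampler.run_nested()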
Code Example #11
                                    log_gamma_bg=log_bg,
                                    C_bounds=[1., 10.],
                                    log_C=False)
# =====================================================================

# if __name__ == '__main__':

# sample from the target distribution
t0 = time.time()
npool = 7
with ProcessPoolExecutor(max_workers=npool) as executor:

    sampler = DynamicNestedSampler(infer.lnlike,
                                   infer.prior_transform,
                                   ndim,
                                   pool=executor,
                                   queue_size=npool,
                                   bound='multi',
                                   sample='unif')

    sampler.run_nested(
        dlogz_init=0.05,
        nlive_init=1000,
        nlive_batch=100,
        maxiter=maxiter,
        use_stop=False,
        # wt_kwargs={'pfrac': 1.0}
    )

    res = sampler.results
    pickle.dump(res, open(chain_file, "wb"))
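Since the results are pickled to `chain_file`, they can be reloaded later for post-processing without rerunning the sampler; a small sketch:

import pickle

# reload the saved dynesty results for post-processing
with open(chain_file, "rb") as f:
    res = pickle.load(f)
res.summary()  # print evidence and sampling statistics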
Code Example #12
    m, c = theta # unpack the parameters

    # normalisation
    norm = -0.5*M*LN2PI - M*LNSIGMA

    # chi-squared (data, sigma and x are global variables defined early on in this notebook)
    chisq = np.sum(((data-straight_line(x, m, c))/sigma)**2)

    return norm - 0.5*chisq

nlive = 1024      # number of (initial) live points
bound = 'multi'   # use MultiNest algorithm
sample = 'rwalk'  # use the random walk to draw new samples
ndims = 2         # two parameters

dsampler = DynamicNestedSampler(loglikelihood_dynesty, prior_transform, ndims,
                                bound=bound, sample=sample)
dsampler.run_nested(nlive_init=nlive)
dres = dsampler.results

dlogZdynesty = dres.logz[-1]        # value of logZ
dlogZerrdynesty = dres.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using dynamic sampler) is {} ± {}'.format(dlogZdynesty, dlogZerrdynesty))

# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
dpostsamples = resample_equal(dres.samples, dweights)

print('Number of posterior samples (using dynamic sampler) is {}'.format(dpostsamples.shape[0]))
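With equal-weight posterior samples in hand, a corner plot is the usual next step; a sketch using dynesty's own plotting module (the labels follow the `m`, `c` parameters unpacked above):

# Corner plot straight from the dynesty results object.
from dynesty import plotting as dyplot

fig, axes = dyplot.cornerplot(dres, labels=['m', 'c'], show_titles=True)
fig.savefig('corner_dynesty.png')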