Code example #1
 def run_dynesty(self, kwargs=None):
     # avoid a mutable default argument for the run_nested() options
     kwargs = kwargs if kwargs is not None else {}
     # run dynesty's dynamic nested sampler over the active parameters
     solver = DynamicNestedSampler(self._core_likelihood, self.prior,
                                   len(self._activelist))
     solver.run_nested(**kwargs)
     results = solver.results
     # rescale the unit-cube samples into each parameter's physical range
     names = sorted(self._activelist)
     for i, name in enumerate(names):
         low, high = self.paramrange[name]
         results.samples[:, i] = umap(results.samples[:, i], [low, high])
     return results
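`umap` is not defined in this snippet; from the call it evidently rescales unit-interval samples onto a `[low, high]` range. A minimal stand-in, assuming a plain linear map (a guess, not the project's actual helper):

import numpy as np

def umap(u, arange):
    """Hypothetical helper: linearly map samples u in [0, 1] onto [low, high]."""
    low, high = arange
    return low + np.asarray(u) * (high - low)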
Code example #2
 def inference(self, nlive, nworkers=1):
     """
     Use Dynamic Nested Sampling to infer the posterior.
     """
     with Pool(nworkers) as pool:
         # queue_size must be given explicitly: dynesty cannot infer
         # the size of a multiprocessing Pool on its own
         sampler = DynamicNestedSampler(self.loglikelihood,
                                        self.prior_transform,
                                        len(self.prior),
                                        pool=pool,
                                        nlive=nlive,
                                        queue_size=nworkers)
         sampler.run_nested()
         res = sampler.results
     return res
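Neither `self.prior_transform` nor `self.prior` is shown here. One plausible shape, assuming `self.prior` maps parameter names to uniform (low, high) bounds, is this standalone sketch (an assumption, not the project's code):

import numpy as np

# hypothetical: build a unit-cube prior transform from uniform bounds,
# e.g. prior = {"m": (-5, 5), "c": (-10, 10)}
def make_prior_transform(prior):
    bounds = np.array(list(prior.values()))

    def prior_transform(u):
        # map unit-cube samples u onto each parameter's (low, high) range
        return bounds[:, 0] + u * (bounds[:, 1] - bounds[:, 0])

    return prior_transform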
Code example #3
    def posterior_sampler(self, t, sample_idx, ndims=2):
        nlive = 1024  # number of (initial) live points
        bound = 'multi'  # use multi-ellipsoid bounds (as in MultiNest)
        sample = 'rwalk'  # draw new live points with random walks
        # simulate N trajectories to build the data arrays
        self.X = np.zeros(shape=(self.N, len(sample_idx)))
        self.S = np.zeros(shape=(self.N, len(sample_idx)))
        for i in range(self.N):
            _, sample_trajectories = self.simulate(t, self.action, sample_idx)
            self.X[i, :] = sample_trajectories[:, 0]
            self.S[i, :] = sample_trajectories[:, 1]
        dsampler = DynamicNestedSampler(self.loglikelihood,
                                        self.prior_transform,
                                        ndims,
                                        bound=bound,
                                        sample=sample)
        dsampler.run_nested(nlive_init=nlive)
        res = dsampler.results
        # convert nested-sampling weights into equally weighted posterior draws
        weights = np.exp(res['logwt'] - res['logz'][-1])
        samples_dynesty = resample_equal(res.samples, weights)
        return dsampler, samples_dynesty
Code example #4
File: fit21cm.py  Project: guillochon/fit-21cm
from collections import OrderedDict  # free parameters mapped to their prior bounds

free_vars = OrderedDict((
    ('b1', (-2, 2)),
    ('b2', (-2, 2)),
    ('b3', (-2, 2)),
    ('b4', (-1500, 1500)),
    ('A', (0, 10)),
    ('nu0', (min_nu, max_nu)),
    ('w', (0, 200)),
    ('tau', (0, 100)),
    ('sigma', (0, 10))))

ndim = len(free_vars)  # number of free parameters

nu_c = (max_nu + min_nu) / 2.0

dsampler = DynamicNestedSampler(log_like, ptform, ndim, sample='rwalk')
dsampler.run_nested(dlogz_init=0.001, nlive_init=1000)

res = dsampler.results

weights = res['logwt'] - np.max(res['logwt'])  # normalize the log-weights without mutating the results object

# plt.plot(xdata, ydata, color='black', lw=1.5)
corner_weights = []
corner_vars = []
rms = []
# keep only samples whose relative log-weight is above -7 for the corner plot
for si, samp in enumerate(res['samples']):
    if weights[si] < -7:
        continue
    corner_weights.append(np.exp(weights[si]))
    corner_vars.append(samp)
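`log_like` and `ptform` are defined earlier in fit21cm.py and are not shown. A plausible reconstruction of the prior transform, assuming independent uniform priors over the `free_vars` bounds (an assumption, not the project's code), would be:

import numpy as np

# assumed shape of the prior transform: independent uniform priors
# over the (low, high) bounds listed in free_vars
bounds = np.array(list(free_vars.values()))

def ptform(u):
    """Scale unit-cube samples u onto each parameter's (low, high) range."""
    return bounds[:, 0] + u * (bounds[:, 1] - bounds[:, 0])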
Code example #5
def main(args=None):
    if args.results == "none":
        ndim = len(JOINT_PRIOR)
        date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        image_positions = pd.read_csv(args.image_positions)
        pprint(image_positions)
        # A=0, B=1, C=2, D=3
        x_image = image_positions["theta_x"].to_numpy()
        y_image = image_positions["theta_y"].to_numpy()
        quad_model = QuadPseudoNIELensModel(x_image, y_image, args.plate_scale)
        time_delays = pd.read_csv(args.time_delays)
        pprint(time_delays)

        # Expected: DA=0, DB=1, DC=2 (index)
        sigma_t = time_delays["sigma"].to_numpy()  # units of days
        delta_t = time_delays["delta_t"].to_numpy()
        joint_loglikelihood = joint_loglikelihood_func(quad_model, delta_t,
                                                       sigma_t)

        with Pool(args.nworkers) as pool:
            sampler = DynamicNestedSampler(joint_loglikelihood,
                                           joint_prior_transform,
                                           ndim,
                                           pool=pool,
                                           nlive=args.nlive,
                                           queue_size=args.nworkers)
            sampler.run_nested()
            res = sampler.results

        # save results
        with open(f"../results/joint_result_{date}.p", "wb") as f:
            pickle.dump(res, f)
    else:  # we just want to plot the result from an older run
        with open(args.results, "rb") as f:
            res = pickle.load(f)
        ndim = res.samples.shape[1]

    if args.plot_results:
        # trace plot
        fig, axs = dyplot.traceplot(
            res,
            show_titles=True,
            trace_cmap='plasma',
            connect=True,
            connect_highlight=range(5),
            labels=LABELS,
        )
        fig.tight_layout(pad=2.0)
        fig.savefig("../figures/joint_inference_trace_plot.png",
                    bbox_inches="tight")

        # corner points plot
        fig, axes = plt.subplots(ndim - 1, ndim - 1, figsize=(15, 15))
        axes = axes.reshape([ndim - 1, ndim - 1])  # reshape returns a new array, so keep the result
        fg, ax = dyplot.cornerpoints(res,
                                     cmap='plasma',
                                     kde=False,
                                     fig=(fig, axes),
                                     labels=LABELS)
        fg.savefig("../figures/joint_inference_cornerpoints.png",
                   bbox_inches="tight")

        # corner plot
        fig, axes = plt.subplots(ndim, ndim, figsize=(15, 15))
        axes = axes.reshape([ndim, ndim])  # reshape returns a new array, so keep the result
        fg, ax = dyplot.cornerplot(res,
                                   fig=(fig, axes),
                                   color="b",
                                   labels=LABELS,
                                   show_titles=True)
        fg.savefig("../figures/joint_inference_corner_plot.png",
                   bbox_inches="tight")

        #### marginalized posterior #####
        Ddt = res.samples[:, 0]  # marginal samples of D_dt
        weights = np.exp(
            res['logwt'] -
            res['logz'][-1])  # normalized posterior (importance) weights

        # drop extreme outliers and estimate the 90% credible interval
        low, fifth, median, ninety_fifth, high = weighted_quantile(
            Ddt, [0.0001, 0.05, 0.5, 0.95, 0.9999], weights)
        error_plus = ninety_fifth - median  # upper error from the 95th percentile
        error_minus = median - fifth  # lower error from the 5th percentile
        good = (Ddt > low) & (Ddt < high)  # remove outliers
        Ddt = Ddt[good]
        weights = weights[good]

        plt.figure(figsize=(8, 8))
        plt.hist(Ddt, bins=100, weights=weights)
        plt.title(r"$D_{\Delta t}$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$D_{\Delta t}$")
        plt.savefig("../figures/marginalized_posterior_Ddt.png")

        # assume a flat LambdaCDM model (with negligible radiation)

        # We need to model kappa_ext for this step
        def integrand(z):
            return 1 / np.sqrt(args.omega_m * (1 + z)**3 + args.omega_l)

        Dd = quad(integrand, 0, args.z_lens)[0] / (1 + args.z_lens)
        Ds = quad(integrand, 0, args.z_source)[0] / (1 + args.z_source)
        Dds = quad(integrand, args.z_lens,
                   args.z_source)[0] / (1 + args.z_source)
        factor = (1 + args.z_lens) * Ds * Dd / Dds
        H0 = (c * factor / Ddt / u.Mpc).to(u.km / u.s / u.Mpc).value

        plt.figure(figsize=(8, 8))
        fifth, median, ninety_fifth = weighted_quantile(
            H0, [0.05, 0.5, 0.95], weights)
        error_plus = ninety_fifth - median
        error_minus = median - fifth
        plt.hist(H0, bins=100, weights=weights)
        plt.title(r"$H_0$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$H_0$ [km s$^{-1}$ Mpc$^{-1}$]")
        plt.savefig("../figures/marginalized_posterior_H0.png")
Code example #6
    def sample(self, quiet=False):
        """
        Sample using dynesty's dynamic nested sampling.

        :returns: the posterior samples
        """
        if not self._is_setup:

            log.info("You forgot to set up the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        ndim = len(param_names)

        self._kwargs["ndim"] = ndim

        loglike, dynesty_prior = self._construct_unitcube_posterior(return_copy=True)

        # check whether we are going to do things in parallel

        if threeML_config["parallel"]["use_parallel"]:

            c = ParallelClient()
            view = c[:]

            self._kwargs["pool"] = view
            self._kwargs["queue_size"] = len(view)

        sampler = DynamicNestedSampler(loglike, dynesty_prior, **self._kwargs)

        self._sampler_kwargs["print_progress"] = loud

        with use_astromodels_memoization(False):
            log.debug("Start dynestsy run")
            sampler.run_nested(**self._sampler_kwargs)
            log.debug("Dynesty run done")

        self._sampler = sampler

        results = self._sampler.results

        # draw posterior samples
        weights = np.exp(results["logwt"] - results["logz"][-1])

        SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))

        rstate = np.random

        if abs(np.sum(weights) - 1.0) > SQRTEPS:  # same tol as in np.random.choice.
            raise ValueError("Weights do not sum to 1.")

        # Make N subdivisions and choose positions with a consistent random offset.
        nsamples = len(weights)
        positions = (rstate.random() + np.arange(nsamples)) / nsamples

        # Resample the data.
        idx = np.zeros(nsamples, dtype=int)  # the np.int alias was removed from modern NumPy
        cumulative_sum = np.cumsum(weights)
        i, j = 0, 0
        while i < nsamples:
            if positions[i] < cumulative_sum[j]:
                idx[i] = j
                i += 1
            else:
                j += 1

        samples_dynesty = results["samples"][idx]

        self._raw_samples = samples_dynesty

        # now do the same for the log likes

        logl_dynesty = results["logl"][idx]

        self._log_like_values = logl_dynesty

        self._log_probability_values = self._log_like_values + np.array(
            [self._log_prior(samples) for samples in self._raw_samples]
        )

        self._marginal_likelihood = self._sampler.results["logz"][-1] / np.log(10.0)

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        return self.samples
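For the samples themselves, the hand-rolled systematic resampling above matches what `dynesty.utils.resample_equal` provides (the loop is also reused here to index the log-likelihoods, which the utility does not return). An equivalent sketch for the samples alone:

import numpy as np
from dynesty.utils import resample_equal

# equally weighted posterior draws from the weighted nested samples
weights = np.exp(results["logwt"] - results["logz"][-1])
samples_dynesty = resample_equal(results["samples"], weights)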
Code example #7
    chisq = np.sum(((data - straight_line(x, m, c)) / sigma)**2)

    return norm - 0.5 * chisq


nlive = 1024  # number of (initial) live points
bound = 'multi'  # use multi-ellipsoid bounds (as in MultiNest)
sample = 'rwalk'  # use the random walk to draw new samples
ndims = 2  # two parameters

dsampler = DynamicNestedSampler(loglikelihood_dynesty,
                                prior_transform,
                                ndims,
                                bound=bound,
                                sample=sample)
dsampler.run_nested(nlive_init=nlive)
dres = dsampler.results

dlogZdynesty = dres.logz[-1]  # value of logZ
dlogZerrdynesty = dres.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using dynamic sampler) is {} ± {}'.format(
    dlogZdynesty, dlogZerrdynesty))

# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
dpostsamples = resample_equal(dres.samples, dweights)

print('Number of posterior samples (using dynamic sampler) is {}'.format(
    dpostsamples.shape[0]))
Code example #8
t0 = time.time()
npool = 7
with ProcessPoolExecutor(max_workers=npool) as executor:

    sampler = DynamicNestedSampler(infer.lnlike,
                                   infer.prior_transform,
                                   ndim,
                                   pool=executor,
                                   queue_size=npool,
                                   bound='multi',
                                   sample='unif')

    sampler.run_nested(
        dlogz_init=0.05,
        nlive_init=1000,
        nlive_batch=100,
        maxiter=maxiter,
        use_stop=False,
        # wt_kwargs={'pfrac': 1.0}
    )

    res = sampler.results
    with open(chain_file, "wb") as f:
        pickle.dump(res, f)  # close the file promptly after writing

t_run = (time.time() - t0) / 60.
print('\n============================================')
print("Sampling took {0:.10f} mins".format(t_run))
print('============================================')
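In a later session (for example, for the plotting step below) the pickled results can be read back; `chain_file` is assumed to be the path defined earlier in the script:

import pickle

# reload the saved dynesty results for post-processing
with open(chain_file, "rb") as f:
    res = pickle.load(f)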

# =====================================================================
# Plots
if plot:
Code example #9
    # normalisation
    norm = -0.5*M*LN2PI - M*LNSIGMA

    # chi-squared (data, sigma and x are global variables defined early on in this notebook)
    chisq = np.sum(((data-straight_line(x, m, c))/sigma)**2)

    return norm - 0.5*chisq

nlive = 1024      # number of (initial) live points
bound = 'multi'   # use multi-ellipsoid bounds (as in MultiNest)
sample = 'rwalk'  # use the random walk to draw new samples
ndims = 2         # two parameters

dsampler = DynamicNestedSampler(loglikelihood_dynesty, prior_transform, ndims,
                                bound=bound, sample=sample)
dsampler.run_nested(nlive_init=nlive)
dres = dsampler.results

dlogZdynesty = dres.logz[-1]        # value of logZ
dlogZerrdynesty = dres.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using dynamic sampler) is {} ± {}'.format(dlogZdynesty, dlogZerrdynesty))

# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
dpostsamples = resample_equal(dres.samples, dweights)

print('Number of posterior samples (using dynamic sampler) is {}'.format(dpostsamples.shape[0]))

# Now run with the static sampler
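The snippet ends where the static run would begin. A minimal sketch of that step, reusing the `loglikelihood_dynesty`, `prior_transform`, `ndims`, `bound`, `sample`, and `nlive` defined above (a sketch, not the notebook's actual continuation):

from dynesty import NestedSampler

# static sampler: the number of live points is fixed for the whole run
sampler = NestedSampler(loglikelihood_dynesty, prior_transform, ndims,
                        bound=bound, sample=sample, nlive=nlive)
sampler.run_nested(dlogz=0.1)  # stop once the estimated remaining evidence is small
sres = sampler.results

print('Marginalised evidence (using static sampler) is {} ± {}'.format(
    sres.logz[-1], sres.logzerr[-1]))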