Example #1
    def _samples(self, kappa, beta=1, return_full=False, print_progress=False):
        r"""
        Draws samples from the surrogate model (optionally the acquisition
        function). Calls `dynesty.NestedSampler`. Runs on a single thread even
        if `self.nthreads > 1`.

        Parameters
        ----------
        kappa : int
            Acquisition function parameter. See class documentation for more
            information.
        beta : float, optional
            Parallel tempering parameter :math:`L^{\beta}`, where :math:`L`
            is the target function. By default 1.
        return_full : bool, optional
            Whether to also return the sampled log-target values and the
            target evidence.
        print_progress : bool, optional
            Whether to print the sampler's progress bar.

        Returns
        -------
        samples : np.ndarray
            Sampled points from the surrogate model.
        logtarget : np.ndarray
            Optionally returned if `return_full=True`, the surrogate model
            target values.
        logz : float
            Optionally returned if `return_full=True`, the surrogate model
            evidence.
        """
        sampler = NestedSampler(self.surrogate_predict,
                                self._prior_transform,
                                ndim=len(self.params),
                                logl_kwargs={
                                    'kappa': kappa,
                                    'beta': beta
                                },
                                rstate=self.generator,
                                **self._sampler_kwargs)
        sampler.run_nested(print_progress=print_progress)

        results = sampler.results
        logz = results.logz[-1]
        weights = numpy.exp(results.logwt - logz)
        # Resample from the posterior
        samples = dyfunc.resample_equal(results.samples, weights)
        if return_full:
            logtarget = self.surrogate_predict(samples)
            # We're only interested in the contribution to the evidence from
            # the likelihood (our target function). The uniform prior is used
            # only to provide samples, hence undo its contribution.
            logprior = -numpy.log(self._prior_max - self._prior_min).sum()
            logz -= logprior
            return samples, logtarget, logz
        return samples
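
The evidence correction above relies on the prior density being constant: for a uniform box prior, ln(prior) = -sum(ln(max - min)), so subtracting it from the sampler's ln(Z) leaves the likelihood-only evidence. A minimal numeric sketch of that identity (the bounds and logz value below are made-up placeholders, not taken from the class above):

import numpy as np

# Hypothetical box-prior bounds for a 2-parameter problem
prior_min = np.array([0.0, -1.0])
prior_max = np.array([2.0, 1.0])

# Constant log prior density over the box
logprior = -np.log(prior_max - prior_min).sum()

# If the sampler reports ln Z for likelihood * prior, undoing the prior's
# contribution recovers the likelihood-only evidence
logz_target = -5.0  # placeholder sampler output
logz_likelihood = logz_target - logprior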
Example #2
    def construct_analytical_functions(self):
        """Construct invertible functions based on interpolations."""
        import warnings
        # from dynesty import DynamicNestedSampler
        from dynesty import NestedSampler
        from collections import OrderedDict

        import numpy as np

        warnings.filterwarnings("ignore")

        self._free_vars = OrderedDict(
            (('r0', (0.25, 1.5)), ('mpow', (0, 5)), ('mtrunning', (-5, 5)),
             ('mrunning', (-5, 5)), ('tpow', (-5, 5)), ('trunning', (-5, 5)),
             ('tmrunning', (-5, 5)), ('zpow', (-5, 5))))

        self._min_ilms = np.log10(np.min(self._ims))
        self._max_ilms = np.log10(np.max(self._ims))
        self._min_ilzs, self._max_ilzs = np.min(self._ilzs), np.max(self._ilzs)

        self._mlzs, self._mms, self._mts = np.meshgrid(self._ilzs,
                                                       self._ims,
                                                       self._its,
                                                       indexing='ij')

        self._ndim = len(self._free_vars)

        dsampler = NestedSampler(self.rad_log_like,
                                 self.ptform,
                                 self._ndim,
                                 sample='rwalk')

        # dsampler.run_nested(dlogz_init=0.01)
        # dlogz=1000 is a very loose stopping criterion, giving a quick, coarse run
        dsampler.run_nested(dlogz=1000)

        res = dsampler.results

        bbest = res['samples'][-1]

        prt_ts = np.linspace(0, 1, 5)
        test_masses = 10.0**np.linspace(self._min_ilms, self._max_ilms, 3)
        test_lzs = np.linspace(self._min_ilzs, self._max_ilzs, 3)
        for tlz in test_lzs:
            for tm in test_masses:
                print('Radii for logz = {} and m = {}'.format(tlz, tm))
                print(self._radius_rgi([[tlz, tm, x] for x in prt_ts]))
                print(self.rad_func(bbest, tlz, tm, prt_ts))
        max_frac_err = np.max(
            np.abs(
                self.rad_func(bbest, self._mlzs, self._mms, self._mts) -
                self._irs) / self._irs)

        print('Maximum fractional error: {:.1%}'.format(max_frac_err))
Example #3
def nestle_multi_cos():
    # Run nested sampling with a multiprocessing pool
    with closing(Pool(processes=24)) as pool:
        sampler = NestedSampler(log_likelihood_cosine, prior_transform_cos, 17,
                                bound='balls', nlive=1024, sample='rwalk',
                                pool=pool, queue_size=24)
        # t0 = time.time()
        # `tol` is assumed to be defined at module scope
        sampler.run_nested(dlogz=tol, print_progress=False)  # don't output progress bar
        # t1 = time.time()
        pool.terminate()
    res = sampler.results
    res.summary()  # prints the run summary (returns None)
    print(res.logz)
    return res.logz[-1], res.logzerr[-1]
Example #4
    def execute(self):
        from dynesty import NestedSampler, DynamicNestedSampler

        ndim = self.pipeline.nvaried

        sampling_method = None if self.sample == "auto" else self.sample

        if self.mode == "static":
            sampler = NestedSampler(log_probability_function,
                                    prior_transform,
                                    ndim,
                                    nlive=self.nlive,
                                    bound=self.bound,
                                    sample=sampling_method,
                                    update_interval=self.update_interval,
                                    first_update={
                                        'min_ncall': self.min_ncall,
                                        'min_eff': self.min_eff
                                    },
                                    queue_size=self.queue_size,
                                    pool=self.pool)

            sampler.run_nested(dlogz=self.dlogz)

        else:
            sampler = DynamicNestedSampler(
                log_probability_function,
                prior_transform,
                ndim,
                bound=self.bound,
                sample=sampling_method,
                # update_interval = self.update_interval,
                queue_size=self.queue_size,
                pool=self.pool)
            sampler.run_nested(dlogz_init=self.dlogz)

        results = sampler.results
        results.summary()

        for sample, logwt, logl in zip(results['samples'], results['logwt'],
                                       results['logl']):
            self.output.parameters(sample, logwt, logl)

        self.output.final("ncall", results['ncall'])
        self.output.final("efficiency", results['eff'])
        self.output.final("log_z", results['logz'])
        self.output.final("log_z_err", results['logzerr'])

        self.converged = True
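
Example #4 switches between the static and dynamic samplers, which take different stopping arguments (`dlogz` vs `dlogz_init`). A minimal self-contained sketch of both on a toy 2-D Gaussian likelihood (not part of the original pipeline):

import numpy as np
from dynesty import NestedSampler, DynamicNestedSampler

def loglike(x):
    # Isotropic Gaussian likelihood with sigma = 0.1
    return -0.5 * np.sum(x**2) / 0.1**2

def ptform(u):
    # Uniform prior on [-1, 1]^2
    return 2.0 * u - 1.0

static = NestedSampler(loglike, ptform, 2, nlive=200)
static.run_nested(dlogz=0.1)         # static sampler: dlogz stopping criterion

dynamic = DynamicNestedSampler(loglike, ptform, 2)
dynamic.run_nested(dlogz_init=0.05)  # dynamic sampler: dlogz_init for the baseline run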
Example #5
    def sampler_setup(self):
        # see jupyter notebook memopy_testing_evidence_analytical (env_working)
        # for background, reference and visualisations of this test
        sigma = 0.001

        def loglikelihood(theta):
            r2 = np.sum(theta**2)
            logl = - r2 / (2 * sigma**2)
            return logl

        def logprior_transform_2_sphere(hypercube):
            # transforms a 2-dimensional vector of uniform[0, 1] random
            # variables into a point distributed uniformly over the interior
            # of the unit disk
            u = hypercube[0]
            v = hypercube[1]

            r = u**0.5  # square root gives a density uniform in area
            theta = 2 * np.pi * v

            x = r*np.cos(theta)
            y = r*np.sin(theta)
            return np.array([x, y])

        # theoretical results
        # logevid_analytical = -13.122363377404328
        # logl_max_analytical = 0.0
        # theta means = 0.0, 0.0

        nlive = 1000      # number of live points
        bound = 'multi'   # use MultiNest algorithm for bounds
        ndims = 2         # two parameters
        sample = 'unif'   # unif or random walk sampling or ...
        tol = 0.01        # the stopping criterion

        sampler = NestedSampler(loglikelihood, logprior_transform_2_sphere, ndims,
                                bound=bound, sample=sample, nlive=nlive)
        sampler.run_nested(dlogz=tol, print_progress=False) # don't output progress bar
        return sampler
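
The commented analytical evidence can be checked directly: with likelihood L = exp(-r^2 / (2 sigma^2)) and a uniform prior of density 1/pi over the unit disk, the evidence integrates to Z = 2 sigma^2 (1 - exp(-1 / (2 sigma^2))) ≈ 2 sigma^2. A short sketch verifying the quoted value:

import numpy as np

sigma = 0.001
logz_analytical = np.log(2 * sigma**2 * (1 - np.exp(-1 / (2 * sigma**2))))
print(logz_analytical)  # ≈ -13.122363377404328, matching the comment above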
Example #6
def run_inference(beta,
                  age,
                  S,
                  nlive=1500,
                  lamscale=1.0,
                  muscale=0.05,
                  gammascale=0.05,
                  verbose=False):

    # set the std of the halfnormal priors on lam, mu, gamma
    scales = np.array([lamscale, muscale, gammascale])
    ndims = 7 + 2 * S + 1

    # create wrapper functions that take only the parameters as argument, to
    # pass to dynesty
    prior_function = lambda flat_prior: prior_transform(flat_prior, scales)
    loglikelihood_function = lambda params: loglikelihood(params, beta, S, age)

    t0 = time()

    print('Performing Dynesty sampling')
    sampler = NestedSampler(loglikelihood_function,
                            prior_function,
                            ndims,
                            bound='multi',
                            sample='rwalk',
                            nlive=nlive)
    sampler.run_nested(print_progress=verbose)
    res = sampler.results

    t1 = time()
    timesampler = int(t1 - t0)

    print("\nTime taken to run Dynesty is {} seconds".format(timesampler))

    res.summary()  # prints the run summary (returns None)

    return res
Example #7
File: model.py  Project: dioph/lightspot
 def run(self, nlive=1000, cores=None, filename=None, **kwargs):
     merge = "no"
     if filename is not None and os.path.isfile(filename):
         doit = input(f"There seems to be a file named {filename}. "
                      f"Would you like to run anyway? [y/n] ").lower()
         if doit in ["no", "n"]:
             with open(filename, "br") as file:
                 self.results = pickle.load(file)
             return
     if cores is None or cores > MAX_CORES:
         cores = MAX_CORES
     try:
         with Pool(cores) as pool:
             sampler = NestedSampler(
                 self.loglike,
                 self.sample,
                 self.N,
                 npdim=self.ndim,
                 nlive=nlive,
                 pool=pool,
                 queue_size=cores,
                 **kwargs,
             )
             sampler.run_nested()
     except KeyboardInterrupt:
         pass
     if filename is not None and os.path.isfile(filename):
         merge = input("Merge new run with previous data? [y/n] ").lower()
     if merge in ["no", "n"]:
         self.results = sampler.results
     else:
         with open(filename, "br") as file:
             res = pickle.load(file)
         self.results = merge_runs([sampler.results, res])
     if filename is not None:
         with open(filename, "bw") as file:
             pickle.dump(self.results, file)
Example #8
    def run_multinest(self, transit_bins, transit_depths, transit_errors,
                      eclipse_bins, eclipse_depths, eclipse_errors,
                      fit_info,
                      include_condensation=True, rad_method="xsec",
                      maxiter=None, maxcall=None, nlive=100,
                      num_final_samples=100,
                      **dynesty_kwargs):
        '''Runs nested sampling to retrieve atmospheric parameters.

        Parameters
        ----------
        transit_bins : array_like, shape (N,2)
            Wavelength bins, where transit_bins[i][0] is the start
            wavelength and transit_bins[i][1] is the end wavelength for
            bin i.
        transit_depths : array_like, length N
            Measured transit depths for the specified wavelength bins
        transit_errors : array_like, length N
            Errors on the aforementioned transit depths
        eclipse_bins : array_like, shape (N,2)
            Wavelength bins, where eclipse_bins[i][0] is the start
            wavelength and eclipse_bins[i][1] is the end wavelength for
            bin i.
        eclipse_depths : array_like, length N
            Measured eclipse depths for the specified wavelength bins
        eclipse_errors : array_like, length N
            Errors on the aforementioned eclipse depths
        fit_info : :class:`.FitInfo` object
            Tells us what parameters to
            freely vary, and in what range those parameters can vary. Also
            sets default values for the fixed parameters.
        include_condensation : bool, optional
            When determining atmospheric abundances, whether to include
            condensation.
        rad_method : string, optional
            "xsec" for opacity sampling, "ktables" for correlated-k
        nlive : int
            Number of live points to use for nested sampling
        **dynesty_kwargs : keyword arguments to pass to dynesty's NestedSampler

        Returns
        -------
        result : RetrievalResult object
        '''
        transit_calc = None
        eclipse_calc = None
        if transit_bins is not None:
            transit_calc = TransitDepthCalculator(
                include_condensation=include_condensation, method=rad_method)
            transit_calc.change_wavelength_bins(transit_bins)
            self._validate_params(fit_info, transit_calc)
        if eclipse_bins is not None:
            eclipse_calc = EclipseDepthCalculator(
                include_condensation=include_condensation, method=rad_method)
            eclipse_calc.change_wavelength_bins(eclipse_bins)

        def transform_prior(cube):
            new_cube = np.zeros(len(cube))
            for i in range(len(cube)):
                new_cube[i] = fit_info._from_unit_interval(i, cube[i])
            return new_cube

        def multinest_ln_like(cube):
            ln_like = self._ln_like(cube, transit_calc, eclipse_calc, fit_info, transit_depths, transit_errors,
                                 eclipse_depths, eclipse_errors)
            if np.random.randint(100) == 0:
                print("\nEvaluated params: {}".format(self.pretty_print(fit_info)))
            return ln_like

        num_dim = fit_info._get_num_fit_params()
        sampler = NestedSampler(multinest_ln_like, transform_prior, num_dim, bound='multi',
                                update_interval=float(num_dim), nlive=nlive, **dynesty_kwargs)
        sampler.run_nested(maxiter=maxiter, maxcall=maxcall)
        result = sampler.results
        
        result.logp = result.logl + np.array([fit_info._ln_prior(params) for params in result.samples])
        best_params_arr = result.samples[np.argmax(result.logp)]

        normalized_weights = np.exp(result.logwt - np.max(result.logwt))
        normalized_weights /= np.sum(normalized_weights)
        result.weights = normalized_weights                                

        equal_samples = dynesty.utils.resample_equal(result.samples, result.weights)
        np.random.shuffle(equal_samples)
        write_param_estimates_file(
            equal_samples,
            best_params_arr,
            np.max(result.logp),
            fit_info.fit_param_names)
        
        best_fit_transit_depths, best_fit_transit_info, best_fit_eclipse_depths, best_fit_eclipse_info = self._ln_like(
            best_params_arr, transit_calc, eclipse_calc, fit_info,
            transit_depths, transit_errors,
            eclipse_depths, eclipse_errors, ret_best_fit=True)

        retrieval_result = RetrievalResult(
            result, "dynesty",
            transit_bins, transit_depths, transit_errors,
            eclipse_bins, eclipse_depths, eclipse_errors,
            best_fit_transit_depths, best_fit_transit_info,
            best_fit_eclipse_depths, best_fit_eclipse_info,
            fit_info)
        
        retrieval_result.random_transit_depths = []
        retrieval_result.random_eclipse_depths = []
        for params in equal_samples[0:num_final_samples]:
            _, transit_info, _, eclipse_info = self._ln_like(
                params, transit_calc, eclipse_calc, fit_info,
                transit_depths, transit_errors,
                eclipse_depths, eclipse_errors, ret_best_fit=True)
            if transit_depths is not None:
                retrieval_result.random_transit_depths.append(transit_info["unbinned_depths"])
            if eclipse_depths is not None:
                retrieval_result.random_eclipse_depths.append(eclipse_info["unbinned_eclipse_depths"])
                    
        with open("retrieval_result.pkl", "wb") as f:
            pickle.dump(retrieval_result, f)            

        return retrieval_result
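
The normalization of `result.weights` above subtracts `np.max(result.logwt)` purely for numerical stability; any constant offset cancels after normalization, so it agrees with the `exp(logwt - logz[-1])` convention used in the other examples. A runnable toy check (the values are made up):

import numpy as np

logwt = np.array([-10.0, -9.0, -8.5, -12.0])  # toy importance log-weights

w1 = np.exp(logwt - np.max(logwt))
w1 /= np.sum(w1)

w2 = np.exp(logwt - (-8.0))  # -8.0 stands in for logz[-1]
w2 /= np.sum(w2)

print(np.allclose(w1, w2))  # True: the offset cancels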
Example #9
    def sample(self, quiet=False):
        """
        Sample using the dynesty nested sampling method.

        :returns: the posterior samples
        """
        if not self._is_setup:

            log.info("You forgot to setup the sampler!")
            return

        loud = not quiet

        self._update_free_parameters()

        param_names = list(self._free_parameters.keys())

        ndim = len(param_names)

        self._kwargs["ndim"] = ndim

        loglike, dynesty_prior = self._construct_unitcube_posterior(return_copy=True)

        # check if we are going to do things in parallel

        if threeML_config["parallel"]["use_parallel"]:

            c = ParallelClient()
            view = c[:]

            self._kwargs["pool"] = view
            self._kwargs["queue_size"] = len(view)

        sampler = NestedSampler(loglike, dynesty_prior, **self._kwargs)

        self._sampler_kwargs["print_progress"] = loud

        with use_astromodels_memoization(False):
            log.debug("Start dynesty run")
            sampler.run_nested(**self._sampler_kwargs)
            log.debug("Dynesty run done")

        self._sampler = sampler

        results = self._sampler.results

        # draw posterior samples
        weights = np.exp(results["logwt"] - results["logz"][-1])

        SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))

        rstate = np.random

        if abs(np.sum(weights) - 1.0) > SQRTEPS:  # same tol as in np.random.choice.
            raise ValueError("Weights do not sum to 1.")

        # Make N subdivisions and choose positions with a consistent random offset.
        nsamples = len(weights)
        positions = (rstate.random() + np.arange(nsamples)) / nsamples

        # Resample the data.
        idx = np.zeros(nsamples, dtype=int)  # np.int was removed in NumPy 1.24
        cumulative_sum = np.cumsum(weights)
        i, j = 0, 0
        while i < nsamples:
            if positions[i] < cumulative_sum[j]:
                idx[i] = j
                i += 1
            else:
                j += 1

        samples_dynesty = results["samples"][idx]

        self._raw_samples = samples_dynesty

        # now do the same for the log likes

        logl_dynesty = results["logl"][idx]

        self._log_like_values = logl_dynesty

        self._log_probability_values = self._log_like_values + np.array(
            [self._log_prior(samples) for samples in self._raw_samples]
        )

        # convert the natural-log evidence to log10
        self._marginal_likelihood = self._sampler.results["logz"][-1] / np.log(10.0)

        self._build_results()

        # Display results
        if loud:
            self._results.display()

        # now get the marginal likelihood

        return self.samples
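
The hand-rolled systematic resampling above reproduces what dynesty already ships as a utility; a minimal sketch of the equivalent call, assuming the same `results` dictionary, would be:

import numpy as np
from dynesty import utils as dyfunc

# Equal-weight posterior samples via dynesty's built-in utility
weights = np.exp(results["logwt"] - results["logz"][-1])
samples_dynesty = dyfunc.resample_equal(results["samples"], weights)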
Example #10
            model_transformation = model_transformation_4
            ndim = 12 + ic_cantons + 1 + 1

        print("+++++++++++++++++++++++++++++")
        print("+++   Nested Sampling     +++")
        print(" Case : ", args.case)
        print(" nlive: ", args.nlive)
        print(" dlogz: ", args.dlogz)
        print(" cores: ", args.cores)
        print("+++++++++++++++++++++++++++++")

        t = -time.time()
        from pathlib import Path
        Path("case" + str(args.case)).mkdir(parents=True, exist_ok=True)
        fname = 'case' + str(args.case) + '/samples_' + str(
            args.case) + '.pickle'
        pool = MyPool(args.cores)
        sampler = NestedSampler(model,
                                model_transformation,
                                ndim,
                                nlive=args.nlive,
                                bound='multi',
                                pool=pool)
        sampler.run_nested(maxiter=int(1e8), dlogz=args.dlogz, add_live=True)
        res = sampler.results
        res.summary()
        with open(fname, 'wb') as f:
            pickle.dump(res, f)
        t += time.time()
        print("Total time=", t)
Example #11
def dynestyfitter(lc, model, meta, log, **kwargs):
    """Perform sampling using dynesty.

    Parameters
    ----------
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    model: eureka.S5_lightcurve_fitting.models.CompositeModel
        The composite model to fit
    meta: MetaClass
        The metadata object
    log: logedit.Logedit
        The open log in which notes from this step can be added.
    **kwargs:
        Arbitrary keyword arguments.

    Returns
    -------
    best_model: eureka.S5_lightcurve_fitting.models.CompositeModel
        The composite model after fitting

    Notes
    -----
    History:

    - December 29, 2021 Taylor Bell
        Updated documentation. Reduced repeated code.
    - January 7-22, 2022 Megan Mansfield
        Adding ability to do a single shared fit across all channels
    - February 23-25, 2022 Megan Mansfield
        Added log-uniform and Gaussian priors.
    - February 28-March 1, 2022 Caroline Piaulet
        Adding scatter_ppm parameter. 
    """
    # Group the different variable types
    freenames, freepars, prior1, prior2, priortype, indep_vars = group_variables(
        model)
    if hasattr(meta, 'old_fitparams') and meta.old_fitparams is not None:
        freepars = load_old_fitparams(meta, log, lc.channel, freenames)

    # DYNESTY
    nlive = meta.run_nlive  # number of live points
    bound = meta.run_bound  # bounding method ('multi' = MultiNest-style ellipsoids)
    ndims = len(freepars)  # number of free parameters
    sample = meta.run_sample  # sampling method
    tol = meta.run_tol  # the stopping criterion

    start_lnprob = lnprob(freepars, lc, model, prior1, prior2, priortype,
                          freenames)
    log.writelog(f'Starting lnprob: {start_lnprob}', mute=(not meta.verbose))

    # START DYNESTY
    l_args = [lc, model, freenames]

    log.writelog('Running dynesty...')

    min_nlive = ndims * (ndims + 1) // 2
    if nlive < min_nlive:
        log.writelog(
            f'**** WARNING: You should set run_nlive to at least {min_nlive} ****'
        )

    if hasattr(meta, 'ncpu') and meta.ncpu > 1:
        pool = Pool(meta.ncpu)
        queue_size = meta.ncpu
    else:
        meta.ncpu = 1
        pool = None
        queue_size = None
    sampler = NestedSampler(ln_like,
                            ptform,
                            ndims,
                            pool=pool,
                            queue_size=queue_size,
                            bound=bound,
                            sample=sample,
                            nlive=nlive,
                            logl_args=l_args,
                            ptform_args=[prior1, prior2, priortype])
    sampler.run_nested(dlogz=tol, print_progress=True)  # output progress bar
    res = sampler.results  # get results dictionary from sampler
    if meta.ncpu > 1:
        pool.close()
        pool.join()

    logZdynesty = res.logz[-1]  # value of logZ
    logZerrdynesty = res.logzerr[-1]  # estimate of the statistical uncertainty on logZ

    log.writelog('', mute=(not meta.verbose))
    # Need to temporarily redirect output since res.summary() prints rather than returning a string
    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    res.summary()
    sys.stdout = old_stdout
    log.writelog(mystdout.getvalue(), mute=(not meta.verbose))

    # draw equally weighted posterior samples from the nested samples
    weights = np.exp(res['logwt'] - res['logz'][-1])
    samples = resample_equal(res.samples, weights)
    log.writelog('Number of posterior samples is {}'.format(len(samples)),
                 mute=(not meta.verbose))

    # Compute the medians and uncertainties
    fit_params = []
    upper_errs = []
    lower_errs = []
    for i in range(ndims):
        q = np.percentile(samples[:, i], [16, 50, 84])
        lower_errs.append(q[0])
        fit_params.append(q[1])
        upper_errs.append(q[2])
    fit_params = np.array(fit_params)
    upper_errs = np.array(upper_errs) - fit_params
    lower_errs = fit_params - np.array(lower_errs)

    model.update(fit_params, freenames)
    if "scatter_ppm" in freenames:
        ind = [
            i for i in np.arange(len(freenames))
            if freenames[i][0:11] == "scatter_ppm"
        ]
        for chan in range(len(ind)):
            lc.unc_fit[chan * lc.time.size:(chan + 1) *
                       lc.time.size] = fit_params[ind[chan]] * 1e-6
    elif "scatter_mult" in freenames:
        ind = [
            i for i in np.arange(len(freenames))
            if freenames[i][0:12] == "scatter_mult"
        ]
        for chan in range(len(ind)):
            lc.unc_fit[chan * lc.time.size:(chan + 1) *
                       lc.time.size] = fit_params[
                           ind[chan]] * lc.unc[chan * lc.time.size:(chan + 1) *
                                               lc.time.size]
    else:
        lc.unc_fit = lc.unc

    # Save the fit ASAP so plotting errors don't make you lose everything
    save_fit(meta,
             lc,
             model,
             'dynesty',
             fit_params,
             freenames,
             samples,
             upper_errs=upper_errs,
             lower_errs=lower_errs)

    end_lnprob = lnprob(fit_params, lc, model, prior1, prior2, priortype,
                        freenames)
    log.writelog(f'Ending lnprob: {end_lnprob}', mute=(not meta.verbose))

    # plot using corner.py
    if meta.isplots_S5 >= 5:
        plots.plot_corner(samples, lc, meta, freenames, fitter='dynesty')

    # Make a new model instance
    best_model = copy.copy(model)
    best_model.components[0].update(fit_params, freenames)

    # Plot GP fit + components
    if model.GP:
        plots.plot_GP_components(lc, model, meta, fitter='dynesty')

    # Plot fit
    if meta.isplots_S5 >= 1:
        plots.plot_fit(lc, model, meta, fitter='dynesty')

    # Compute reduced chi-squared
    chi2red = computeRedChiSq(lc, log, model, meta, freenames)

    log.writelog('\nDYNESTY RESULTS:')
    for i in range(ndims):
        if 'scatter_mult' in freenames[i]:
            chan = freenames[i].split('_')[-1]
            if chan.isnumeric():
                chan = int(chan)
            else:
                chan = 0
            scatter_ppm = fit_params[i] * np.ma.median(
                lc.unc[chan * lc.time.size:(chan + 1) * lc.time.size]) * 1e6
            scatter_ppm_upper = upper_errs[i] * np.ma.median(
                lc.unc[chan * lc.time.size:(chan + 1) * lc.time.size]) * 1e6
            scatter_ppm_lower = lower_errs[i] * np.ma.median(
                lc.unc[chan * lc.time.size:(chan + 1) * lc.time.size]) * 1e6
            log.writelog('{0}: {1} (+{2}, -{3}); {4} (+{5}, -{6}) ppm'.format(
                freenames[i], fit_params[i], upper_errs[i], lower_errs[i],
                scatter_ppm, scatter_ppm_upper, scatter_ppm_lower))
        else:
            log.writelog('{0}: {1} (+{2}, -{3})'.format(
                freenames[i], fit_params[i], upper_errs[i], lower_errs[i]))
    log.writelog('')

    # Plot Allan plot
    if meta.isplots_S5 >= 3:
        plots.plot_rms(lc, model, meta, fitter='dynesty')

    # Plot residuals distribution
    if meta.isplots_S5 >= 3:
        plots.plot_res_distr(lc, model, meta, fitter='dynesty')

    best_model.__setattr__('chi2red', chi2red)
    best_model.__setattr__('fit_params', fit_params)

    return best_model
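
The median-and-percentile bookkeeping above (the loop over np.percentile) reduces to a vectorized form; a sketch on a toy sample array, with placeholder names:

import numpy as np

samples = np.random.default_rng(0).normal(size=(1000, 3))  # toy posterior draws
q16, q50, q84 = np.percentile(samples, [16, 50, 84], axis=0)
fit_params = q50          # medians
upper_errs = q84 - q50    # +1 sigma credible offsets
lower_errs = q50 - q16    # -1 sigma credible offsets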
Example #12
def MCMC_diagnostic(path, ndim, p, loglike, ptform, galname, nlive, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=nlive,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': nlive,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        # Save nested data
        # obtain KL divergence
        klds = []
        for i in range(500):
            kld = dyfunc.kld_error(res1, error='simulate')
            klds.append(kld[-1])
        print(np.mean(klds))
        res1['KLval'] = np.mean(klds)
        with open(path + '/result_nested_P' + '{}'.format(nlive) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        fg, ax = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),  # 91.8,98.3,8.88,6.5,60,60
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=None,
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/cornerplot_' + galname + nparams + '.png')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.025, 0.5, 0.975], weights=weights)
        for samps in samples.T
    ]
    print(quantiles)

    # vrotsigma
    sigmavrot1_l = [i for i in samples[:, 0] if (i - MaP[0]) < 0]
    sigmavrot1_r = [i for i in samples[:, 0] if (i - MaP[0]) > 0]
    sigmavrot2_l = [i for i in samples[:, 1] if (i - MaP[1]) < 0]
    sigmavrot2_r = [i for i in samples[:, 1] if (i - MaP[1]) > 0]
    sigmavrot3_l = [i for i in samples[:, 2] if (i - MaP[2]) < 0]
    sigmavrot3_r = [i for i in samples[:, 2] if (i - MaP[2]) > 0]
    sigmavrot4_l = [i for i in samples[:, 3] if (i - MaP[3]) < 0]
    sigmavrot4_r = [i for i in samples[:, 3] if (i - MaP[3]) > 0]

    if len(sigmavrot1_l) == 0: sigmavrot1_l.append(0)
    if len(sigmavrot1_r) == 0: sigmavrot1_r.append(0)
    if len(sigmavrot2_l) == 0: sigmavrot2_l.append(0)
    if len(sigmavrot2_r) == 0: sigmavrot2_r.append(0)
    if len(sigmavrot3_l) == 0: sigmavrot3_l.append(0)
    if len(sigmavrot3_r) == 0: sigmavrot3_r.append(0)
    if len(sigmavrot4_l) == 0: sigmavrot4_l.append(0)
    if len(sigmavrot4_r) == 0: sigmavrot4_r.append(0)

    # vdispsigma
    sigmavdisp1_l = [i for i in samples[:, 4] if (i - MaP[4]) < 0]
    sigmavdisp1_r = [i for i in samples[:, 4] if (i - MaP[4]) > 0]
    sigmavdisp2_l = [i for i in samples[:, 5] if (i - MaP[5]) < 0]
    sigmavdisp2_r = [i for i in samples[:, 5] if (i - MaP[5]) > 0]
    sigmavdisp3_l = [i for i in samples[:, 6] if (i - MaP[6]) < 0]
    sigmavdisp3_r = [i for i in samples[:, 6] if (i - MaP[6]) > 0]
    sigmavdisp4_l = [i for i in samples[:, 7] if (i - MaP[7]) < 0]
    sigmavdisp4_r = [i for i in samples[:, 7] if (i - MaP[7]) > 0]

    if len(sigmavdisp1_l) == 0: sigmavdisp1_l.append(0)
    if len(sigmavdisp1_r) == 0: sigmavdisp1_r.append(0)
    if len(sigmavdisp2_l) == 0: sigmavdisp2_l.append(0)
    if len(sigmavdisp2_r) == 0: sigmavdisp2_r.append(0)
    if len(sigmavdisp3_l) == 0: sigmavdisp3_l.append(0)
    if len(sigmavdisp3_r) == 0: sigmavdisp3_r.append(0)
    if len(sigmavdisp4_l) == 0: sigmavdisp4_l.append(0)
    if len(sigmavdisp4_r) == 0: sigmavdisp4_r.append(0)

    pdict['sigmavrot'] = [(np.std(sigmavrot1_l), np.std(sigmavrot1_r)),
                          (np.std(sigmavrot2_l), np.std(sigmavrot2_r)),
                          (np.std(sigmavrot3_l), np.std(sigmavrot3_r)),
                          (np.std(sigmavrot4_l), np.std(sigmavrot4_r))]
    pdict['sigmavdisp'] = [(np.std(sigmavdisp1_l), np.std(sigmavdisp1_r)),
                           (np.std(sigmavdisp2_l), np.std(sigmavdisp2_r)),
                           (np.std(sigmavdisp3_l), np.std(sigmavdisp3_r)),
                           (np.std(sigmavdisp4_l), np.std(sigmavdisp4_r))]

    if len(MaP) == 8:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]

    if len(MaP) == 9:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

    if len(MaP) == 10:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        pdict['phi'] = MaP[9]

        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

        # phi
        sigmaphi_l = [i for i in samples[:, 9] if (i - MaP[9]) < 0]
        sigmaphi_r = [i for i in samples[:, 9] if (i - MaP[9]) > 0]
        if len(sigmaphi_l) == 0: sigmaphi_l.append(0)
        if len(sigmaphi_r) == 0: sigmaphi_r.append(0)
        pdict['sigmaphi'] = [(np.std(sigmaphi_l), np.std(sigmaphi_r))]

    # We don't need the Data entry
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
Example #13
def runMCMC(path, ndim, p, loglike, ptform, galname, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 8:
        nparams = '_8P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(res1,
                                     truths=np.array([
                                         pdict['vrot'][0], pdict['vrot'][1],
                                         pdict['vrot'][2], pdict['vrot'][3],
                                         pdict['vdisp'][0], pdict['vdisp'][1],
                                         pdict['vdisp'][2], pdict['vdisp'][3]
                                     ]),
                                     truth_color='black',
                                     show_titles=True,
                                     trace_cmap='viridis',
                                     connect=True,
                                     smooth=0.02,
                                     connect_highlight=range(8),
                                     labels=[
                                         r'$v_{rot,225}$', r'$v_{rot,450}$',
                                         r'$v_{rot,675}$', r'$v_{rot,900}$',
                                         r'$\sigma_{225}$', r'$\sigma_{450}$',
                                         r'$\sigma_{675}$', r'$\sigma_{900}$'
                                     ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure for the bound plots (as in the other ndim branches)
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=5,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3]
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))
    elif ndim == 9:
        nparams = '_9P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    elif ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
        for samps in samples.T
    ]

    pdict['sigmavrot'] = [(quantiles[0][0], quantiles[0][2]),
                          (quantiles[1][0], quantiles[1][2]),
                          (quantiles[2][0], quantiles[2][2]),
                          (quantiles[3][0], quantiles[3][2])]
    pdict['sigmavdisp'] = [(quantiles[4][0], quantiles[4][2]),
                           (quantiles[5][0], quantiles[5][2]),
                           (quantiles[6][0], quantiles[6][2]),
                           (quantiles[7][0], quantiles[7][2])]
    pdict['vrot'] = [
        quantiles[0][1], quantiles[1][1], quantiles[2][1], quantiles[3][1]
    ]
    pdict['vdisp'] = [
        quantiles[4][1], quantiles[5][1], quantiles[6][1], quantiles[7][1]
    ]

    if len(quantiles) == 9:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]

    if len(quantiles) == 10:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]
        pdict['phi'] = quantiles[9][1]
        pdict['sigmaphi'] = [(quantiles[9][0], quantiles[9][2])]

    # We don't need data entry, waste of space
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
Example #14
dlogZdynesty = dres.logz[-1]        # value of logZ
dlogZerrdynesty = dres.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using dynamic sampler) is {} ± {}'.format(dlogZdynesty, dlogZerrdynesty))

# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
dpostsamples = resample_equal(dres.samples, dweights)

print('Number of posterior samples (using dynamic sampler) is {}'.format(dpostsamples.shape[0]))

# Now run with the static sampler
sampler = NestedSampler(loglikelihood_dynesty, prior_transform, ndims,
                        bound=bound, sample=sample, nlive=nlive)
sampler.run_nested(dlogz=0.1)

res = sampler.results

logZdynesty = res.logz[-1]        # value of logZ
logZerrdynesty = res.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using static sampler) is {} ± {}'.format(logZdynesty, logZerrdynesty))

# get the posterior samples
weights = np.exp(res['logwt'] - res['logz'][-1])
postsamples = resample_equal(res.samples, weights)

print('Number of posterior samples (using static sampler) is {}'.format(postsamples.shape[0]))
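
Since both runs estimate the same evidence, a quick consistency check is to compare them directly; a short sketch reusing the variables above (the errors add in quadrature, assuming the runs are independent):

dlogZ_diff = dlogZdynesty - logZdynesty
dlogZ_diff_err = np.sqrt(dlogZerrdynesty**2 + logZerrdynesty**2)
print('Difference in log-evidence (dynamic - static) is {} ± {}'.format(
    dlogZ_diff, dlogZ_diff_err))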
Example #15
# get the posterior samples
dweights = np.exp(dres['logwt'] - dres['logz'][-1])
dpostsamples = resample_equal(dres.samples, dweights)

print('Number of posterior samples (using dynamic sampler) is {}'.format(
    dpostsamples.shape[0]))

# Now run with the static sampler
sampler = NestedSampler(loglikelihood_dynesty,
                        prior_transform,
                        ndims,
                        bound=bound,
                        sample=sample,
                        nlive=nlive)
sampler.run_nested(dlogz=0.1)

res = sampler.results

logZdynesty = res.logz[-1]  # value of logZ
logZerrdynesty = res.logzerr[-1]  # estimate of the statistical uncertainty on logZ

# output marginal likelihood
print('Marginalised evidence (using static sampler) is {} ± {}'.format(
    logZdynesty, logZerrdynesty))

# get the posterior samples
weights = np.exp(res['logwt'] - res['logz'][-1])
postsamples = resample_equal(res.samples, weights)
Example #16
obs_times, obs_wavs, obs_excess, obs_excess_error = get_obs_data(
    config["spectrum"], config["error"], config["min_wav"], config["max_wav"])


def multinest_ln_like(cube):
    return get_ln_like(cube, config, obs_times, obs_wavs, obs_excess,
                       obs_excess_error)


sampler = NestedSampler(multinest_ln_like,
                        transform_prior,
                        3,
                        bound='multi',
                        nlive=100)
sampler.run_nested()
result = sampler.results
normalized_weights = np.exp(result.logwt - np.max(result.logwt))
normalized_weights /= np.sum(normalized_weights)
result.weights = normalized_weights
with open("dynesty_result.pkl", "wb") as f:
    pickle.dump(result, f)

best_params = result.samples[np.argmax(result.logl)]
get_ln_like(best_params,
            config,
            obs_times,
            obs_wavs,
            obs_excess,
            obs_excess_error,
            plot=True)
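
To reuse the stored result later, the pickle can be loaded back and converted into equally weighted posterior samples; a minimal sketch, assuming the file written above:

import pickle

from dynesty.utils import resample_equal

with open("dynesty_result.pkl", "rb") as f:
    result = pickle.load(f)

# The weights attached above are already normalized to sum to one
posterior_samples = resample_equal(result.samples, result.weights)
print(posterior_samples.shape)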
Example #17
def rebuild_current_distribution(
    fields: np.ndarray,
    ics: np.ndarray,
    jj_size: float,
    current_pattern: List[Union[Literal["f"], str]],
    sweep_invariants: List[Union[Literal["offset"], Literal["field_to_k"]]] = [
        "offset",
        "field_to_k",
    ],
    precision: float = 100,
    n_points: int = 2 ** 10 + 1,
) -> dict:
    """Rebuild a current distribution from a Fraunhofer pattern.

    This assumes uniform field focusing, since allowing non-uniform
    focusing would lead to a much larger parameter space to explore.

    Parameters
    ----------
    fields : np.ndarray
        Out of plane field for which the critical current was measured.
    ics : np.ndarray
        Critical current of the junction.
    jj_size : float
        Size of the junction.
    current_pattern : List[Literal["f", "v"]]
        Describes the pieces used to represent the junction. If the input
        arrays are more than 1D, "f" means that the value is shared across
        the outer dimension, "v" means that the slice takes a different
        value for each outer dimension (i.e. one value per sweep).
    sweep_invariants : Tuple[Literal["offset", "field_to_k"], ...]
        Quantities that are invariant across sweeps for more than 1D inputs.
    precision : float, optional
        Stopping criterion for the sampling, passed to ``run_nested`` as
        ``dlogz``.
    n_points : int, optional
        Number of points used when computing the Fraunhofer pattern.

    Returns
    -------
    dict
        Sampling results, with the reconstructed Fraunhofer parameters
        stored under the "fraunhofer_params" key.

    """
    # Get the offset and estimated amplitude used in the prior.
    # We do not use the estimated current and phase distributions, to give
    # more freedom to the algorithm.
    # NOTE: assuming one site per segment of the current pattern, and that
    # the third value returned by guess_current_distribution is the
    # estimated amplitude.
    site_number = len(current_pattern)
    offsets, first_node_locs, amplitudes, _, _ = guess_current_distribution(
        fields, ics, site_number, jj_size
    )
    # Gives a Fraunhofer pattern at the first node for v[1] = 1
    field_to_ks = 2 * np.pi / jj_size / np.abs(first_node_locs - offsets)

    # Determine the dimensionality of the problem based on the invariants and
    # the shape of the inputs.
    if len(sweep_invariants) > 2:
        raise ValueError("There are at most 2 invariants.")
    if any(k not in ("offset", "field_to_k") for k in sweep_invariants):
        raise ValueError(
            f"Invalid invariant specified {sweep_invariants}, "
            "valid values are 'offset', 'field_to_k'."
        )

    shape = fields.shape[:-1]
    shape_product = prod(shape) if shape else 0

    if shape_product == 0 and any(p.startswith("v") for p in current_pattern):
        raise ValueError(
            "Found variable current in the distribution but the measurements are 1D."
        )

    dim = len(sweep_invariants) + current_pattern.count("f")
    dim += shape_product * (current_pattern.count("v") + 2 - len(sweep_invariants))

    # Pre-compute slices to access elements in the prior and log-like
    offset_access = slice(
        0, 1 if "offset" in sweep_invariants else (shape_product or 1)
    )
    field_to_k_access = slice(
        offset_access.stop,
        offset_access.stop
        + (1 if "field_to_k" in sweep_invariants else (shape_product or 1)),
    )

    stop = field_to_k_access.stop
    current_density_accesses = []
    for p in current_pattern:
        if p == "f":
            current_density_accesses.append(slice(stop, stop + 1))
            stop += 1
        elif p == "v":
            current_density_accesses.append(slice(stop, stop + (shape_product or 1)))
            stop += current_density_accesses[-1].stop
        else:
            raise ValueError(
                f"Valid values in current_pattern are 'f' and 'v', found '{p}'"
            )

    def prior(u):
        """Map samples drawn uniformly in [0, 1] to the relevant value ranges.

        For all values we consider the values in the prior to be the log of
        the values we are looking for.

        """
        v = np.empty_like(u)
        # Offsets and field-to-k factors are sampled as log-corrections in [-2, 2]
        v[offset_access] = 4 * u[offset_access] - 2
        v[field_to_k_access] = 4 * u[field_to_k_access] - 2

        # For all the amplitude we map the value between 0 and -X since the
        # amplitude of a single segment cannot be larger than the total current
        # X is determined based on the number of segments
        ampl = -np.log10(len(current_pattern))
        for sl in current_density_accesses:
            v[sl] = u[sl] * ampl

        return v

    def loglike(v):
        """Compute the distance to the data."""
        # We turn invariant inputs into their variant form (from 1 occurrence
        # in v to n repetitions in w) to ease a systematic writing of the
        # log-likelihood.
        step = shape_product or 1
        w = np.empty((2 + len(current_pattern)) * step)
        w[0:step] = v[offset_access]
        w[step : 2 * step] = v[field_to_k_access]
        stop = 2 * step
        for sl in current_density_accesses:
            w[stop : stop + step] = v[sl]
            stop += step
        w_off = w[0:step]
        w_f2k = w[step : 2 * step]

        # Pack the current distribution so that each line corresponds to a
        # different sweep
        c_density = w[2 * step :].reshape((len(current_pattern), -1)).T

        # One squared-residual sum per sweep (0-d for 1D inputs)
        err = np.zeros(shape or ())

        it = np.nditer(
            (offsets, first_node_locs, field_to_ks, amplitudes), ["multi_index"]
        )
        for i, (off, fnloc, f2k, ampl) in enumerate(it):
            # Compute the offset
            f_off = off + np.sign(w_off[i]) * 10 ** -abs(w_off[i]) * fnloc

            # Compute the Fraunhofer pattern
            f = produce_fraunhofer_fast(
                (fields[it.multi_index] - f_off),
                f2k * 10 ** w_f2k[i],
                jj_size,
                c_density[i],
                n_points,
            )

            # Compute and store the error
            err[it.multi_index] = np.sum(
                (100 * (ics[it.multi_index] - f) / ampl) ** 2
            )

        # dynesty expects a scalar log-likelihood
        return -float(np.sum(err))

    # XXX do that nasty part later
    sampler = NestedSampler(loglike, prior, dim)
    sampler.run_nested(dlogz=precision)
    res = sampler.results
    weights = np.exp(res.logwt - res.logz[-1])
    mu, cov = utils.mean_and_cov(res["samples"], weights)

    res["fraunhofer_params"] = {
        "offset": offset + np.sign(mu[0]) * 10 ** -abs(mu[0]) * first_node_loc,
        "field_to_k": 2 * np.pi / jj_size / abs(first_node_loc - offset) * 10 ** mu[1],
        "amplitude": amplitude * 10 ** mu[2],
        "current_distribution": np.array(
            [1 - np.sum(mu[3 : 3 + site_number - 1])]
            + list(mu[3 : 3 + site_number - 1])
        ),
        "phase_distribution": np.array(
            [0] + list(mu[3 + site_number - 1 : 3 + 2 * site_number - 2])
        ),
    }

    return res
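
A hypothetical call for a single 1D sweep, where fields and ics are the measured out-of-plane fields and critical currents, might look like the following (the argument values are illustrative, not prescriptive):

res = rebuild_current_distribution(
    fields,
    ics,
    jj_size=1.0,                      # junction size, in the units used by the model
    current_pattern=["f", "f", "f"],  # three segments, each fixed across sweeps
    precision=0.1,                    # forwarded to run_nested as dlogz
)
print(res["fraunhofer_params"]["current_distribution"])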
Example #18
def main(path2config):

    # load the yaml parameters
    with open(path2config) as f:
        config = yaml.safe_load(f)
    sim_params = config['sim_params']
    HOD_params = config['HOD_params']
    clustering_params = config['clustering_params']
    data_params = config['data_params']
    dynesty_config_params = config['dynesty_config_params']
    fit_params = config['dynesty_fit_params']

    # create a new abacushod object and load the subsamples
    newBall = AbacusHOD(sim_params, HOD_params, clustering_params)

    # read data parameters
    newData = wp_Data(data_params, HOD_params)

    # parameters to fit
    nparams = len(fit_params.keys())
    param_mapping = {}
    param_tracer = {}
    params = np.zeros((nparams, 2))
    for key in fit_params.keys():
        mapping_idx = fit_params[key][0]
        tracer_type = fit_params[key][-1]
        param_mapping[key] = mapping_idx
        param_tracer[key] = tracer_type
        params[mapping_idx, :] = fit_params[key][1:-1]

    # Make path to output
    os.makedirs(os.path.expanduser(dynesty_config_params['path2output']),
                exist_ok=True)

    # dynesty parameters
    nlive = dynesty_config_params['nlive']
    maxcall = dynesty_config_params['maxcall']
    method = dynesty_config_params['method']
    bound = dynesty_config_params['bound']

    # where to record
    prefix_chain = os.path.join(
        os.path.expanduser(dynesty_config_params['path2output']),
        dynesty_config_params['chainsPrefix'])

    # initiate sampler
    found_file = os.path.isfile(prefix_chain + '.dill')
    if (not found_file) or (not dynesty_config_params['rerun']):

        # initialize our nested sampler
        sampler = NestedSampler(
            lnprob,
            prior_transform,
            nparams,
            logl_args=[param_mapping, param_tracer, newData, newBall],
            ptform_args=[params[:, 0], params[:, 1]],
            nlive=nlive,
            sample=method,
            rstate=np.random.RandomState(dynesty_config_params['rseed']))
        # first_update = {'min_eff': 20})

    else:
        # load sampler to continue the run
        with open(prefix_chain + '.dill', "rb") as f:
            sampler = dill.load(f)
        sampler.rstate = np.load(prefix_chain + '_results.npz',
                                 allow_pickle=True)['rstate']
    print("run sampler")

    sampler.run_nested(maxcall=maxcall)

    # save sampler itself
    with open(prefix_chain + '.dill', "wb") as f:
        dill.dump(sampler, f)
    res1 = sampler.results
    np.savez(prefix_chain + '_results.npz',
             res=res1,
             rstate=np.random.get_state())
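
After the run, the saved results can be turned into an equally weighted chain for downstream analysis; a short sketch reusing res1 from above:

from dynesty import utils as dyfunc

# Importance weights of the nested samples, normalized by the evidence
weights = np.exp(res1.logwt - res1.logz[-1])
# Resample to an equally weighted posterior chain and save it as text
chain = dyfunc.resample_equal(res1.samples, weights)
np.savetxt(prefix_chain + '_chain.txt', chain)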
Example #19
    def run_multinest(self,
                      transit_bins,
                      transit_depths,
                      transit_errors,
                      eclipse_bins,
                      eclipse_depths,
                      eclipse_errors,
                      fit_info,
                      include_condensation=True,
                      plot_best=False,
                      maxiter=None,
                      maxcall=None,
                      nlive=100,
                      **dynesty_kwargs):
        '''Runs nested sampling to retrieve atmospheric parameters.

        Parameters
        ----------
        transit_bins : array_like, shape (N,2)
            Wavelength bins, where wavelength_bins[i][0] is the start
            wavelength and wavelength_bins[i][1] is the end wavelength for
            bin i.
        transit_depths : array_like, length N
            Measured transit depths for the specified wavelength bins
        transit_errors : array_like, length N
            Errors on the aforementioned transit depths
        eclipse_bins : array_like, shape (N,2)
            Wavelength bins, where wavelength_bins[i][0] is the start
            wavelength and wavelength_bins[i][1] is the end wavelength for
            bin i.
        eclipse_depths : array_like, length N
            Measured eclipse depths for the specified wavelength bins
        eclipse_errors : array_like, length N
            Errors on the aforementioned eclipse depths
        fit_info : :class:`.FitInfo` object
            Tells us what parameters to
            freely vary, and in what range those parameters can vary. Also
            sets default values for the fixed parameters.
        include_condensation : bool, optional
            When determining atmospheric abundances, whether to include
            condensation.
        plot_best : bool, optional
            If True, plots the best fit model with the data
        maxiter : int, optional
            Maximum number of sampler iterations to run
        maxcall : int, optional
            Maximum number of likelihood evaluations to run
        nlive : int
            Number of live points to use for nested sampling
        **dynesty_kwargs : keyword arguments to pass to dynesty's NestedSampler

        Returns
        -------
        result : Result object
            This returns dynesty's NestedSampler 'results' field, slightly
            modified.  The object is
            dictionary-like and has many useful items.  For example,
            result.samples (or alternatively, result["samples"]) are the
            parameter values of each sample, result.logwt contains the
            log(weights), result.weights contains the normalized weights 
            (this is added by PLATON), 
            result.logl contains the ln likelihoods, and result.logp
            contains the ln posteriors (this is added by PLATON).  result.logz
            is the natural logarithm of the evidence.
        '''
        transit_calc = TransitDepthCalculator(
            include_condensation=include_condensation)
        transit_calc.change_wavelength_bins(transit_bins)
        eclipse_calc = EclipseDepthCalculator()
        eclipse_calc.change_wavelength_bins(eclipse_bins)

        self._validate_params(fit_info, transit_calc)

        def transform_prior(cube):
            new_cube = np.zeros(len(cube))
            for i in range(len(cube)):
                new_cube[i] = fit_info._from_unit_interval(i, cube[i])
            return new_cube

        def multinest_ln_like(cube):
            ln_like = self._ln_like(cube, transit_calc, eclipse_calc, fit_info,
                                    transit_depths, transit_errors,
                                    eclipse_depths, eclipse_errors)
            if np.random.randint(100) == 0:
                print("\nEvaluated params: {}".format(
                    self.pretty_print(fit_info)))
            return ln_like

        num_dim = fit_info._get_num_fit_params()
        sampler = NestedSampler(multinest_ln_like,
                                transform_prior,
                                num_dim,
                                bound='multi',
                                sample='rwalk',
                                update_interval=float(num_dim),
                                nlive=nlive,
                                **dynesty_kwargs)
        sampler.run_nested(maxiter=maxiter, maxcall=maxcall)
        result = sampler.results

        result.logp = result.logl + np.array(
            [fit_info._ln_prior(params) for params in result.samples])
        best_params_arr = result.samples[np.argmax(result.logp)]

        # Normalize in a numerically stable way (logwt can overflow exp)
        normalized_weights = np.exp(result.logwt - np.max(result.logwt))
        normalized_weights /= np.sum(normalized_weights)
        result.weights = normalized_weights

        write_param_estimates_file(
            dynesty.utils.resample_equal(result.samples, normalized_weights),
            best_params_arr, np.max(result.logp), fit_info.fit_param_names)

        if plot_best:
            self._ln_prob(best_params_arr,
                          transit_calc,
                          eclipse_calc,
                          fit_info,
                          transit_depths,
                          transit_errors,
                          eclipse_depths,
                          eclipse_errors,
                          plot=True)
            plt.figure(3)
            dyplot.runplot(result)
            plt.savefig("dyplot_runplot.png")
            plt.figure(4)
            dyplot.traceplot(result)
            plt.savefig("dyplot_traceplot.png")
        return result
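
Given the result returned by run_multinest and the fit_info used for the run, the weighted samples can be summarized per parameter with dynesty's quantile helper; a minimal sketch:

from dynesty import utils as dyfunc

# 16th/50th/84th percentile summary for each fitted parameter
for name, samps in zip(fit_info.fit_param_names, result.samples.T):
    low, med, high = dyfunc.quantile(samps, [0.16, 0.5, 0.84],
                                     weights=result.weights)
    print("{}: {:.3g} (+{:.3g} / -{:.3g})".format(name, med, high - med,
                                                  med - low))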