Code example #1
def plot_status(res, labels=None, periodic=None):
    # Generate a plot of the trace.

    try:
        fig, ax = dyplot.traceplot(res, show_titles=True, trace_cmap="viridis",\
                connect=True, connect_highlight=range(5), labels=labels)
    except Exception:
        # If the run hasn't converged enough for the default KDE, fall back to kde=False.
        fig, ax = dyplot.traceplot(res, show_titles=True, trace_cmap="viridis",\
                connect=True, connect_highlight=range(5), labels=labels, \
                kde=False)

    fig.savefig("traceplot.png")

    plt.close(fig)

    # Generate a bounds cornerplot.

    fig, ax = dyplot.cornerbound(res, it=res.niter-1, periodic=periodic, \
            prior_transform=sampler.prior_transform, show_live=True, \
            labels=labels)

    fig.savefig("boundplot.png")

    plt.close(fig)
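
Example #1 assumes that dyplot, plt, and a module-level sampler already exist in the surrounding project. For reference, a minimal self-contained sketch of the same pattern, using a hypothetical toy Gaussian model rather than code from any of the projects listed here, might look like this:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # non-interactive backend so the figure can be saved headlessly
import matplotlib.pyplot as plt
import dynesty
from dynesty import plotting as dyplot

ndim = 2

def loglike(x):
    # Standard-normal log-likelihood in each dimension.
    return -0.5 * np.sum(x**2) - 0.5 * ndim * np.log(2.0 * np.pi)

def prior_transform(u):
    # Map the unit cube to a flat prior on [-10, 10] in each dimension.
    return 20.0 * u - 10.0

sampler = dynesty.NestedSampler(loglike, prior_transform, ndim, nlive=200)
sampler.run_nested(print_progress=False)

fig, axes = dyplot.traceplot(sampler.results, show_titles=True,
                             labels=[r"$x_0$", r"$x_1$"])
fig.savefig("traceplot.png")
plt.close(fig)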
Code example #2
    def traceplot(self, **kwargs):

        if conf.instance["general"]["test"]["test_mode"]:
            return None

        dyplot.traceplot(results=self.samples.results, **kwargs)

        self.output.to_figure(structure=None, auto_filename="traceplot")
        self.mat_plot_1d.figure.close()
Code example #3
def test_gaussian():
    logz_tol = 1
    sampler = dynesty.NestedSampler(loglikelihood_gau,
                                    prior_transform_gau,
                                    ndim_gau,
                                    nlive=nlive)
    sampler.run_nested(print_progress=printing)

    # add samples
    # check continuation behavior
    sampler.run_nested(dlogz=0.1, print_progress=printing)

    # get errors
    nerr = 2
    for i in range(nerr):
        sampler.reset()
        sampler.run_nested(print_progress=False)
        results = sampler.results
        pos = results.samples
        wts = np.exp(results.logwt - results.logz[-1])
        mean, cov = dyfunc.mean_and_cov(pos, wts)
        logz = results.logz[-1]
        assert (np.abs(logz - logz_truth_gau) < logz_tol)
    # check summary
    res = sampler.results
    res.summary()

    # check plots
    dyplot.runplot(sampler.results)
    plt.close()
    dyplot.traceplot(sampler.results)
    plt.close()
    dyplot.cornerpoints(sampler.results)
    plt.close()
    dyplot.cornerplot(sampler.results)
    plt.close()
    dyplot.boundplot(sampler.results,
                     dims=(0, 1),
                     it=3000,
                     prior_transform=prior_transform_gau,
                     show_live=True,
                     span=[(-10, 10), (-10, 10)])
    plt.close()
    dyplot.cornerbound(sampler.results,
                       it=3500,
                       prior_transform=prior_transform_gau,
                       show_live=True,
                       span=[(-10, 10), (-10, 10)])
    plt.close()
Code example #4
 def plot_current_state(self):
     import matplotlib.pyplot as plt
     if self.check_point_plot:
         import dynesty.plotting as dyplot
         labels = [label.replace('_', ' ') for label in self.search_parameter_keys]
         try:
             filename = "{}/{}_checkpoint_trace.png".format(self.outdir, self.label)
             fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
             fig.tight_layout()
             fig.savefig(filename)
         except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError, OverflowError, Exception) as e:
             logger.warning(e)
             logger.warning('Failed to create dynesty state plot at checkpoint')
         finally:
             plt.close("all")
         try:
             filename = "{}/{}_checkpoint_trace_unit.png".format(self.outdir, self.label)
             from copy import deepcopy
             temp = deepcopy(self.sampler.results)
             temp["samples"] = temp["samples_u"]
             fig = dyplot.traceplot(temp, labels=labels)[0]
             fig.tight_layout()
             fig.savefig(filename)
         except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError, OverflowError, Exception) as e:
             logger.warning(e)
             logger.warning('Failed to create dynesty unit state plot at checkpoint')
         finally:
             plt.close("all")
         try:
             filename = "{}/{}_checkpoint_run.png".format(self.outdir, self.label)
             fig, axs = dyplot.runplot(
                 self.sampler.results, logplot=False, use_math_text=False)
             fig.tight_layout()
             plt.savefig(filename)
         except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError) as e:
             logger.warning(e)
             logger.warning('Failed to create dynesty run plot at checkpoint')
         finally:
             plt.close('all')
         try:
             filename = "{}/{}_checkpoint_stats.png".format(self.outdir, self.label)
             fig, axs = dynesty_stats_plot(self.sampler)
             fig.tight_layout()
             plt.savefig(filename)
         except (RuntimeError, ValueError) as e:
             logger.warning(e)
             logger.warning('Failed to create dynesty stats plot at checkpoint')
         finally:
             plt.close('all')
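
The second try-block in this example plots the trace in the unit hypercube by copying the results and substituting the stored unit-cube samples for the transformed ones. Extracted as a standalone sketch (assuming, as in the code above, that results is a dict-like dynesty Results object and labels matches its dimensionality):

from copy import deepcopy
from dynesty import plotting as dyplot

# Copy the results and swap in the unit-cube samples, so traceplot shows the
# run in the unit hypercube instead of the physical parameter space.
unit_results = deepcopy(results)
unit_results["samples"] = unit_results["samples_u"]
fig, axes = dyplot.traceplot(unit_results, labels=labels)
fig.tight_layout()
fig.savefig("checkpoint_trace_unit.png")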
Code example #5
File: plot.py Project: rodluger/starry_process
def plot_trace(results, **kwargs):
    """
    Plot the nested sampling trace.

    """
    # Get kwargs
    kwargs = update_with_defaults(**kwargs)
    gen_kwargs = kwargs["generate"]
    labels = ["r", "a", "b", "c", "n", "bm", "blv"]

    # Get truths
    try:
        a, b = StarryProcess().latitude._transform.transform(
            gen_kwargs["latitude"]["mu"], gen_kwargs["latitude"]["sigma"])
    except Exception:
        a = np.nan
        b = np.nan
    truths = [
        gen_kwargs["radius"]["mu"],
        a,
        b,
        gen_kwargs["contrast"]["mu"],
        gen_kwargs["nspots"]["mu"],
        np.nan,
        np.nan,
    ]
    ndim = results.samples.shape[-1]
    fig, _ = dyplot.traceplot(results,
                              truths=truths[:ndim],
                              labels=labels[:ndim])
    return fig
Code example #6
 def generate_trace_plots(self, dynesty_results):
     check_directory_exists_and_if_not_mkdir(self.outdir)
     filename = '{}/{}_trace.png'.format(self.outdir, self.label)
     logger.debug("Writing trace plot to {}".format(filename))
     from dynesty import plotting as dyplot
     fig, axes = dyplot.traceplot(dynesty_results,
                                  labels=self.result.parameter_labels)
     fig.tight_layout()
     fig.savefig(filename)
Code example #7
File: dynesty_gaussian.py Project: keflavich/nestfit
def plot_traceplot(dsampler):
    mix = test_mixture()
    truths = np.concatenate(
        (mix.amp.flatten(), mix.cen.flatten(), mix.std.flatten()))
    labels = ['a1', 'a2', 'a3', 'c1', 'c2', 'c3', 's1', 's2', 's3']
    fig, axes = dyplot.traceplot(dsampler.results,
                                 truths=truths,
                                 labels=labels,
                                 fig=plt.subplots(9, 2, figsize=(8, 11)))
    fig.tight_layout()
    plt.savefig(PLOT_DIR / Path('test_traceplot_3gauss.pdf'))
    plt.close('all')
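
When a pre-built figure is supplied via the fig keyword, traceplot expects an (ndim, 2) grid of axes, which is why this nine-parameter fit builds plt.subplots(9, 2). A short sketch of the same pattern for a hypothetical three-parameter results object:

import matplotlib.pyplot as plt
from dynesty import plotting as dyplot

# Pre-build a 3x2 grid (one row per parameter, two panels each) and hand it
# to traceplot via the fig keyword.
fig, axes = plt.subplots(3, 2, figsize=(8, 6))
fig, axes = dyplot.traceplot(results, fig=(fig, axes),
                             labels=["p0", "p1", "p2"])
fig.tight_layout()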
Code example #8
 def plot_current_state(self):
     import matplotlib.pyplot as plt
     if self.check_point_plot:
         import dynesty.plotting as dyplot
         labels = [
             label.replace('_', ' ') for label in self.search_parameter_keys
         ]
         try:
             filename = "{}/{}_checkpoint_trace.png".format(
                 self.outdir, self.label)
             fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
             fig.tight_layout()
             fig.savefig(filename)
         except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError,
                 OverflowError, Exception) as e:
             logger.warning(e)
             logger.warning(
                 'Failed to create dynesty state plot at checkpoint')
         finally:
             plt.close("all")
         try:
             filename = "{}/{}_checkpoint_run.png".format(
                 self.outdir, self.label)
             fig, axs = dyplot.runplot(self.sampler.results,
                                       logplot=False,
                                       use_math_text=False)
             fig.tight_layout()
             plt.savefig(filename)
         except (RuntimeError, np.linalg.linalg.LinAlgError,
                 ValueError) as e:
             logger.warning(e)
             logger.warning(
                 'Failed to create dynesty run plot at checkpoint')
         finally:
             plt.close('all')
         try:
             filename = "{}/{}_checkpoint_stats.png".format(
                 self.outdir, self.label)
             fig, axs = plt.subplots(nrows=3, sharex=True)
             for ax, name in zip(axs, ["boundidx", "nc", "scale"]):
                 ax.plot(getattr(self.sampler, "saved_{}".format(name)),
                         color="C0")
                 ax.set_ylabel(name)
             axs[-1].set_xlabel("iteration")
             fig.tight_layout()
             plt.savefig(filename)
         except (RuntimeError, ValueError) as e:
             logger.warning(e)
             logger.warning(
                 'Failed to create dynesty stats plot at checkpoint')
         finally:
             plt.close('all')
Code example #9
 def plot_unweighted(weightedouput, population):
     """
     Produces the corner, trace and run plots for the results of the Bayesian analysis,
     using the individual populations without normalisation.
     Inputs:
         weightedouput: Result dictionary from the Bayesian analysis.
         population: Label of the population to be used.
     """
     fig1, _ = dyplot.cornerplot(weightedouput, color='blue', \
                 truth_color='black', show_titles=True, max_n_ticks=3, quantiles=None)
     fig2, _ = dyplot.traceplot(weightedouput, truth_color='black', \
                  show_titles=True, trace_cmap='viridis')
     fig3, _ = dyplot.runplot(weightedouput, lnz_error=False)
     fig1.savefig("./output/"+population+"_corner_unweighted.png")
     fig2.savefig("./output/"+population+"_trace_unweighted.png")
     fig3.savefig("./output/"+population+"_runplot_unweighted.png")
Code example #10
File: plot_utils.py Project: astroshrey/ttvnest
def dynesty_plots(system, truthvals = None, outname = None):
	if system.results == None:
		raise ValueError("No retrieval found in your system object!")

	if outname is not None:
		names = [outname + '_cornerplot.png',
			outname + '_traceplot.png',
			outname + '_runplot.png']
	#cornerplot
	plt.figure(figsize = (20, 20))
	cfig, caxes = dyplot.cornerplot(system.results, color = 'blue',
		max_n_ticks = 3, labels = system.fit_param_names,
		truths = truthvals)
	if outname == None:
		plt.show()
	else:
		plt.savefig(names[0])
		plt.close('all')

	#traceplot
	plt.figure(figsize = (20, 20))
	tfig, taxes = dyplot.traceplot(system.results, truths = truthvals,
		show_titles = True, trace_cmap = 'viridis',
		labels = system.fit_param_names)
	if outname == None:
		plt.show()
	else:
		plt.savefig(names[1])
		plt.close('all')

	#runplot
	try:
		plt.figure(figsize = (20, 20))
		rfig, raxes = dyplot.runplot(system.results)
		if outname == None:
			plt.show()
		else:
			plt.savefig(names[2])
			plt.close('all')
			return names
	except ValueError:
		plt.close('all')
		print("Axis limits error on runplot; internal to dynesty")
		pass

	return None
Code example #11
 def drun(self, dlogz=0.1, ndim=4):
     """ simple main function for sampling. """
     # initialize our nested sampler
     sampler = dynesty.NestedSampler(self.log_likelihood, likemod.pt_te,
                                     ndim)
     sampler.run_nested(dlogz=dlogz)
     self.res = sampler.results
     self.res.summary()
     fig, _ = dyplot.runplot(self.res, lnz_error=False)
     fig1, _ = dyplot.traceplot(self.res, truths=np.zeros(ndim), \
                                 truth_color='black', show_titles=True, \
                                 trace_cmap='viridis', connect=True, \
                                 connect_highlight=range(10))
     fig2, _ = dyplot.cornerplot(self.res, color='blue', \
                             truth_color='black', show_titles=True, \
                             max_n_ticks=3, quantiles=None)
     fig.savefig('./output/evidence.png')
     fig1.savefig('./output/tracers.png')
     fig2.savefig('./output/cornerplot.png')
Code example #12
def plot_current_state(sampler, search_parameter_keys, outdir, label):
    labels = [label.replace("_", " ") for label in search_parameter_keys]
    try:
        filename = "{}/{}_checkpoint_trace.png".format(outdir, label)
        fig = dyplot.traceplot(sampler.results, labels=labels)[0]
        fig.tight_layout()
        fig.savefig(filename)
    except (
            AssertionError,
            RuntimeError,
            np.linalg.linalg.LinAlgError,
            ValueError,
    ) as e:
        logger.warning(e)
        logger.warning("Failed to create dynesty state plot at checkpoint")
    finally:
        plt.close("all")
    try:
        filename = "{}/{}_checkpoint_run.png".format(outdir, label)
        fig, axs = dyplot.runplot(sampler.results)
        fig.tight_layout()
        plt.savefig(filename)
    except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError) as e:
        logger.warning(e)
        logger.warning("Failed to create dynesty run plot at checkpoint")
    finally:
        plt.close("all")
    try:
        filename = "{}/{}_checkpoint_stats.png".format(outdir, label)
        fig, axs = plt.subplots(nrows=3, sharex=True)
        for ax, name in zip(axs, ["boundidx", "nc", "scale"]):
            ax.plot(getattr(sampler, f"saved_{name}"), color="C0")
            ax.set_ylabel(name)
        axs[-1].set_xlabel("iteration")
        fig.tight_layout()
        plt.savefig(filename)
    except (RuntimeError, ValueError) as e:
        logger.warning(e)
        logger.warning("Failed to create dynesty stats plot at checkpoint")
    finally:
        plt.close("all")
Code example #13
def pyorbit_dynesty(config_in, input_datasets=None, return_output=None):

    output_directory = './' + config_in['output'] + '/dynesty/'

    mc = ModelContainerDynesty()
    pars_input(config_in, mc, input_datasets)

    if mc.nested_sampling_parameters['shutdown_jitter']:
        for dataset in mc.dataset_dict.values():
            dataset.shutdown_jitter()

    mc.model_setup()
    mc.create_variables_bounds()
    mc.initialize_logchi2()

    mc.create_starting_point()

    results_analysis.results_resumen(mc, None, skip_theta=True)

    mc.output_directory = output_directory

    print()
    print('Reference Time Tref: ', mc.Tref)
    print()
    print('*************************************************************')
    print()

    import dynesty

    # "Standard" nested sampling.
    sampler = dynesty.NestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                    mc.ndim)
    sampler.run_nested()
    results = sampler.results

    # "Dynamic" nested sampling.
    dsampler = dynesty.DynamicNestedSampler(mc.dynesty_call, mc.dynesty_priors,
                                            mc.ndim)
    dsampler.run_nested()
    dresults = dsampler.results

    from dynesty import plotting as dyplot

    # Plot a summary of the run.
    rfig, raxes = dyplot.runplot(results)

    # Plot traces and 1-D marginalized posteriors.
    tfig, taxes = dyplot.traceplot(results)

    # Plot the 2-D marginalized posteriors.
    cfig, caxes = dyplot.cornerplot(results)

    from dynesty import utils as dyfunc

    # Extract sampling results.
    samples = results.samples  # samples
    weights = np.exp(results.logwt - results.logz[-1])  # normalized weights

    # Compute 5%-95% quantiles.
    quantiles = dyfunc.quantile(samples, [0.05, 0.95], weights=weights)

    # Compute weighted mean and covariance.
    mean, cov = dyfunc.mean_and_cov(samples, weights)

    # Resample weighted samples.
    samples_equal = dyfunc.resample_equal(samples, weights)

    # Generate a new set of results with statistical+sampling uncertainties.
    results_sim = dyfunc.simulate_run(results)
    """ A dummy file is created to let the cpulimit script to proceed with the next step"""
    nested_sampling_create_dummy_file(mc)

    if return_output:
        return mc
    else:
        return
Code example #14
def main(args=None):
    if args.results == "none":
        ndim = len(JOINT_PRIOR)
        date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        image_positions = pd.read_csv(args.image_positions)
        pprint(image_positions)
        # A=0, B=1, C=2, D=3
        x_image = image_positions["theta_x"].to_numpy()
        y_image = image_positions["theta_y"].to_numpy()
        quad_model = QuadPseudoNIELensModel(x_image, y_image, args.plate_scale)
        time_delays = pd.read_csv(args.time_delays)
        pprint(time_delays)

        # Expected: DA=0, DB=1, DC=2 (index)
        sigma_t = time_delays["sigma"].to_numpy()  # units of days
        delta_t = time_delays["delta_t"].to_numpy()
        joint_loglikelihood = joint_loglikelihood_func(quad_model, delta_t,
                                                       sigma_t)

        with Pool(args.nworkers) as pool:
            sampler = DynamicNestedSampler(joint_loglikelihood,
                                           joint_prior_transform,
                                           ndim,
                                           pool=pool,
                                           nlive=args.nlive,
                                           queue_size=args.nworkers)
            sampler.run_nested()
            res = sampler.results

        # save results
        with open(f"../results/joint_result_{date}.p", "wb") as f:
            pickle.dump(res, f)
    else:  # we just want to plot the result from an older run
        with open(args.results, "rb") as f:
            res = pickle.load(f)
        ndim = res.samples.shape[1]

    if args.plot_results:
        # trace plot
        fig, axs = dyplot.traceplot(
            res,
            show_titles=True,
            trace_cmap='plasma',
            connect=True,
            connect_highlight=range(5),
            labels=LABELS,
        )
        fig.tight_layout(pad=2.0)
        fig.savefig("../figures/joint_inference_trace_plot.png",
                    bbox_inches="tight")

        # corner points plot
        fig, axes = plt.subplots(ndim - 1, ndim - 1, figsize=(15, 15))
        axes.reshape([ndim - 1, ndim - 1])
        fg, ax = dyplot.cornerpoints(res,
                                     cmap='plasma',
                                     kde=False,
                                     fig=(fig, axes),
                                     labels=LABELS)
        fg.savefig("../figures/joint_inference_cornerpoints.png",
                   bbox_inches="tight")

        # corner plot
        fig, axes = plt.subplots(ndim, ndim, figsize=(15, 15))
        axes.reshape([ndim, ndim])
        fg, ax = dyplot.cornerplot(res,
                                   fig=(fig, axes),
                                   color="b",
                                   labels=LABELS,
                                   show_titles=True)
        fg.savefig("../figures/joint_inference_corner_plot.png",
                   bbox_inches="tight")

        #### marginalized posterior #####
        Ddt = res.samples[:, 0]  # histogram of Ddt
        weights = np.exp(
            res['logwt'] -
            res['logz'][-1])  # posterior probability (Bayes theorem)

        # eliminate outliers with low and high + estimate confidence interval
        low, fifth, median, ninety_fifth, high = weighted_quantile(
            Ddt, [0.0001, 0.05, 0.5, 0.95, 0.9999], weights)
        error_plus = ninety_fifth - median  # upper error, from the 95th percentile
        error_minus = median - fifth  # lower error, from the 5th percentile
        good = (Ddt > low) & (Ddt < high)  # remove outliers
        Ddt = Ddt[good]
        weights = weights[good]

        plt.figure(figsize=(8, 8))
        plt.hist(Ddt, bins=100, weights=weights)
        plt.title(r"$D_{\Delta t}$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$D_{\Delta t}$")
        plt.savefig("../figures/marginalized_posterior_Ddt.png")

        # assume a flat LambdCDM model (with negligible radiation)

        # We need to model kappa_ext for this step
        def integrand(z):
            return 1 / np.sqrt(args.omega_m * (1 + z)**3 + args.omega_l)

        Dd = quad(integrand, 0, args.z_lens)[0] / (1 + args.z_lens)
        Ds = quad(integrand, 0, args.z_source)[0] / (1 + args.z_source)
        Dds = quad(integrand, args.z_lens,
                   args.z_source)[0] / (1 + args.z_source)
        factor = (1 + args.z_lens) * Ds * Dd / Dds
        H0 = (c * factor / Ddt / u.Mpc).to(u.km / u.s / u.Mpc).value

        plt.figure(figsize=(8, 8))
        fifth, median, ninety_fifth = weighted_quantile(
            H0, [0.05, 0.5, 0.95], weights)
        error_plus = ninety_fifth - median
        error_minus = median - fifth
        plt.hist(H0, bins=100, weights=weights)
        plt.title(r"$H_0$=%.2f$^{+%.2f}_{-%.2f}$" %
                  (median, error_plus, error_minus))
        plt.xlabel(r"$H_0$ [km s$^{-1}$ Mpc$^{-1}$]")
        plt.savefig("../figures/marginalized_posterior_H0.png")
Code example #15
def ns_output(datadir):
    '''
    Inputs:
    -------
    datadir : str
        the working directory for allesfitter
        must contain all the data files
        output directories and files will also be created inside datadir
            
    Outputs:
    --------
    This will output information to the console and create output files
    in datadir/results/ (or datadir/QL/ if QL==True).
    '''
    config.init(datadir)
    
    #::: security check
    if os.path.exists(os.path.join(config.BASEMENT.outdir,'ns_table.csv')):
        try:
            overwrite = str(input('Nested Sampling output files already exist in '+config.BASEMENT.outdir+'.\n'+\
                                  'What do you want to do?\n'+\
                                  '1 : overwrite the output files\n'+\
                                  '2 : abort\n'))
            if (overwrite == '1'):
                pass
            else:
                raise ValueError('User aborted operation.')
        except EOFError:
            warnings.warn("Nested Sampling output files already existed from a previous run, and were automatically overwritten.")
            pass
    
    #::: load the save_ns.pickle
    f = gzip.GzipFile(os.path.join(config.BASEMENT.outdir,'save_ns.pickle.gz'), 'rb')
    results = pickle.load(f)
    f.close()
           
    
    #::: plot the fit        
    posterior_samples_for_plot = draw_ns_posterior_samples(results, Nsamples=20) #only 20 samples for plotting
    
    for companion in config.BASEMENT.settings['companions_all']:
        fig, axes = afplot(posterior_samples_for_plot, companion)
        if fig is not None:
            fig.savefig( os.path.join(config.BASEMENT.outdir,'ns_fit_'+companion+'.pdf'), bbox_inches='tight' )       
            plt.close(fig)

    for companion in config.BASEMENT.settings['companions_phot']:
        for inst in config.BASEMENT.settings['inst_phot']:
            fig, axes = afplot_per_transit(posterior_samples_for_plot, inst, companion)
            fig.savefig( os.path.join(config.BASEMENT.outdir,'ns_fit_per_transit_'+inst+'_'+companion+'.pdf'), bbox_inches='tight' )
            plt.close(fig)
          
    
    #::: retrieve the results
    posterior_samples = draw_ns_posterior_samples(results)                               # all weighted posterior_samples
    params_median, params_ll, params_ul = get_params_from_samples(posterior_samples)     # params drawn from these posterior_samples
    
    #::: output the results
    logprint('\nResults:')
    logprint('--------------------------')
#    print(results.summary())
    logZdynesty = results.logz[-1]                                                       # value of logZ
    logZerrdynesty = results.logzerr[-1]                                                 # estimate of the statistical uncertainty on logZ
    logprint('log(Z) = {} +- {}'.format(logZdynesty, logZerrdynesty))
    logprint('Nr. of posterior samples: {}'.format(len(posterior_samples)))
    
    
    #::: make pretty titles for the plots  
    labels, units = [], []
    for i,l in enumerate(config.BASEMENT.fitlabels):
        labels.append( str(config.BASEMENT.fitlabels[i]) )
        units.append( str(config.BASEMENT.fitunits[i]) )
        
    results2 = results.copy()                    
    params_median2, params_ll2, params_ul2 = params_median.copy(), params_ll.copy(), params_ul.copy()     # params drawn from these posterior_samples; only needed for plots (subtract epoch offset)
    fittruths2 = config.BASEMENT.fittruths.copy()
    for companion in config.BASEMENT.settings['companions_all']:
        
        if companion+'_epoch' in config.BASEMENT.fitkeys:
            ind    = np.where(config.BASEMENT.fitkeys==companion+'_epoch')[0][0]
            results2['samples'][:,ind] -= int(params_median[companion+'_epoch'])                #np.round(params_median[companion+'_epoch'],decimals=0)
            units[ind] = str(units[ind]+'-'+str(int(params_median[companion+'_epoch']))+'d')    #np.format_float_positional(params_median[companion+'_epoch'],0)+'d')
            fittruths2[ind] -= int(params_median[companion+'_epoch'])
            params_median2[companion+'_epoch'] -= int(params_median[companion+'_epoch'])
                

    for i,l in enumerate(labels):
        if len( units[i].strip(' ') ) > 0:
            labels[i] = str(labels[i]+' ('+units[i]+')')
        
        
    #::: traceplot    
    cmap = truncate_colormap( 'Greys', minval=0.2, maxval=0.8, n=256 )
    tfig, taxes = dyplot.traceplot(results2, labels=labels, quantiles=[0.16, 0.5, 0.84], truths=fittruths2, post_color='grey', trace_cmap=[cmap]*config.BASEMENT.ndim, trace_kwargs={'rasterized':True})
    plt.tight_layout()
    
    
    #::: cornerplot
    # ndim = results2['samples'].shape[1]
    fontsize = np.min(( 24. + 0.5*config.BASEMENT.ndim, 40 ))
    cfig, caxes = dyplot.cornerplot(results2, labels=labels, span=[0.997 for i in range(config.BASEMENT.ndim)], quantiles=[0.16, 0.5, 0.84], truths=fittruths2, hist_kwargs={'alpha':0.25,'linewidth':0,'histtype':'stepfilled'}, 
                                    label_kwargs={"fontsize":fontsize, "rotation":45, "horizontalalignment":'right'})


    #::: runplot
#    rfig, raxes = dyplot.runplot(results)
#    rfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_run.jpg'), dpi=100, bbox_inches='tight' )
#    plt.close(rfig)
    
    
    #::: set allesfitter titles and labels
    for i, key in enumerate(config.BASEMENT.fitkeys): 
        
        value = round_tex(params_median2[key], params_ll2[key], params_ul2[key])
        ctitle = r'' + labels[i] + '\n' + r'$=' + value + '$'
        ttitle = r'' + labels[i] + r'$=' + value + '$'
        if len(config.BASEMENT.fitkeys)>1:
            # caxes[i,i].set_title(ctitle)
            caxes[i,i].set_title(ctitle, fontsize=fontsize, rotation=45, horizontalalignment='left')
            taxes[i,1].set_title(ttitle)
            for i in range(caxes.shape[0]):
                for j in range(caxes.shape[1]):
                    caxes[i,j].xaxis.set_label_coords(0.5, -0.5)
                    caxes[i,j].yaxis.set_label_coords(-0.5, 0.5)
        
                    if i==(caxes.shape[0]-1): 
                        fmt = ScalarFormatter(useOffset=False)
                        caxes[i,j].xaxis.set_major_locator(MaxNLocator(nbins=3))
                        caxes[i,j].xaxis.set_major_formatter(fmt)
                    if (i>0) and (j==0):
                        fmt = ScalarFormatter(useOffset=False)
                        caxes[i,j].yaxis.set_major_locator(MaxNLocator(nbins=3))
                        caxes[i,j].yaxis.set_major_formatter(fmt)
                        
                    for tick in caxes[i,j].xaxis.get_major_ticks(): tick.label.set_fontsize(24) 
                    for tick in caxes[i,j].yaxis.get_major_ticks(): tick.label.set_fontsize(24)    
        else:
            caxes.set_title(ctitle)
            taxes[1].set_title(ttitle)
            caxes.xaxis.set_label_coords(0.5, -0.5)
            caxes.yaxis.set_label_coords(-0.5, 0.5)
               
            
    #::: save and close the trace- and cornerplot
    tfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_trace.pdf'), bbox_inches='tight' )
    plt.close(tfig)
    cfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_corner.pdf'), bbox_inches='tight' )
    plt.close(cfig)


    #::: save the tables
    save_table(posterior_samples, 'ns')
    save_latex_table(posterior_samples, 'ns')
    

    #::: derive values (using stellar parameters from params_star.csv)
    deriver.derive(posterior_samples, 'ns')
   
    
    #::: make top-down orbit plot (using stellar parameters from params_star.csv)
    try:
        params_star = np.genfromtxt( os.path.join(config.BASEMENT.datadir,'params_star.csv'), delimiter=',', names=True, dtype=None, encoding='utf-8', comments='#' )
        fig, ax = plot_top_down_view(params_median, params_star)
        fig.savefig( os.path.join(config.BASEMENT.outdir,'top_down_view.pdf'), bbox_inches='tight' )
        plt.close(fig)        
    except Exception:
        logprint('\nOrbital plots could not be produced.')
    
    
    #::: plot TTV results (if wished for)
    if config.BASEMENT.settings['fit_ttvs'] == True:
        plot_ttv_results(params_median, params_ll, params_ul)
    
    
    #::: clean up
    logprint('\nDone. For all outputs, see', config.BASEMENT.outdir)
    
    
    #::: return a nerdy quote
    try:
        with open(os.path.join(os.path.dirname(__file__), 'utils', 'quotes.txt')) as dataset:
            return(np.random.choice([l for l in dataset]))
    except Exception:
        return('42')
Code example #16
def ns_output(datadir):
    '''
    Inputs:
    -------
    datadir : str
        the working directory for allesfitter
        must contain all the data files
        output directories and files will also be created inside datadir
            
    Outputs:
    --------
    This will output information to the console and create output files
    in datadir/results/ (or datadir/QL/ if QL==True).
    '''
    config.init(datadir)
    
    #::: security check
    if os.path.exists(os.path.join(config.BASEMENT.outdir,'ns_table.csv')):
        try:
            overwrite = str(input('Nested Sampling output files already exist in '+config.BASEMENT.outdir+'.\n'+\
                                  'What do you want to do?\n'+\
                                  '1 : overwrite the output files\n'+\
                                  '2 : abort\n'))
            if (overwrite == '1'):
                pass
            else:
                raise ValueError('User aborted operation.')
        except EOFError:
            warnings.warn("Nested Sampling output files already existed from a previous run, and were automatically overwritten.")
            pass
    
    #::: load the save_ns.pickle
#    with open( os.path.join(config.BASEMENT.outdir,'save_ns.pickle'),'rb' ) as f:
#        results = pickle.load(f)
#    f = bzip2.BZ2File(os.path.join(config.BASEMENT.outdir,'save_ns.pickle.bz2'), 'rb')
    f = gzip.GzipFile(os.path.join(config.BASEMENT.outdir,'save_ns.pickle.gz'), 'rb')
    results = pickle.load(f)
    f.close()
           
        
    #::: plot the fit        
    posterior_samples_for_plot = draw_ns_posterior_samples(results, Nsamples=20) #only 20 samples for plotting
    for companion in config.BASEMENT.settings['companions_all']:
        fig, axes = afplot(posterior_samples_for_plot, companion)
        fig.savefig( os.path.join(config.BASEMENT.outdir,'ns_fit_'+companion+'.pdf'), bbox_inches='tight' )
        f = gzip.GzipFile(os.path.join(config.BASEMENT.outdir,'ns_fit.pickle.gz'), 'wb')
        pickle.dump((fig,axes), f)
        f.close()        
        plt.close(fig)

    
    #::: retrieve the results
    posterior_samples = draw_ns_posterior_samples(results)                               # all weighted posterior_samples
    params_median, params_ll, params_ul = get_params_from_samples(posterior_samples)     # params drawn from these posterior_samples
    
    #::: output the results
    logprint('\nResults:')
    logprint('--------------------------')
#    print(results.summary())
    logZdynesty = results.logz[-1]                                                       # value of logZ
    logZerrdynesty = results.logzerr[-1]                                                 # estimate of the statistical uncertainty on logZ
    logprint('log(Z) = {} +- {}'.format(logZdynesty, logZerrdynesty))
    logprint('Nr. of posterior samples: {}'.format(len(posterior_samples)))
    
    
    #::: make pretty titles for the plots  
    labels, units = [], []
    for i,l in enumerate(config.BASEMENT.fitlabels):
        labels.append( str(config.BASEMENT.fitlabels[i]) )
        units.append( str(config.BASEMENT.fitunits[i]) )
        
    results2 = results.copy()                    
    posterior_samples2 = draw_ns_posterior_samples(results2)                               # all weighted posterior_samples
    params_median2, params_ll2, params_ul2 = get_params_from_samples(posterior_samples2)     # params drawn from these posterior_samples                              # only needed for plots (subtract epoch offset)
    for companion in config.BASEMENT.settings['companions_all']:
        
        if companion+'_epoch' in config.BASEMENT.fitkeys:
            ind    = np.where(config.BASEMENT.fitkeys==companion+'_epoch')[0][0]
            results2['samples'][:,ind] -= int(params_median[companion+'_epoch']) #np.round(params_median[companion+'_epoch'],decimals=0)
            units[ind] = str(units[ind]+'-'+str(int(params_median[companion+'_epoch']))+'d') #np.format_float_positional(params_median[companion+'_epoch'],0)+'d')
            config.BASEMENT.fittruths[ind] -= int(params_median[companion+'_epoch'])
                
    for i,l in enumerate(labels):
        if units[i]!='':
            labels[i] = str(labels[i]+' ('+units[i]+')')
        
        
    #::: traceplot    
    cmap = truncate_colormap( 'Greys', minval=0.2, maxval=0.8, n=256 )
    tfig, taxes = dyplot.traceplot(results2, labels=labels, truths=config.BASEMENT.fittruths, post_color='grey', trace_cmap=[cmap]*config.BASEMENT.ndim, trace_kwargs={'rasterized':True})
    plt.tight_layout()
    
    
    #::: cornerplot
    cfig, caxes = dyplot.cornerplot(results2, labels=labels, truths=config.BASEMENT.fittruths, hist_kwargs={'alpha':0.25,'linewidth':0,'histtype':'stepfilled'})


    #::: runplot
#    rfig, raxes = dyplot.runplot(results)
#    rfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_run.jpg'), dpi=100, bbox_inches='tight' )
#    plt.close(rfig)
    

    #::: set allesfitter titles
    for i, key in enumerate(config.BASEMENT.fitkeys):  
        value = round_tex(params_median2[key], params_ll2[key], params_ul2[key])
        ttitle = r'' + labels[i] + r'$=' + value + '$'
        ctitle = r'' + labels[i] + '\n' + r'$=' + value + '$'
        if len(config.BASEMENT.fitkeys)>1:
            caxes[i,i].set_title(ctitle)
            taxes[i,1].set_title(ttitle)
            for i in range(caxes.shape[0]):
                for j in range(caxes.shape[1]):
                    caxes[i,j].xaxis.set_label_coords(0.5, -0.5)
                    caxes[i,j].yaxis.set_label_coords(-0.5, 0.5)
        else:
            caxes.set_title(ctitle)
            taxes[1].set_title(ttitle)
            caxes.xaxis.set_label_coords(0.5, -0.5)
            caxes.yaxis.set_label_coords(-0.5, 0.5)
        
               
            
    #::: save and close the trace- and cornerplot
    tfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_trace.pdf'), bbox_inches='tight' )
    plt.close(tfig)
    cfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_corner.pdf'), bbox_inches='tight' )
    plt.close(cfig)


    #::: save the tables
    save_table(posterior_samples, 'ns')
    save_latex_table(posterior_samples, 'ns')
    

    #::: derive values (using stellar parameters from params_star.csv)
    if os.path.exists(os.path.join(config.BASEMENT.datadir,'params_star.csv')):
        deriver.derive(posterior_samples, 'ns')
    else:
        print('File "params_star.csv" not found. Cannot derive final parameters.')
    
    
    logprint('Done. For all outputs, see', config.BASEMENT.outdir)
Code example #17
def plot_a_results(results, pdfs, pdf_weights, suffix, a_min, a_max):
    samples = results.samples
    weights = np.exp(results.logwt - results.logz[-1])
    samples_equal = dyfunc.resample_equal(samples, weights)

    # results.summary()
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    errors = np.diagonal(cov)**0.5

    maxL_index = results['logl'].argmax()
    maxL_params = samples[maxL_index]

    param_names = ['alpha1']  #, 'alpha2', 'log_a_break', 'amp']

    for ii in range(len(mean)):
        print('{0:5s} = {1:5.2f} +/- {2:5.2f}, maxL = {3:5.2f}'.format(
            param_names[ii], mean[ii], errors[ii], maxL_params[ii]))

    plt.close('all')

    # dyplot.runplot(results)
    # plt.savefig('dnest_a_run_' + suffix + '.png')

    dyplot.traceplot(results)
    plt.savefig('dnest_a_trace_' + suffix + '.png')

    dyplot.cornerplot(results)
    plt.savefig('dnest_a_corner_' + suffix + '.png')

    # Make a plot of the resulting distributions.
    # Note these bins have to match what we used to make the PDFs in the first place.
    a_bin = np.logspace(3, 8, 50)
    # a_bin = np.linspace(1e3, 1e6, 100)
    a_bin_mid = a_bin[:-1] + np.diff(a_bin)

    alpha1 = mean[0]
    # alpha2 = mean[1]
    # a_break = 10**mean[2]

    # p_a = broken_powerlaw_trunc(a_bin_mid, alpha1, alpha2, a_break, a_min=a_min, a_max=a_max)
    p_a = powerlaw_trunc(a_bin_mid, alpha1, a_min=a_min, a_max=a_max)

    N_samp = 1000
    p_a_nk = np.zeros((len(a_bin_mid), N_samp), dtype=float)
    for ss in range(N_samp):
        # p_a_nk[:, ss] = broken_powerlaw_trunc(a_bin_mid,
        #                                           samples_equal[ss, 0],
        #                                           samples_equal[ss, 1],
        #                                           10**samples_equal[ss, 2],
        #                                           a_min=a_min, a_max=a_max)
        p_a_nk[:, ss] = powerlaw_trunc(a_bin_mid,
                                       samples_equal[ss, 0],
                                       a_min=a_min,
                                       a_max=a_max)

    fig, ax = plt.subplots(2, 1, sharex=True)
    plt.subplots_adjust(hspace=0)

    for ss in range(N_samp):
        ax[0].loglog(a_bin_mid, p_a_nk[:, ss], 'r-', linewidth=1, alpha=0.05)

    ax[0].loglog(a_bin_mid, p_a, 'r-', linewidth=5)

    # Plot the individual star PDFs
    a_bin_widths = np.diff(a_bin)

    for ss in range(pdfs.shape[0]):
        an, ab = np.histogram(pdfs[ss],
                              bins=a_bin,
                              weights=pdf_weights[ss],
                              density=False)
        an /= a_bin_widths
        ax[1].loglog(a_bin_mid, an, 'k-', linewidth=2, alpha=0.5)

    # Joint PDF:
    an, ab = np.histogram(pdfs.ravel(),
                          bins=a_bin,
                          weights=pdf_weights.ravel(),
                          density=False)
    an /= a_bin_widths
    ax[1].loglog(a_bin_mid, an, 'g-', linewidth=3)

    ax[1].set_xlabel('Semi-major Axis (AU)')
    ax[1].set_ylabel('PDF')
    ax[0].set_ylabel('PDF')

    plt.savefig('dnest_a_dist_' + suffix + '.png')

    return
Code example #18
def plot(n, alpha, beta, dlogz_val=.1, interloper=False, d=1):
    x_k = np.round(generator(n, alpha, beta, d), 2)  # round to 2 decimal places

    #append values if interloper is true
    if interloper:
        interloper_x_k = np.round(generator(n, alpha + 3, beta, d), 2)
        x_k = np.append(x_k, interloper_x_k)

    #feed parameters for alpha and beta

    def lighthouse_logl(params):

        return log_likelihood(x_k, params[0], params[1])

    #prior transform, using documentation for range -1000, 1000

    def prior_transform(u):
        return [2000 * u[0] - 1000, 1000 * u[1]]

    ndim = 2
    sampler = dynesty.NestedSampler(lighthouse_logl,
                                    prior_transform,
                                    ndim,
                                    bound='single',
                                    nlive=200)
    sampler.run_nested(dlogz=dlogz_val, print_progress=False)
    results = sampler.results

    #runplot

    dyplot.runplot(results)
    #plt.savefig('int_run_plot' + str(dlogz_val)+".png", dpi =300)
    plt.show()

    #cornerpoints plot

    fig = plt.subplots(1, 1, figsize=(10, 10))
    dyplot.cornerpoints(results,
                        fig=fig,
                        cmap='plasma',
                        truths=np.zeros(ndim),
                        kde=False)
    fig[1].set_ylabel('$\\beta$')
    fig[1].set_xlabel('$\\alpha$')
    plt.tight_layout()
    plt.xlim(-10, 10)
    plt.ylim(0, 10)
    plt.savefig('int_corner1' + str(dlogz_val) + ".png", dpi=300)
    plt.show()

    #traceplot

    fig = plt.subplots(2, 2, figsize=(15, 10))
    dyplot.traceplot(results,
                     fig=fig,
                     truth_color='black',
                     trace_cmap='viridis',
                     connect=True,
                     connect_highlight=range(5),
                     show_titles=True)
    fig[1][1, 1].set_xlabel('$\\beta$')
    fig[1][0, 1].set_xlabel('$\\alpha$')
    fig[1][1, 0].set_ylabel('$\\beta$')
    fig[1][0, 0].set_ylabel('$\\alpha$')
    plt.tight_layout()
    #plt.savefig('int_trace' + str(dlogz_val)+".png", dpi =300)
    plt.show()

    print(results.samples[-1])
Code example #19
                                               bound="multi", method="slice", bootstrap=0)
        t0 = time.time()
        sampler.run_nested(nlive_init=int(nlive/2), nlive_batch=int(nlive),
                           wt_kwargs={'pfrac': 1.0}, stop_kwargs={"post_thresh":0.2})
        dur = time.time() - t0
        results = sampler.results
        results['duration'] = dur
        indmax = results['logl'].argmax()
        best = results['samples'][indmax, :]

        from dynesty import plotting as dyplot
        truths = ptrue.copy()
        label = filters + ["ra", "dec", "q", "pa", "n", "rh"]
        cfig, caxes = dyplot.cornerplot(results, fig=pl.subplots(ndim, ndim, figsize=(13., 10)),
                                        labels=label, show_titles=True, title_fmt='.8f', truths=truths)
        tfig, taxes = dyplot.traceplot(results, fig=pl.subplots(ndim, 2, figsize=(13., 13.)),
                                    labels=label)

    # -- hmc ---
    if False:
        p0 = ptrue.copy()
        prange = upper - lower
        scales = np.array(nsource * [ 5., plate_scale, plate_scale, 1.0, 1. ])

        from hmc import BasicHMC
        model = Posterior(scene, plans, upper=upper, lower=lower)
        sampler = BasicHMC(model, verbose=False)
        sampler.ndim = len(p0)


        sampler.set_mass_matrix(1/scales**2)
        eps = sampler.find_reasonable_stepsize(p0*1.0)
Code example #20
File: test_plot.py Project: joshspeagle/dynesty
def test_gaussian(dynamic, periodic, ndim, bound):
    rstate = get_rstate()
    g = Gaussian(ndim=ndim)
    if periodic:
        periodic = [0]
    else:
        periodic = None
    if dynamic:
        sampler = dynesty.DynamicNestedSampler(g.loglikelihood,
                                               g.prior_transform,
                                               g.ndim,
                                               nlive=nlive,
                                               rstate=rstate,
                                               periodic=periodic,
                                               bound=bound)
    else:
        sampler = dynesty.NestedSampler(g.loglikelihood,
                                        g.prior_transform,
                                        g.ndim,
                                        nlive=nlive,
                                        rstate=rstate,
                                        periodic=periodic,
                                        bound=bound)
    sampler.run_nested(print_progress=printing)
    results = sampler.results
    # check plots
    dyplot.runplot(results)
    dyplot.runplot(results, span=[(0., 10.), 0.001, 0.2, (5., 6.)])
    dyplot.runplot(results, logplot=True)
    dyplot.runplot(results,
                   fig=(plt.gcf(), plt.gcf().axes),
                   max_x_ticks=0,
                   max_y_ticks=0)
    plt.close()
    dyplot.traceplot(results)
    dyplot.traceplot(results, smooth=[10] * ndim)
    dyplot.traceplot(results, connect=True)
    dyplot.traceplot(results,
                     fig=(plt.gcf(), plt.gcf().axes),
                     show_titles=True,
                     truths=np.zeros(ndim),
                     verbose=True,
                     max_n_ticks=0)
    plt.close()

    truths = np.zeros(ndim)
    truths[0] = -.1
    span = [[-10, 10]] * ndim
    if ndim > 1:
        truths[1] = .1
        span[1] = .9

    dyplot.cornerplot(results, show_titles=True, truths=truths)
    dyplot.cornerplot(results,
                      smooth=10,
                      verbose=True,
                      hist2d_kwargs=dict(plot_datapoints=True),
                      max_n_ticks=0)
    plt.close()
    if ndim != 1:
        # cornerbound
        dyplot.cornerbound(results,
                           it=500,
                           prior_transform=g.prior_transform,
                           show_live=True,
                           span=span)
        dyplot.cornerbound(results,
                           it=500,
                           show_live=True,
                           span=span,
                           fig=(plt.gcf(), plt.gcf().axes))
        plt.close()
        # boundplot
        dyplot.boundplot(results,
                         dims=(0, 1)[:min(ndim, 2)],
                         it=1000,
                         prior_transform=g.prior_transform,
                         show_live=True,
                         span=span)
        dyplot.boundplot(results, dims=(0, 1)[:min(ndim, 2)], it=1000)
        plt.close()

        # cornerpoints
        dyplot.cornerpoints(results)
        plt.close()
        dyplot.cornerpoints(results, span=span, truths=truths, max_n_ticks=0)
        plt.close()
Code example #21
    def write_current_state(self, plot=True):
        """
        Write the current state of the sampler to disk.

        The required information to reconstruct the state of the run is written
        to a pickle file (the resume file).
        All but the most recent removed live point in the chain are removed from
        the sampler to reduce memory usage.
        This means the first live point should not be appended to the
        file when updating a previous checkpoint.

        Parameters
        ----------
        plot: bool
            Whether to generate the checkpoint trace plot.
        """
        check_directory_exists_and_if_not_mkdir(self.outdir)
        print("")
        logger.info("Writing checkpoint file {}".format(self.resume_file))

        end_time = datetime.datetime.now()
        if hasattr(self, 'start_time'):
            self.sampling_time += end_time - self.start_time
            self.start_time = end_time

        current_state = dict(
            unit_cube_samples=self.sampler.saved_u,
            physical_samples=self.sampler.saved_v,
            sample_likelihoods=self.sampler.saved_logl,
            sample_log_volume=self.sampler.saved_logvol,
            sample_log_weights=self.sampler.saved_logwt,
            cumulative_log_evidence=self.sampler.saved_logz,
            cumulative_log_evidence_error=self.sampler.saved_logzvar,
            cumulative_information=self.sampler.saved_h,
            id=self.sampler.saved_id,
            it=self.sampler.saved_it,
            nc=self.sampler.saved_nc,
            boundidx=self.sampler.saved_boundidx,
            bounditer=self.sampler.saved_bounditer,
            scale=self.sampler.saved_scale,
            sampling_time=self.sampling_time.total_seconds())

        current_state.update(ncall=self.sampler.ncall,
                             live_logl=self.sampler.live_logl,
                             iteration=self.sampler.it - 1,
                             live_u=self.sampler.live_u,
                             live_v=self.sampler.live_v,
                             nlive=self.sampler.nlive,
                             live_bound=self.sampler.live_bound,
                             live_it=self.sampler.live_it,
                             added_live=self.sampler.added_live)

        try:
            weights = np.exp(current_state['sample_log_weights'] -
                             current_state['cumulative_log_evidence'][-1])
            from dynesty.utils import resample_equal

            current_state['posterior'] = resample_equal(
                np.array(current_state['physical_samples']), weights)
            current_state['search_parameter_keys'] = self.search_parameter_keys
        except ValueError:
            logger.debug("Unable to create posterior")

        with open(self.resume_file, 'wb') as file:
            pickle.dump(current_state, file)

        if plot and self.check_point_plot:
            import dynesty.plotting as dyplot
            labels = [
                label.replace('_', ' ') for label in self.search_parameter_keys
            ]
            filename = "{}/{}_checkpoint_trace.png".format(
                self.outdir, self.label)
            try:
                fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
                fig.tight_layout()
                fig.savefig(filename)
                plt.close('all')
            except (RuntimeError, np.linalg.linalg.LinAlgError,
                    ValueError) as e:
                logger.warning(e)
                logger.warning(
                    'Failed to create dynesty state plot at checkpoint')
Code example #22
    # Plot a summary of the run.
    if plt_summary:
        print('making SUMMARY plot')
        rfig, raxes = dyplot.runplot(res, mark_final_live=False, label_kwargs=font_kwargs)
        for ax in raxes:
            ax.xaxis.set_tick_params(labelsize=tick_fs)
            ax.yaxis.set_tick_params(labelsize=tick_fs)
            ax.yaxis.get_offset_text().set_size(fs)
        rfig.tight_layout()
        rfig.savefig(outfolder+objname+'_dynesty_summary.pdf',dpi=200)

    # Plot traces and 1-D marginalized posteriors.
    if plt_trace:
        print('making TRACE plot')
        tfig, taxes = dyplot.traceplot(res, labels=parnames,label_kwargs=font_kwargs)
        for ax in taxes.ravel():
            ax.xaxis.set_tick_params(labelsize=tick_fs)
            ax.yaxis.set_tick_params(labelsize=tick_fs)
        tfig.tight_layout()
        tfig.savefig(outfolder+objname+'_dynesty_trace.pdf',dpi=200)

    # corner plot
    if plt_corner: 
        print('making CORNER plot')
        subcorner(res, eout, parnames,outname=outfolder+objname, **opts)

    # sed plot
    if plt_sed:
        print('making SED plot')
        pfig = sed_figure(sresults = [res], eout=[eout],
Code example #23
File: runtests.py Project: wym109/dynesty
sys.stderr.write('logz_tol: {}\n'.format(lz_tol))
sys.stderr.write('mean_tol: {}\n'.format(m_tol))
sys.stderr.write('cov_tol: {}\n'.format(c_tol))

# check summary
sys.stderr.write('\nResults\n')
res = sampler.results
res.summary()

# check plots
sys.stderr.write('\nPlotting\n')
sys.stderr.write('Summary/Run Plot\n')
dyplot.runplot(sampler.results)
plt.close()
sys.stderr.write('Trace Plot\n')
dyplot.traceplot(sampler.results)
plt.close()
sys.stderr.write('Sub-Corner Plot (Points)\n')
dyplot.cornerpoints(sampler.results)
plt.close()
sys.stderr.write('Corner Plot (Contours)\n')
dyplot.cornerplot(sampler.results)
plt.close()
sys.stderr.write('2-D Bound Plot\n')
dyplot.boundplot(sampler.results,
                 dims=(0, 1),
                 it=3000,
                 prior_transform=prior_transform,
                 show_live=True,
                 span=[(-10, 10), (-10, 10)])
plt.close()
Code example #24
File: linear_test.py Project: ExoCTK/chimera
                                        ndim=3,
                                        bound='multi',
                                        sample='rwalk',
                                        update_interval=3.,
                                        pool=pool,
                                        queue_size=4)
dsampler.run_nested()
dres = dsampler.results

from dynesty import plotting as dyplot

truths = [m_true, b_true, np.log(f_true)]

# trace plot
fig, axes = dyplot.traceplot(dsampler.results,
                             truths=truths,
                             fig=plt.subplots(3, 2, figsize=(16, 12)))
fig.tight_layout()

# corner plot
fig, axes = dyplot.cornerplot(dres,
                              truths=truths,
                              show_titles=True,
                              title_kwargs={'y': 1.04},
                              fig=plt.subplots(3, 3, figsize=(15, 15)))
'''
results=dsampler.results
samples=results['samples']
weights = np.exp(results['logwt'] - results['logz'][-1])  #weights when plotting histogram....
    
'''
Code example #25
File: sed_fitter.py Project: vedantchandra/WD_models
    def fit(self,
            sed,
            e_sed,
            parallax=[100, 0.001],
            nlive=250,
            distance=None,
            binary=False,
            plot_fit=True,
            plot_trace=False,
            plot_corner=False,
            progress=False,
            textx=0.025,
            textsize=12):

        if self.to_flux:
            sed = self.mag_to_flux(sed)
            e_sed = sed * e_sed  # magnitude error to flux error

        if not binary:
            ndim = 3

            def loglike(theta):
                teff, logg, plx = theta
                model = self.model_sed(teff, logg, plx)
                ivar = 1 / e_sed**2
                logchi = -0.5 * np.sum((sed - model)**2 * ivar)
                if np.isnan(logchi):
                    return -np.Inf
                else:
                    return logchi

            def prior_transform(u):
                x = np.array(u)
                x[0] = u[0] * (self.teff_range[1] -
                               self.teff_range[0]) + self.teff_range[0]
                x[1] = u[1] * (self.logg_range[1] -
                               self.logg_range[0]) + self.logg_range[0]
                t = stats.norm.ppf(u[2])
                x[2] = parallax[1] * t
                x[2] += parallax[0]
                return x

        elif binary:
            ndim = 5

            def loglike(theta):
                teff1, logg1, teff2, logg2, plx = theta

                model = self.model_binary_sed(teff1, logg1, teff2, logg2, plx)

                ivar = 1 / e_sed**2
                logchi = -0.5 * np.sum((sed - model)**2 * ivar)
                if np.isnan(logchi):
                    return -np.Inf
                elif teff1 > teff2:
                    return -np.Inf
                else:
                    return logchi

            def prior_transform(u):
                x = np.array(u)
                x[0] = u[0] * (self.teff_range[1] -
                               self.teff_range[0]) + self.teff_range[0]
                x[1] = u[1] * (self.logg_range[1] -
                               self.logg_range[0]) + self.logg_range[0]
                x[2] = u[2] * (self.teff_range[1] -
                               self.teff_range[0]) + self.teff_range[0]
                x[3] = u[3] * (self.logg_range[1] -
                               self.logg_range[0]) + self.logg_range[0]
                t = stats.norm.ppf(u[4])
                x[4] = parallax[1] * t
                x[4] += parallax[0]
                return x

        ########## DYNESTY ###################

        dsampler = dynesty.NestedSampler(loglike,
                                         prior_transform,
                                         ndim=ndim,
                                         nlive=nlive)
        dsampler.run_nested(print_progress=progress)

        result = dsampler.results

        samples, weights = result.samples, np.exp(result.logwt -
                                                  result.logz[-1])
        chis = -2 * np.array([loglike(sample) for sample in result.samples])
        bestfit = np.argmin(chis)
        resampled = dyfunc.resample_equal(samples, weights)
        cov = np.var(resampled, axis=0)  # per-parameter posterior variances

        mean = result.samples[bestfit]  # best-fit (minimum chi^2) parameters

        print(result.samples[bestfit])

        bandwls = []
        for band in self.bands:
            bandwls.append(self.mean_wl[band])

        ########## PLOTTING #################

        if plot_trace:

            f = dyplot.traceplot(dsampler.results,
                                 show_titles=True,
                                 trace_cmap='viridis')
            plt.tight_layout()
        if plot_corner:

            if binary:
                f = dyplot.cornerplot(dsampler.results,
                                      show_titles=True,
                                      labels=[
                                          '$T_{\mathrm{eff,1}}$',
                                          '$\log{g}_1$',
                                          '$T_{\mathrm{eff,2}}$',
                                          '$\log{g}_2$', r'$\varpi$'
                                      ])
            if not binary:
                f = dyplot.cornerplot(
                    dsampler.results,
                    show_titles=True,
                    labels=['$T_{\mathrm{eff}}$', '$\log{g}$', r'$\varpi$'])

            plt.tight_layout()

        if not binary:

            model = self.model_sed(*mean)
            ivar = 1 / e_sed**2
            redchi = np.sum((sed - model)**2 * ivar) / (len(sed) - ndim)

            if plot_fit:

                plt.figure(figsize=(10, 5))
                plt.errorbar(bandwls,
                             sed,
                             yerr=e_sed,
                             linestyle='none',
                             capsize=5,
                             color='k')
                plt.scatter(bandwls, model, color='k')
                plt.text(textx,
                         0.35,
                         '$T_{\mathrm{eff}}$ = %i ± %i' %
                         (mean[0], np.sqrt(cov[0])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.25,
                         '$\log{g}$ = %.2f ± %.2f' %
                         (mean[1], np.sqrt(cov[1])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.15,
                         'atm = %s' % (self.atm_type),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.05,
                         '$\chi_r^2$ = %.2f' % (redchi),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=16)
                plt.ylabel(
                    '$f_\lambda\ [erg\ cm^{-2}\ s^{-1}\ \mathrm{\AA}^{-1}]$',
                    fontsize=16)
                plt.yscale('log')

            return [mean[0], np.sqrt(cov[0]), mean[1], np.sqrt(cov[1])], redchi

        elif binary:

            model = self.model_binary_sed(*mean)

            ivar = 1 / e_sed**2
            redchi = np.sum((sed - model)**2 * ivar) / (len(sed) - ndim)

            if plot_fit:

                plt.figure(figsize=(10, 5))
                plt.errorbar(bandwls,
                             sed,
                             yerr=e_sed,
                             linestyle='none',
                             capsize=5,
                             color='k')
                plt.scatter(bandwls, model, color='k')
                plt.text(textx,
                         0.45,
                         '$T_{\mathrm{eff,1}}$ = %i ± %i' %
                         (mean[0], np.sqrt(cov[0])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.35,
                         '$\log{g}_1$ = %.2f ± %.2f' %
                         (mean[1], np.sqrt(cov[1])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.25,
                         '$T_{\mathrm{eff,2}}$ = %i ± %i' %
                         (mean[2], np.sqrt(cov[2])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.text(textx,
                         0.15,
                         '$\log{g}_2$ = %.2f ± %.2f' %
                         (mean[3], np.sqrt(cov[3])),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                #plt.text(0.15, 0.2, 'atm = %s' %(self.atm_type), transform = plt.gca().transAxes, fontsize = 12)
                plt.text(textx,
                         0.05,
                         '$\chi_r^2$ = %.2f' % (redchi),
                         transform=plt.gca().transAxes,
                         fontsize=textsize)
                plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=16)
                plt.ylabel(
                    '$f_\lambda\ [erg\ cm^{-2}\ s^{-1}\ \mathrm{\AA}^{-1}]$',
                    fontsize=16)
                plt.yscale('log')

            return [
                mean[0],
                np.sqrt(cov[0]), mean[1],
                np.sqrt(cov[1]), mean[2],
                np.sqrt(cov[2]), mean[3],
                np.sqrt(cov[3])
            ], redchi
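For orientation, a hypothetical call to the fit method above might look like the following; the fitter instance, photometry values, and parallax are placeholders, not data from the original project.

import numpy as np

# Hypothetical usage sketch: `fitter` stands for an instance of the class
# that defines fit() above; the magnitudes, errors, and parallax are invented.
sed = np.array([15.2, 15.0, 14.9, 14.8])    # observed magnitudes per band
e_sed = np.array([0.02, 0.02, 0.03, 0.03])  # magnitude uncertainties
params, redchi = fitter.fit(sed, e_sed,
                            parallax=[10.0, 0.1],  # [mean, sigma] parallax prior
                            nlive=250,
                            binary=False,
                            plot_fit=False,
                            progress=True)
teff, e_teff, logg, e_logg = params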
Code Example #26
0
def runMCMC(path, ndim, p, loglike, ptform, galname, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 8:
        nparams = '_8P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(res1,
                                     truths=np.array([
                                         pdict['vrot'][0], pdict['vrot'][1],
                                         pdict['vrot'][2], pdict['vrot'][3],
                                         pdict['vdisp'][0], pdict['vdisp'][1],
                                         pdict['vdisp'][2], pdict['vdisp'][3]
                                     ]),
                                     truth_color='black',
                                     show_titles=True,
                                     trace_cmap='viridis',
                                     connect=True,
                                     smooth=0.02,
                                     connect_highlight=range(8),
                                     labels=[
                                         r'$v_{rot,225}$', r'$v_{rot,450}$',
                                         r'$v_{rot,675}$', r'$v_{rot,900}$',
                                         r'$\sigma_{225}$', r'$\sigma_{450}$',
                                         r'$\sigma_{675}$', r'$\sigma_{900}$'
                                     ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=5,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3]
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))
    elif ndim == 9:
        nparams = '_9P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=0.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    elif ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=250,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': 300,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        with open(path + '/result_nested_P' + '{}'.format(ndim) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()

        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        matplotlib.rcParams.update({'font.size': 16})
        fig, axes = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=[0.16, 0.5, 0.84],
            labels=[
                r'$V_{225}[km/s]$', r'$V_{450}[km/s]$', r'$V_{675}[km/s]$',
                r'$V_{900}[km/s]$', r'$\sigma_{gas,225}[km/s]$',
                r'$\sigma_{gas,450}[km/s]$', r'$\sigma_{gas,675}[km/s]$',
                r'$\sigma_{gas,900}[km/s]$', r'$i[deg]$', r'$\phi[deg]$'
            ])

        # Save the model data
        samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
        mean, cov = dyfunc.mean_and_cov(samples, weights)
        MaP = res1['samples'][res1['logl'].tolist().index(
            max(res1['logl'].tolist()))]
        quantiles = [
            dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
            for samps in samples.T
        ]
        labels = [
            r'$V_{225}$', r'$V_{450}$', r'$V_{675}$', r'$V_{900}$',
            r'$\sigma_{gas,225}$', r'$\sigma_{gas,450}$',
            r'$\sigma_{gas,675}$', r'$\sigma_{gas,900}$', r'$i$', r'$\phi$'
        ]
        units = [
            ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]', ' [km/s]',
            ' [km/s]', ' [km/s]', ' [deg]', ' [deg]'
        ]
        for i in range(ndim):
            ax = axes[i, i]
            q5 = np.round(quantiles[i][1], 2)
            q14 = np.round(quantiles[i][0], 2)
            q84 = np.round(quantiles[i][2], 2)
            ax.set_title(r"$%.2f_{%.2f}^{+%.2f}$" %
                         (q5, -1 * abs(q5 - q14), abs(q5 - q84)) + units[i])

        # Loop over the histograms
        for yi in range(ndim):
            axes[yi, 0].set_ylabel(labels[yi] + units[yi],
                                   labelpad=30,
                                   fontsize=20)
            axes[-1, yi].set_xlabel(labels[yi] + units[yi],
                                    labelpad=30,
                                    fontsize=20)
            axes[yi, 0].tick_params(axis='y', which='major', labelsize=14)
            axes[-1, yi].tick_params(axis='x', which='major', labelsize=14)

        fig.tight_layout()
        plt.savefig(path + '/cornerplot_' + galname + nparams + '.pdf')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.16, 0.5, 0.84], weights=weights)
        for samps in samples.T
    ]

    pdict['sigmavrot'] = [(quantiles[0][0], quantiles[0][2]),
                          (quantiles[1][0], quantiles[1][2]),
                          (quantiles[2][0], quantiles[2][2]),
                          (quantiles[3][0], quantiles[3][2])]
    pdict['sigmavdisp'] = [(quantiles[4][0], quantiles[4][2]),
                           (quantiles[5][0], quantiles[5][2]),
                           (quantiles[6][0], quantiles[6][2]),
                           (quantiles[7][0], quantiles[7][2])]
    pdict['vrot'] = [
        quantiles[0][1], quantiles[1][1], quantiles[2][1], quantiles[3][1]
    ]
    pdict['vdisp'] = [
        quantiles[4][1], quantiles[5][1], quantiles[6][1], quantiles[7][1]
    ]

    if len(quantiles) == 9:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]

    if len(quantiles) == 10:
        pdict['inc'] = quantiles[8][1]
        pdict['sigmainc'] = [(quantiles[8][0], quantiles[8][2])]
        pdict['phi'] = quantiles[9][1]
        pdict['sigmaphi'] = [(quantiles[9][0], quantiles[9][2])]

    # We don't need data entry, waste of space
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
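The NumpyEncoder passed to json.dumps above is not shown in this snippet; a typical implementation (an assumption, not necessarily the one used in the project) converts numpy containers and scalars into JSON-serializable Python types:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    # Convert numpy arrays and scalars to plain Python types for json.dumps.
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return super().default(obj)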
Code Example #27
0
def plot_ecc_results(results, pdfs, pdf_weights, suffix):
    samples = results.samples
    weights = np.exp(results.logwt - results.logz[-1])
    samples_equal = dyfunc.resample_equal(samples, weights)

    # results.summary()
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    errors = np.diagonal(cov)**0.5

    maxL_index = results['logl'].argmax()
    maxL_params = samples[maxL_index]

    param_names = ['alpha', 'beta']
    labels = ['$\\alpha$', '$\\beta$']

    for ii in range(len(mean)):
        print('{0:5s} = {1:5.2f} +/- {2:5.2f}, maxL = {3:5.2f}'.format(
            param_names[ii], mean[ii], errors[ii], maxL_params[ii]))
    plt.close('all')

    dyplot.runplot(results)
    plt.savefig('dnest_ecc_run_' + suffix + '.png')

    dyplot.traceplot(results, labels=labels)
    plt.savefig('dnest_ecc_trace_' + suffix + '.png')

    dyplot.cornerplot(results, labels=labels)
    plt.savefig('dnest_ecc_corner_' + suffix + '.png')

    # Make a plot of the resulting distributions.
    # Note these bins have to match what we used to make the PDFs in the first place.
    e_bin = np.arange(0, 1, 0.01)

    # Calculate the "best-fit" PDF.
    # p_e = scipy.stats.beta.pdf(e_bin, mean[0], mean[1])
    p_e = scipy.stats.beta.pdf(e_bin, maxL_params[0], maxL_params[1])

    # Make samples drawn from the posteriors.
    N_samp = 1000
    p_e_nk = np.zeros((len(e_bin), N_samp), dtype=float)
    for ss in range(N_samp):
        p_e_nk[:, ss] = scipy.stats.beta.pdf(e_bin, samples_equal[ss][0],
                                             samples_equal[ss][1])

    fig, ax = plt.subplots(2, 1, sharex=True)
    plt.subplots_adjust(hspace=0)

    for ss in range(N_samp):
        ax[0].plot(e_bin, p_e_nk[:, ss], 'r-', linewidth=1, alpha=0.05)

    ax[0].plot(e_bin, p_e, 'r-', linewidth=5)

    # Plot the individual star PDFs
    e_bin_edges = np.append(e_bin, 1.0)
    e_bin_widths = np.diff(e_bin_edges)

    for ss in range(pdfs.shape[0]):

        # # instantiate and fit the KDE model
        # kde = KernelDensity(bandwidth=1e-2, kernel='gaussian')
        # kde.fit(pdfs[ss][:, None], sample_weight=pdf_weights[ss])

        # # score_samples returns the log of the probability density
        # e_bin_kde = np.arange(0, 1.0, 5e-3)
        # logprob = kde.score_samples(e_bin_kde[:, None])
        # prob = np.exp(logprob)
        # prob *= pdf_weights[ss].sum()

        # ax[1].plot(e_bin_kde, prob, 'k-', color='green', linewidth=2, alpha=0.5)

        en, eb = np.histogram(pdfs[ss],
                              bins=e_bin_edges,
                              weights=pdf_weights[ss],
                              density=False)
        en /= e_bin_widths
        ax[1].plot(e_bin + (e_bin_widths / 2.0),
                   en,
                   'k-',
                   linewidth=2,
                   alpha=0.5)

    ax[1].set_xlabel('Eccentricity')
    ax[1].set_ylabel('PDF')
    ax[0].set_ylabel('PDF')

    # ax[0].set_ylim(0, 5)
    ylim1 = ax[1].get_ylim()
    ax[1].set_ylim(0, ylim1[1])

    plt.savefig('dnest_ecc_dist_' + suffix + '.png')

    return
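As a complement to the mean/covariance summary computed above, weighted quantiles give asymmetric credible intervals directly from the nested-sampling weights; a small sketch using the same Results conventions:

import numpy as np
from dynesty import utils as dyfunc

def credible_interval(results, dim, q=(0.16, 0.5, 0.84)):
    # Median and asymmetric 1-sigma errors for one parameter of a
    # dynesty Results object, using the importance weights.
    weights = np.exp(results.logwt - results.logz[-1])
    lo, med, hi = dyfunc.quantile(results.samples[:, dim], list(q), weights=weights)
    return med, med - lo, hi - med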
Code Example #28
0
    def traceplot(self, **kwargs):

        dyplot.traceplot(results=self.samples.results, **kwargs)

        self.output.to_figure(structure=None, auto_filename="traceplot")
Code Example #29
0
        nlive_init=1000,
        nlive_batch=100,
        maxiter=maxiter,
        use_stop=False,
        # wt_kwargs={'pfrac': 1.0}
    )

    res = sampler.results
    pickle.dump(res, open(chain_file, "wb"))

t_run = (time.time() - t0) / 60.
print('\n============================================')
print("Sampling took {0:.10f} mins".format(t_run))
print('============================================')

# =====================================================================
# Plots
if plot:
    rfig, raxes = dyplot.runplot(
        res,
        span=[0.0, (0., 1.1), 0.0, (0., 1.05 * np.exp(np.nanmax(res.logz)))])
    plt.savefig(chain_file.replace('.pickle', '_runplot.png'))

    tfig, taxes = dyplot.traceplot(res, labels=labels)
    plt.savefig(chain_file.replace('.pickle', '_traceplot.png'))

    cfig, caxes = dyplot.cornerplot(res, labels=labels, show_titles=True)
    plt.savefig(chain_file.replace('.pickle', '_cornerplot.png'))

del res
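Since the results are pickled above before `res` is deleted, they can be reloaded later to regenerate any of the plots. A minimal sketch, assuming the pickle path and label list match the run script (both names here are placeholders):

import pickle
import matplotlib.pyplot as plt
from dynesty import plotting as dyplot

chain_file = "dynesty_run.pickle"  # placeholder path; match the run script
labels = None                      # or the same label list used above

with open(chain_file, "rb") as f:
    res = pickle.load(f)

tfig, taxes = dyplot.traceplot(res, labels=labels)
plt.savefig(chain_file.replace('.pickle', '_traceplot_reloaded.png'))
plt.close(tfig)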
Code Example #30
0
def MCMC_diagnostic(path, ndim, p, loglike, ptform, galname, nlive, **pdict):
    pdict = pdict['pdict']
    start = time.time()
    pdict['start'] = start

    if ndim == 10:
        nparams = '_10P'

        sampler = NestedSampler(loglike,
                                ptform,
                                ndim=ndim,
                                nlive=nlive,
                                sample='unif',
                                bound='multi',
                                logl_kwargs=pdict,
                                update_interval=.8,
                                dlogz=0.5,
                                first_update={
                                    'min_ncall': nlive,
                                    'min_eff': 50.
                                },
                                pool=p)
        sampler.run_nested(maxiter=15000, maxcall=50000)
        res1 = sampler.results

        # Save nested data
        # obtain KL divergence
        klds = []
        for i in range(500):
            kld = dyfunc.kld_error(res1, error='simulate')
            klds.append(kld[-1])
        print(np.mean(klds))
        res1['KLval'] = np.mean(klds)
        with open(path + '/result_nested_P' + '{}'.format(nlive) + '.json',
                  'w') as ff:
            ff.write(json.dumps(res1, cls=NumpyEncoder))

        lnz_truth = 10 * -np.log(2 * 30.)
        fig, axes = dyplot.runplot(res1, lnz_truth=lnz_truth)
        plt.savefig(path + '/runplot_' + galname + nparams + '.png')
        plt.close()

        fig, axes = dyplot.traceplot(
            res1,
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),
            truth_color='black',
            show_titles=True,
            trace_cmap='viridis',
            connect=True,
            smooth=0.02,
            connect_highlight=range(8),
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/traceplot_' + galname + nparams + '.png')
        plt.close()
        # initialize figure
        fig, axes = plt.subplots(2, 3, figsize=(15, 10))

        # plot 6 snapshots over the course of the run
        for i, a in enumerate(axes.flatten()):
            it = int((i + 1) * res1.niter / 8.)
            # overplot the result onto each subplot
            temp = dyplot.boundplot(res1,
                                    dims=(0, 1),
                                    it=it,
                                    prior_transform=ptform,
                                    max_n_ticks=3,
                                    show_live=True,
                                    span=[(70, 150), (70, 150)],
                                    fig=(fig, a))
            a.set_title('Iteration {0}'.format(it), fontsize=26)
        fig.tight_layout()
        plt.savefig(path + '/boundplot_' + galname + nparams + '.png')
        plt.close()

        fg, ax = dyplot.cornerplot(
            res1,
            color='blue',
            truths=np.array([
                pdict['vrot'][0], pdict['vrot'][1], pdict['vrot'][2],
                pdict['vrot'][3], pdict['vdisp'][0], pdict['vdisp'][1],
                pdict['vdisp'][2], pdict['vdisp'][3], pdict['inc'],
                pdict['phi']
            ]),  # 91.8,98.3,8.88,6.5,60,60
            truth_color='black',
            show_titles=True,
            smooth=0.02,
            max_n_ticks=5,
            quantiles=None,
            labels=[
                r'$v_{rot,225}$', r'$v_{rot,450}$', r'$v_{rot,675}$',
                r'$v_{rot,900}$', r'$\sigma_{225}$', r'$\sigma_{450}$',
                r'$\sigma_{675}$', r'$\sigma_{900}$', r'$i$', r'$\phi$'
            ])

        plt.savefig(path + '/cornerplot_' + galname + nparams + '.png')
        plt.close()

        with open(path + '/' + galname + '.txt', 'w+') as f:
            f.write('Running took: {} hours'.format(
                (time.time() - start) / 3600))

    # Save the model data
    samples, weights = res1.samples, np.exp(res1.logwt - res1.logz[-1])
    mean, cov = dyfunc.mean_and_cov(samples, weights)
    MaP = res1['samples'][res1['logl'].tolist().index(
        max(res1['logl'].tolist()))]
    quantiles = [
        dyfunc.quantile(samps, [0.025, 0.5, 0.975], weights=weights)
        for samps in samples.T
    ]
    print(quantiles)

    # vrotsigma
    sigmavrot1_l = [i for i in samples[:, 0] if (i - MaP[0]) < 0]
    sigmavrot1_r = [i for i in samples[:, 0] if (i - MaP[0]) > 0]
    sigmavrot2_l = [i for i in samples[:, 1] if (i - MaP[1]) < 0]
    sigmavrot2_r = [i for i in samples[:, 1] if (i - MaP[1]) > 0]
    sigmavrot3_l = [i for i in samples[:, 2] if (i - MaP[2]) < 0]
    sigmavrot3_r = [i for i in samples[:, 2] if (i - MaP[2]) > 0]
    sigmavrot4_l = [i for i in samples[:, 3] if (i - MaP[3]) < 0]
    sigmavrot4_r = [i for i in samples[:, 3] if (i - MaP[3]) > 0]

    if len(sigmavrot1_l) == 0: sigmavrot1_l.append(0)
    if len(sigmavrot1_r) == 0: sigmavrot1_r.append(0)
    if len(sigmavrot2_l) == 0: sigmavrot2_l.append(0)
    if len(sigmavrot2_r) == 0: sigmavrot2_r.append(0)
    if len(sigmavrot3_l) == 0: sigmavrot3_l.append(0)
    if len(sigmavrot3_r) == 0: sigmavrot3_r.append(0)
    if len(sigmavrot4_l) == 0: sigmavrot4_l.append(0)
    if len(sigmavrot4_r) == 0: sigmavrot4_r.append(0)

    # vdispsigma
    sigmavdisp1_l = [i for i in samples[:, 4] if (i - MaP[4]) < 0]
    sigmavdisp1_r = [i for i in samples[:, 4] if (i - MaP[4]) > 0]
    sigmavdisp2_l = [i for i in samples[:, 5] if (i - MaP[5]) < 0]
    sigmavdisp2_r = [i for i in samples[:, 5] if (i - MaP[5]) > 0]
    sigmavdisp3_l = [i for i in samples[:, 6] if (i - MaP[6]) < 0]
    sigmavdisp3_r = [i for i in samples[:, 6] if (i - MaP[6]) > 0]
    sigmavdisp4_l = [i for i in samples[:, 7] if (i - MaP[7]) < 0]
    sigmavdisp4_r = [i for i in samples[:, 7] if (i - MaP[7]) > 0]

    if len(sigmavdisp1_l) == 0: sigmavdisp1_l.append(0)
    if len(sigmavdisp1_r) == 0: sigmavdisp1_r.append(0)
    if len(sigmavdisp2_l) == 0: sigmavdisp2_l.append(0)
    if len(sigmavdisp2_r) == 0: sigmavdisp2_r.append(0)
    if len(sigmavdisp3_l) == 0: sigmavdisp3_l.append(0)
    if len(sigmavdisp3_r) == 0: sigmavdisp3_r.append(0)
    if len(sigmavdisp4_l) == 0: sigmavdisp4_l.append(0)
    if len(sigmavdisp4_r) == 0: sigmavdisp4_r.append(0)

    pdict['sigmavrot'] = [(np.std(sigmavrot1_l), np.std(sigmavrot1_r)),
                          (np.std(sigmavrot2_l), np.std(sigmavrot2_r)),
                          (np.std(sigmavrot3_l), np.std(sigmavrot3_r)),
                          (np.std(sigmavrot4_l), np.std(sigmavrot4_r))]
    pdict['sigmavdisp'] = [(np.std(sigmavdisp1_l), np.std(sigmavdisp1_r)),
                           (np.std(sigmavdisp2_l), np.std(sigmavdisp2_r)),
                           (np.std(sigmavdisp3_l), np.std(sigmavdisp3_r)),
                           (np.std(sigmavdisp4_l), np.std(sigmavdisp4_r))]

    if len(MaP) == 8:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]

    if len(MaP) == 9:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

    if len(MaP) == 10:
        pdict['vrot'] = MaP[0:4]
        pdict['vdisp'] = MaP[4:8]
        pdict['inc'] = MaP[8]
        pdict['phi'] = MaP[9]

        # inc
        sigmainc_l = [i for i in samples[:, 8] if (i - MaP[8]) < 0]
        sigmainc_r = [i for i in samples[:, 8] if (i - MaP[8]) > 0]
        if len(sigmainc_l) == 0: sigmainc_l.append(0)
        if len(sigmainc_r) == 0: sigmainc_r.append(0)
        pdict['sigmainc'] = [(np.std(sigmainc_l), np.std(sigmainc_r))]

        # phi
        sigmaphi_l = [i for i in samples[:, 9] if (i - MaP[9]) < 0]
        sigmaphi_r = [i for i in samples[:, 9] if (i - MaP[9]) > 0]
        if len(sigmaphi_l) == 0: sigmaphi_l.append(0)
        if len(sigmaphi_r) == 0: sigmaphi_r.append(0)
        pdict['sigmaphi'] = [(np.std(sigmaphi_l), np.std(sigmaphi_r))]

    # We don't need data entry
    pdict['Data'] = None
    with open(path + '/params_model.json', 'w') as f:
        f.write(json.dumps(pdict, cls=NumpyEncoder))
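The repeated one-sided spreads around the maximum-a-posteriori value above all follow the same pattern; a compact helper expressing that idea (a sketch, not the project's own code) could be:

import numpy as np

def asymmetric_std(samples_1d, center):
    # One-sided standard deviations of the samples below/above `center`,
    # mirroring the sigmavrot/sigmavdisp computation above; an empty side
    # is reported as 0, as in the original.
    left = samples_1d[samples_1d < center]
    right = samples_1d[samples_1d > center]
    return (np.std(left) if left.size else 0.0,
            np.std(right) if right.size else 0.0)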
Code Example #31
0
def ns_output(datadir):
    '''
    Inputs:
    -------
    datadir : str
        the working directory for allesfitter
        must contain all the data files
        output directories and files will also be created inside datadir
            
    Outputs:
    --------
    This will output information to the console and create output files
    in datadir/results/ (or datadir/QL/ if QL==True)
    '''
    config.init(datadir)

    #::: security check
    if os.path.exists(os.path.join(config.BASEMENT.outdir, 'ns_fit.jpg')):
        overwrite = input('Nested Sampling output files already exist in '+config.BASEMENT.outdir+'.\n'+\
                              'What do you want to do?\n'+\
                              '1 : overwrite the output files\n'+\
                              '2 : abort\n')
        if (overwrite == '1'):
            pass
        else:
            raise ValueError('User aborted operation.')

    #::: load the save_ns.pickle
    with open(os.path.join(config.BASEMENT.outdir, 'save_ns.pickle'),
              'rb') as f:
        results = pickle.load(f)

    #::: plot the fit
    samples = draw_ns_samples(results,
                              Nsamples=20)  #only 20 samples for plotting
    for planet in config.BASEMENT.settings['planets_all']:
        fig, axes = afplot(samples, planet)
        fig.savefig(os.path.join(config.BASEMENT.outdir,
                                 'ns_fit_' + planet + '.jpg'),
                    dpi=100,
                    bbox_inches='tight')
        plt.close(fig)

    #::: output the results
    logprint('\nResults:')
    logprint('--------------------------')
    #    print(results.summary())
    samples = draw_ns_samples(results)  #all samples
    #    plt.figure()
    #    plt.plot(np.arange(len(samples[:,0])), samples[:,0])
    #    plt.show()
    #    plt.figure()
    #    plt.plot(np.arange(len(samples[:,1])), samples[:,1])
    #    plt.show()
    logZdynesty = results.logz[-1]  # value of logZ
    logZerrdynesty = results.logzerr[
        -1]  # estimate of the statistical uncertainty on logZ
    logprint('log(Z) = {} +- {}'.format(logZdynesty, logZerrdynesty))
    logprint('Nr. of posterior samples: {}'.format(len(samples)))

    #::: plot all the diagnositc plots
    #    rfig, raxes = dyplot.runplot(results)
    #    rfig.savefig( os.path.join(config.BASEMENT.outdir,'ns_run.jpg'), dpi=100, bbox_inches='tight' )
    #    plt.close(rfig)

    #::: make pretty titles for the plots
    labels, units = [], []
    for i, l in enumerate(config.BASEMENT.fitlabels):
        labels.append(str(config.BASEMENT.fitlabels[i]))
        units.append(str(config.BASEMENT.fitunits[i]))

    results2 = results.copy()
    params_median, params_ll, params_ul = get_params_from_samples(
        results['samples'])

    for planet in config.BASEMENT.settings['planets_all']:

        #        if planet+'_period' in config.BASEMENT.fitkeys:
        #            ind    = np.where(config.BASEMENT.fitkeys==planet+'_period')[0][0]
        #            results2['samples'][:,ind] -= np.round(params_median[planet+'_period'],decimals=3)
        #            units[ind] = str(units[ind]+'-'+np.format_float_positional(params_median[planet+'_period'],3)+'d')

        if planet + '_epoch' in config.BASEMENT.fitkeys:
            ind = np.where(config.BASEMENT.fitkeys == planet + '_epoch')[0][0]
            results2['samples'][:, ind] -= int(
                params_median[planet + '_epoch']
            )  #np.round(params_median[planet+'_epoch'],decimals=0)
            units[ind] = str(
                units[ind] + '-' + str(int(params_median[planet + '_epoch'])) +
                'd'
            )  #np.format_float_positional(params_median[planet+'_epoch'],0)+'d')
            config.BASEMENT.fittruths[ind] -= int(params_median[planet +
                                                                '_epoch'])

    for i, l in enumerate(labels):
        if units[i] != '':
            labels[i] = str(labels[i] + ' (' + units[i] + ')')

    #::: traceplot
    cmap = truncate_colormap('Greys', minval=0.2, maxval=0.8, n=256)
    tfig, taxes = dyplot.traceplot(results2,
                                   labels=labels,
                                   truths=config.BASEMENT.fittruths,
                                   post_color='grey',
                                   trace_cmap=[cmap] * config.BASEMENT.ndim)
    plt.tight_layout()

    #::: cornerplot
    cfig, caxes = dyplot.cornerplot(results2,
                                    labels=labels,
                                    truths=config.BASEMENT.fittruths,
                                    hist_kwargs={
                                        'alpha': 0.25,
                                        'linewidth': 0,
                                        'histtype': 'stepfilled'
                                    })

    #::: set allesfitter titles
    for i, key in enumerate(config.BASEMENT.fitkeys):
        params_median, params_ll, params_ul = get_params_from_samples(
            results2['samples'])
        value = round_tex(params_median[key], params_ll[key], params_ul[key])
        ttitle = r'' + labels[i] + r'$=' + value + '$'
        taxes[i, 1].set_title(ttitle)
        ctitle = r'' + labels[i] + '\n' + r'$=' + value + '$'
        caxes[i, i].set_title(ctitle)
    for i in range(caxes.shape[0]):
        for j in range(caxes.shape[1]):
            caxes[i, j].xaxis.set_label_coords(0.5, -0.5)
            caxes[i, j].yaxis.set_label_coords(-0.5, 0.5)

    #::: save and close the trace- and cornerplot
    tfig.savefig(os.path.join(config.BASEMENT.outdir, 'ns_trace.jpg'),
                 dpi=100,
                 bbox_inches='tight')
    plt.close(tfig)
    cfig.savefig(os.path.join(config.BASEMENT.outdir, 'ns_corner.jpg'),
                 dpi=100,
                 bbox_inches='tight')
    plt.close(cfig)

    #::: save the tables
    save_table(samples, 'ns')
    save_latex_table(samples, 'ns')

    #::: derive values (using stellar parameters from params_star.csv)
    if os.path.exists(os.path.join(config.BASEMENT.datadir,
                                   'params_star.csv')):
        deriver.derive(samples, 'ns')
    else:
        print(
            'File "params_star.csv" not found. Cannot derive final parameters.'
        )

    logprint('Done. For all outputs, see', config.BASEMENT.outdir)
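The logZ value printed above is what nested sampling provides for model comparison; a minimal sketch of turning two such runs into a log Bayes factor, assuming both inputs are dynesty Results objects:

import numpy as np

def log_bayes_factor(results_a, results_b):
    # Difference of log-evidences between two dynesty runs plus a rough
    # combined uncertainty, in the spirit of the logZ printout above.
    dlnz = results_a.logz[-1] - results_b.logz[-1]
    err = np.sqrt(results_a.logzerr[-1]**2 + results_b.logzerr[-1]**2)
    return dlnz, err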