Code example #1
File: nestle.py Project: lscsoft/bilby
    def run_sampler(self):
        """ Runs Nestle sampler with given kwargs and returns the result

        Returns
        =======
        bilby.core.result.Result: Packaged information about the result

        """
        import nestle
        out = nestle.sample(
            loglikelihood=self.log_likelihood,
            prior_transform=self.prior_transform,
            ndim=self.ndim, **self.kwargs)
        print("")

        self.result.sampler_output = out
        self.result.samples = nestle.resample_equal(out.samples, out.weights)
        self.result.nested_samples = DataFrame(
            out.samples, columns=self.search_parameter_keys)
        self.result.nested_samples['weights'] = out.weights
        self.result.nested_samples['log_likelihood'] = out.logl
        self.result.log_likelihood_evaluations = self.reorder_loglikelihoods(
            unsorted_loglikelihoods=out.logl, unsorted_samples=out.samples,
            sorted_samples=self.result.samples)
        self.result.log_evidence = out.logz
        self.result.log_evidence_err = out.logzerr
        self.result.information_gain = out.h
        self.calc_likelihood_count()
        return self.result
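
The wrapper above is a thin layer over nestle's top-level API. For reference, a minimal self-contained sketch of the same workflow on a toy Gaussian likelihood (the likelihood, prior range, and npoints value below are illustrative choices, not taken from bilby):

import numpy as np
import nestle

ndim = 2

def log_likelihood(theta):
    # Isotropic Gaussian centred on (0.5, 0.5) with sigma = 0.1
    return -0.5 * np.sum(((theta - 0.5) / 0.1) ** 2)

def prior_transform(u):
    # Map the unit cube onto a flat prior over [-5, 5] in each dimension
    return 10.0 * u - 5.0

out = nestle.sample(log_likelihood, prior_transform, ndim,
                    method='multi', npoints=500)
print("log-evidence: {} +/- {}".format(out.logz, out.logzerr))

# Convert the weighted nested samples into equally weighted posterior samples
posterior_samples = nestle.resample_equal(out.samples, out.weights)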
Code example #2
File: runtests.py Project: bd-j/nestle
def test_resample_equal():

    N = 1000

    # N randomly weighted samples
    x = np.arange(N).reshape((N, 1))
    w = np.random.random((N,))
    w /= w.sum()

    new_x = nestle.resample_equal(x, w)
    
    # Each original sample should appear in the final sample either
    # floor(w * N) or ceil(w * N) times.
    for i in range(N):
        num = (new_x == x[i]).sum()  # number of times x[i] appears in new_x
        assert math.floor(w[i]*N) <= num <= math.ceil(w[i]*N)
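
The floor/ceil guarantee asserted here comes from resample_equal drawing new samples on an evenly spaced grid over the cumulative weights (systematic resampling). A rough sketch of that idea, written from the standard algorithm rather than copied from nestle's implementation:

import numpy as np

def systematic_resample(samples, weights):
    # One uniform random offset, then N evenly spaced points in [0, 1)
    N = len(weights)
    positions = (np.random.random() + np.arange(N)) / N
    cumulative = np.cumsum(weights)
    cumulative[-1] = 1.0  # guard against floating-point round-off
    # Each input sample i is picked once for every grid point that falls in
    # its weight interval, i.e. floor(w[i]*N) or ceil(w[i]*N) times.
    idx = np.searchsorted(cumulative, positions)
    return samples[idx]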
Code example #3
File: retriever.py Project: exowanderer/platon
    def run_multinest(self, wavelength_bins, depths, errors, fit_info,
                      include_condensation=True, plot_best=False,
                      **nestle_kwargs):
        '''Runs nested sampling to retrieve atmospheric parameters.

        Parameters
        ----------
        wavelength_bins : array_like, shape (N,2)
            Wavelength bins, where wavelength_bins[i][0] is the start
            wavelength and wavelength_bins[i][1] is the end wavelength for
            bin i.
        depths : array_like, length N
            Measured transit depths for the specified wavelength bins
        errors : array_like, length N
            Errors on the aforementioned transit depths
        fit_info : :class:`.FitInfo` object
            Tells us what parameters to
            freely vary, and in what range those parameters can vary. Also
            sets default values for the fixed parameters.
        include_condensation : bool, optional
            When determining atmospheric abundances, whether to include
            condensation.
        plot_best : bool, optional
            If True, plots the best fit model with the data
        **nestle_kwargs
            Keyword arguments to pass to nestle's sample method

        Returns
        -------
        result : Result object
            This returns the object returned by nestle.sample.  The object is
            dictionary-like and has many useful items.  For example,
            result.samples (or alternatively, result["samples"]) are the
            parameter values of each sample, result.weights contains the
            weights, and result.logl contains the log likelihoods.  result.logz
            is the natural logarithm of the evidence.
        '''
        calculator = TransitDepthCalculator(
            include_condensation=include_condensation)
        calculator.change_wavelength_bins(wavelength_bins)
        self._validate_params(fit_info, calculator)

        def transform_prior(cube):
            new_cube = np.zeros(len(cube))
            for i in range(len(cube)):
                new_cube[i] = fit_info._from_unit_interval(i, cube[i])
            return new_cube

        def multinest_ln_prob(cube):
            return self._ln_prob(cube, calculator, fit_info, depths, errors)

        def callback(callback_info):
            print(callback_info["it"], callback_info["logz"],
                  transform_prior(callback_info["active_u"][0]))

        result = nestle.sample(
            multinest_ln_prob, transform_prior, fit_info._get_num_fit_params(),
            callback=callback, method='multi', **nestle_kwargs)

        best_params_arr = result.samples[np.argmax(result.logl)]
        
        write_param_estimates_file(
            nestle.resample_equal(result.samples, result.weights),
            best_params_arr,
            np.max(result.logl),
            fit_info.fit_param_names)

        if plot_best:
            self._ln_prob(best_params_arr, calculator, fit_info,
                          depths, errors, plot=True)
        return result
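
transform_prior above maps points from the unit hypercube, which nestle samples from, to physical parameter values via fit_info._from_unit_interval. A generic illustration of such a transform (the parameter names and ranges below are invented for the example and are not platon's):

import numpy as np

def example_prior_transform(cube):
    params = np.zeros_like(cube)
    params[0] = 0.9 + 0.2 * cube[0]        # uniform prior on a radius-like scale factor
    params[1] = 10 ** (-4 + 4 * cube[1])   # log-uniform prior on an abundance-like quantity
    params[2] = 500 + 2500 * cube[2]       # uniform prior on a temperature-like parameter
    return params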
Code example #4
File: silke.py Project: akutkin/SACA
def fit_model_with_nestle(uv_fits, model_file, components_priors, outdir=None,
                          **nestle_kwargs):
    """
    :param uv_fits:
        Path to uv-fits file with self-calibrated visibilities.
    :param model_file:
        Path to file with difmap model.
    :param components_priors:
        Priors' ppfs for the components. The component closest to the phase
        center goes first. Iterable of dicts with parameter names as keys and
        ``(callable, args, kwargs,)`` tuples as values, where ``args`` &
        ``kwargs`` are additional arguments to the callable. Each callable is
        called as ``callable(u, *args, **kwargs)`` and should be a prior's
        ppf (inverse CDF), e.g. ``scipy.stats.uniform.ppf``.

        Example of prior on single component:
            {'flux': (scipy.stats.uniform.ppf, [0., 10.], dict(),),
             'bmaj': (scipy.stats.uniform.ppf, [0, 5.], dict(),),
             'e': (scipy.stats.beta.ppf, [alpha, beta], dict(),)}
        The first key will result in calling ``scipy.stats.uniform.ppf(u, 0, 10)``
        to obtain the value of the ``flux`` parameter from its prior.
    :param outdir: (optional)
        Directory to output results. If ``None`` then use cwd. (default:
        ``None``)
    :param nestle_kwargs: (optional)
        Any arguments passed to ``nestle.sample`` function.

    :return:
        Results of running ``nestle.sample`` on that model.
    """
    if outdir is None:
        outdir = os.getcwd()

    mdl_file = model_file
    uv_data = UVData(uv_fits)
    mdl_dir, mdl_fname = os.path.split(mdl_file)
    comps = import_difmap_model(mdl_fname, mdl_dir)

    # Sort components by distance from phase center
    comps = sorted(comps, key=lambda x: np.sqrt(x.p[1]**2 + x.p[2]**2))

    ppfs = list()
    labels = list()
    for component_prior in components_priors:
        for comp_name in ('flux', 'x', 'y', 'bmaj', 'e', 'bpa'):
            try:
                ppfs.append(_function_wrapper(*component_prior[comp_name]))
                labels.append(comp_name)
            except KeyError:
                pass

    for ppf in ppfs:
        print(ppf.args)

    hypercube = hypercube_partial(ppfs)

    # Create model (note: ``stokes`` is assumed to be defined in the
    # enclosing scope, e.g. ``stokes = 'I'``)
    mdl = Model(stokes=stokes)
    # Add components to model
    mdl.add_components(*comps)
    loglike = LnLikelihood(uv_data, mdl)
    time0 = time.time()
    result = nestle.sample(loglikelihood=loglike, prior_transform=hypercube,
                           ndim=mdl.size, npoints=50, method='multi',
                           callback=nestle.print_progress, **nestle_kwargs)
    print("Time spent : {}".format(time.time()-time0))
    samples = nestle.resample_equal(result.samples, result.weights)
    # Save re-weighted samples from posterior to specified ``outdir``
    # directory
    np.savetxt(os.path.join(outdir, 'samples.txt'), samples)
    fig = corner.corner(samples, show_titles=True, labels=labels,
                        quantiles=[0.16, 0.5, 0.84], title_fmt='.3f')
    # Save corner plot of samples from posterior to specified ``outdir``
    # directory
    fig.savefig(os.path.join(outdir, "corner.png"), bbox_inches='tight',
                dpi=200)

    return result
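
hypercube_partial is not shown in this snippet; judging from the docstring, it builds a prior transform that maps each unit-cube coordinate through the matching ppf in ppfs. A plausible sketch of such a helper (an assumption about its behaviour, not the SACA implementation):

import functools

def hypercube_transform(u, ppfs):
    # Map each coordinate of the unit cube through the matching prior ppf
    return [ppf(u_i) for ppf, u_i in zip(ppfs, u)]

# hypercube_partial(ppfs) could then be something like:
# hypercube = functools.partial(hypercube_transform, ppfs=ppfs)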
Code example #5
File: run_model.py Project: abhisrkckl/opha
def plot_posterior(model,
                   data,
                   result,
                   display_params,
                   save_prefix,
                   nbins=15,
                   z=0.306):

    ndim = model.N_PARAMS

    samples = result.samples.copy()
    for iparam in range(ndim):
        unit_frac = display_params[iparam][1]
        shift = display_params[iparam][3]
        samples[:, iparam] /= unit_frac
        samples[:, iparam] -= shift

    plot_labels = [
        make_plot_label(*display_param) for display_param in display_params
    ]

    #print plot_labels
    #means, cov = nestle.mean_and_cov(result.samples, result.weights)
    #print "Estimated parameters :"
    #for ipar, mean in enumerate(means):
    #    print latex.latex_to_text(plot_labels[ipar]), "=", mean, "+/-", cov[ipar,ipar]

    corner.corner(
        samples,
        weights=result.weights,
        quantiles=[0.0455, 0.5, 0.9545],
        bins=nbins,
        labels=plot_labels,
        label_kwargs={
            "labelpad": 100,
            "fontsize": 12
        },
        #show_titles=True,
        range=[0.99999999999999] * ndim,
        use_math_text=True,
        title_fmt="0.3f")

    #mean, cov = nestle.mean_and_cov(result.samples, result.weights)
    idx_t0 = model.N_STATE_PARAMS - 1
    #t0 = mean[idx_t0]
    data_x, data_y, data_yerr = data
    #outburst_time_mean = model.outburst_times(mean, data_x, 1e-14, 1e-14, 0.1)
    #outburst_time_mean_z = t0 + (outburst_time_mean-t0)*(1+z)
    #outburst_time_samples_yr = np.zeros_like(outburst_time_samples)
    #chisq = sum(((data_y-outburst_time_mean_z)/data_yerr)**2) / (len(data_x)-len(mean))

    ##################

    # Inset showing timing residuals.

    samples_new = nestle.resample_equal(result.samples, result.weights)
    outburst_time_samples = model.outburst_times_x(samples_new, data[0], 1e-14,
                                                   1e-14, 0.1)
    t0s = samples_new[:, idx_t0]
    outburst_time_samples_yr = np.zeros_like(outburst_time_samples)
    for idx, (tob_sample, t0) in enumerate(zip(outburst_time_samples, t0s)):
        outburst_time_samples_yr[idx] = (t0 + (tob_sample - t0) *
                                         (1 + z)) / year
    tob_pred_means = np.mean(outburst_time_samples_yr, axis=0)
    tob_pred_stds = np.std(outburst_time_samples_yr, axis=0)

    chisq = 0
    plt.subplot(4, 3, 3)
    #plt.errorbar(data_y/year, (data_y-outburst_time_mean_z)/day, data_yerr/day, fmt='+', label="$\\chi^2/\\nu = %f$"%(chisq))
    plt.errorbar(data_y / year,
                 np.zeros_like(data_y),
                 tob_pred_stds * year / day,
                 label="$\\chi^2/\\nu = %f$" % (chisq),
                 elinewidth=5,
                 fmt=".")
    plt.errorbar(data_y / year, (data_y - tob_pred_means * year) / day,
                 data_yerr / day,
                 fmt='+',
                 label="$\\chi^2/\\nu = %f$" % (chisq),
                 elinewidth=2.5)
    plt.xlabel("$t_{ob}$ (yr)")
    plt.ylabel("Residuals (day)")
    #plt.legend()
    plt.grid()

    outburst_time_samples_all = model.outburst_times_x(
        samples_new, np.pi * np.arange(5, 25), 1e-14, 1e-14, 0.1)
    outburst_time_samples_yr_all = np.zeros_like(outburst_time_samples_all)
    for idx, (tob_sample, t0) in enumerate(zip(outburst_time_samples_all,
                                               t0s)):
        outburst_time_samples_yr_all[idx] = (t0 + (tob_sample - t0) *
                                             (1 + z)) / year
    tob_pred_means_all = np.mean(outburst_time_samples_yr_all, axis=0)
    tob_pred_stds_all = np.std(outburst_time_samples_yr_all, axis=0)
    print(tob_pred_means_all)
    """
    mean, cov = nestle.mean_and_cov(samples, result.weights)
    std = np.diag(cov)
    plt.subplot(3,3,6)
    params_text = ""
    for m,s, label in zip(mean,std,plot_labels):
        params_text += "%s = %0.6f $\\pm$ %0.6f\n"%(label,m,s)
    plt.text(0.1,0.1,params_text)
    plt.axis('off')
    """

    plt.savefig(save_prefix + "_post.pdf")
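
The commented-out blocks above estimate parameters with nestle.mean_and_cov; an equivalent summary can be obtained from the equally weighted samples returned by resample_equal. A short illustrative helper (not part of the original script):

import nestle

def summarize(result, labels):
    # Equally weighted posterior samples from the weighted nested samples
    samples = nestle.resample_equal(result.samples, result.weights)
    means = samples.mean(axis=0)
    stds = samples.std(axis=0)
    for label, m, s in zip(labels, means, stds):
        print("{} = {:.6f} +/- {:.6f}".format(label, m, s))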