Example #1
def boot_to_posterior(values, weights):
    # plotting bounds: a 5-sigma equal-tailed credible interval
    q = [0.5 - 0.5 * 0.999999426697, 0.5 + 0.5 * 0.999999426697]
    span = _quantile(values, q, weights=weights)

    # smoothing fraction of the span; oversample the histogram by ~10x
    s = 0.02
    bins = int(round(10. / s))
    n, b = np.histogram(values, bins=bins, weights=weights,
                        range=np.sort(span))
    n = norm_kde(n, 20.)

    # bin centers, normalized to unit integral
    x0 = 0.5 * (b[1:] + b[:-1])
    y0 = n
    return x0, y0 / np.trapz(y0, x0)
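These snippets lean on two helpers that dynesty itself uses: `norm_kde` is scipy's Gaussian filter, and `_quantile` is dynesty's weighted-quantile routine. A minimal, hedged setup for running Example #1 on fake weighted samples (the import paths below match recent dynesty releases but are worth double-checking against your version):

import numpy as np
from scipy.ndimage import gaussian_filter as norm_kde  # dynesty's own alias
from dynesty.plotting import _quantile                 # weighted quantile helper

values = np.random.randn(10000)   # stand-in posterior samples
weights = np.random.rand(10000)
weights /= weights.sum()          # normalized importance weights
x0, y0 = boot_to_posterior(values, weights)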
Example #2
def Get_posterior(sample, logwt, logz):
    # importance weights, normalized by the final evidence estimate
    weight = np.exp(logwt - logz[-1])

    # plotting bounds: a 5-sigma equal-tailed credible interval
    q = [0.5 - 0.5 * 0.999999426697, 0.5 + 0.5 * 0.999999426697]
    span = _quantile(sample.T, q, weights=weight)

    # smoothing fraction of the span; oversample the histogram by ~10x
    s = 0.02
    bins = int(round(10. / s))
    n, b = np.histogram(sample, bins=bins, weights=weight, range=np.sort(span))
    n = norm_kde(n, 10.)

    # bin centers, normalized to unit integral
    x0 = 0.5 * (b[1:] + b[:-1])
    y0 = n
    return x0, y0 / np.trapz(y0, x0)
Example #3
def plot_kde(arr, name, lab, bins, outname):
    n, b = np.histogram(arr, bins=bins, density=True)
    n = norm_kde(n, 10.)
    x = 0.5 * (b[1:] + b[:-1])
    y = n
    plt.fill_between(x, y, color='b', alpha=0.6)
    plt.xlabel(lab)
    plt.ylabel("(Integral-normalized) Probability Density")
    plt.xlim(min(x), max(x))
    plt.ylim(0, max(y) * 1.05)
    if outname is None:
        plt.show()
    else:
        plt.savefig(outname + name)
        plt.close('all')
    return None
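A quick driver for `plot_kde`, assuming the usual imports (`matplotlib.pyplot` as `plt`, plus the scipy alias above); passing `outname=None` shows the figure interactively:

import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter as norm_kde

arr = np.random.normal(loc=2.0, scale=0.5, size=5000)  # fake samples
plot_kde(arr, name='_mass.png', lab=r'$\log M$', bins=100, outname=None)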
Example #4
def plot_pdf(ax, samples, weights):

    # smoothing routine from dynesty
    bins = int(round(10. / 0.02))
    n, b = np.histogram(samples, bins=bins, weights=weights,
                        range=[samples.min(), samples.max()], density=True)
    n = norm_kde(n, 10.)
    x0 = 0.5 * (b[1:] + b[:-1])
    y0 = n
    ax.fill_between(x0, y0 / y0.max(), color=colors['samples'],
                    alpha=alpha_posterior)

    # error bars
    qvalues = quantile(samples, np.array([0.16, 0.50, 0.84]), weights=weights)
    q_m = qvalues[1] - qvalues[0]
    q_p = qvalues[2] - qvalues[1]
    fmt = "{{0:{0}}}".format(".1f").format
    title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
    title = title.format(fmt(float(qvalues[1])), fmt(float(q_m)), fmt(float(q_p)))
    ax.set_title(title, va='center', fontsize=fs_global - 2)

    return y0.max()
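The nested-brace pattern in the title builds a LaTeX string of the form $q50_{-err}^{+err}$. A standalone sketch of what it evaluates to:

fmt = "{{0:{0}}}".format(".1f").format   # a one-decimal formatter
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$".format(fmt(1.234), fmt(0.056), fmt(0.078))
print(title)  # -> ${1.2}_{-0.1}^{+0.1}$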
Example #5
def traceplot(results, span=None, quantiles=[0.025, 0.5, 0.975], smooth=0.02,
              post_color='blue', post_kwargs=None, kde=True, nkde=1000,
              trace_cmap='plasma', trace_color=None, trace_kwargs=None,
              connect=False, connect_highlight=10, connect_color='red',
              connect_kwargs=None, max_n_ticks=5, use_math_text=False,
              labels=None, label_kwargs=None,
              show_titles=False, title_fmt=".2f", title_kwargs=None,
              truths=None, truth_color='red', truth_kwargs=None,
              verbose=False, fig=None):
    """Plot traces and marginalized posteriors for each parameter.

    Parameters
    ----------
    results : `~dynesty.results.Results` instance
        A `~dynesty.results.Results` instance from a nested
        sampling run. Compatible with results derived from
        `nestle <http://kylebarbary.com/nestle/>`_.
    span : iterable with shape (ndim,), optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::
            span = [(0., 10.), 0.95, (5., 6.)]
        Default is `0.999999426697` (5-sigma credible interval) for each
        parameter.
    quantiles : iterable, optional
        A list of fractional quantiles to overplot on the 1-D marginalized
        posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
        (the 95%/2-sigma credible interval).
    smooth : float or iterable with shape (ndim,), optional
        The standard deviation (either a single value or a different value for
        each subplot) for the Gaussian kernel used to smooth the 1-D
        marginalized posteriors, expressed as a fraction of the span.
        Default is `0.02` (2% smoothing). If an integer is provided,
        a simple (weighted) histogram with `bins=smooth` is used instead.
    post_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting the histograms.
        Default is `'blue'`.
    post_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the
        marginalized 1-D posteriors.
    kde : bool, optional
        Whether to use kernel density estimation to estimate and plot
        the PDF of the importance weights as a function of log-volume
        (as opposed to the importance weights themselves). Default is
        `True`.
    nkde : int, optional
        The number of grid points used when plotting the kernel density
        estimate. Default is `1000`.
    trace_cmap : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style colormap (either a single colormap or a
        different colormap for each subplot) used when plotting the traces,
        where each point is colored according to its weight. Default is
        `'plasma'`.
    trace_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a
        different color for each subplot) used when plotting the traces.
        This overrides the `trace_cmap` option by giving all points
        the same color. Default is `None` (not used).
    trace_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the traces.
    connect : bool, optional
        Whether to draw lines connecting the paths of unique particles.
        Default is `False`.
    connect_highlight : int or iterable, optional
        If `connect=True`, highlights the paths of a specific set of
        particles. If an integer is passed, :data:`connect_highlight`
        random particle paths will be highlighted. If an iterable is passed,
        then the particle paths corresponding to the provided indices
        will be highlighted.
    connect_color : str, optional
        The color of the highlighted particle paths. Default is `'red'`.
    connect_kwargs : dict, optional
        Extra keyword arguments used for plotting particle paths.
    max_n_ticks : int, optional
        Maximum number of ticks allowed. Default is `5`.
    use_math_text : bool, optional
        Whether the axis tick labels for very large/small exponents should be
        displayed as powers of 10 rather than using `e`. Default is `False`.
    labels : iterable with shape (ndim,), optional
        A list of names for each parameter. If not provided, the default name
        used when plotting will follow :math:`x_i` style.
    label_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_xlabel` and
        `~matplotlib.axes.Axes.set_ylabel` methods.
    show_titles : bool, optional
        Whether to display a title above each 1-D marginalized posterior
        showing the 0.5 quantile along with the upper/lower bounds associated
        with the 0.025 and 0.975 (95%/2-sigma credible interval) quantiles.
        Default is `False`.
    title_fmt : str, optional
        The format string for the quantiles provided in the title. Default is
        `'.2f'`.
    title_kwargs : dict, optional
        Extra keyword arguments that will be sent to the
        `~matplotlib.axes.Axes.set_title` command.
    truths : iterable with shape (ndim,), optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape (ndim,), optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    verbose : bool, optional
        Whether to print the values of the computed quantiles associated with
        each parameter. Default is `False`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.

    Returns
    -------
    traceplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        Output trace plot.

    """
    # Initialize values.
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()
    if trace_kwargs is None:
        trace_kwargs = dict()
    if connect_kwargs is None:
        connect_kwargs = dict()
    if post_kwargs is None:
        post_kwargs = dict()
    if truth_kwargs is None:
        truth_kwargs = dict()

    # Set defaults.
    connect_kwargs['alpha'] = connect_kwargs.get('alpha', 0.7)
    post_kwargs['alpha'] = post_kwargs.get('alpha', 0.6)
    trace_kwargs['s'] = trace_kwargs.get('s', 3)
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)

    # Extract weighted samples.
    samples = results['samples']
    logvol = results['logvol']
    try:
        weights = np.exp(results['logwt'] - results['logz'][-1])
    except KeyError:
        weights = results['weights']

    wts = weights
    if kde:
        try:
            from scipy.ndimage import gaussian_filter as norm_kde
            from scipy.stats import gaussian_kde
            # Derive kernel density estimate.
            wt_kde = gaussian_kde(resample_equal(-logvol, weights))  # KDE
            logvol_grid = np.linspace(logvol[0], logvol[-1], nkde)  # resample
            wt_grid = wt_kde.pdf(-logvol_grid)  # evaluate KDE PDF
            wts = np.interp(-logvol, -logvol_grid, wt_grid)  # interpolate
        except ImportError:
            kde = False

    # Deal with 1D results. A number of extra catches are also here
    # in case users are trying to plot other results besides the `Results`
    # instance generated by `dynesty`.
    samples = np.atleast_1d(samples)
    if len(samples.shape) == 1:
        samples = np.atleast_2d(samples)
    else:
        assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
        samples = samples.T
    assert samples.shape[0] <= samples.shape[1], "There are more " \
                                                 "dimensions than samples!"
    ndim, nsamps = samples.shape

    # Check weights.
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")

    # Check ln(volume).
    if logvol.ndim != 1:
        raise ValueError("Ln(volume)'s must be 1-D.")
    if nsamps != logvol.shape[0]:
        raise ValueError("The number of ln(volume)'s and samples disagree!")

    # Check sample IDs.
    if connect:
        try:
            samples_id = results['samples_id']
            uid = np.unique(samples_id)
        except KeyError:
            raise ValueError("Sample IDs are not defined!")
        try:
            # If `connect_highlight` is indexable, use the provided indices.
            connect_highlight[0]
            ids = connect_highlight
        except TypeError:
            # Otherwise, highlight a random subset of particle paths.
            ids = np.random.choice(uid, size=connect_highlight, replace=False)

    # Determine plotting bounds for marginalized 1-D posteriors.
    if span is None:
        span = [0.999999426697 for i in range(ndim)]
    span = list(span)
    if len(span) != ndim:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            xmin, xmax = span[i]
        except TypeError:
            # A float was given: convert to an equal-tailed quantile range.
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = _quantile(samples[i], q, weights=weights)

    # Setting up labels.
    if labels is None:
        labels = [r"$x_{"+str(i+1)+"}$" for i in range(ndim)]

    # Setting up smoothing.
    if isinstance(smooth, (int_type, float_type)):
        smooth = [smooth for i in range(ndim)]

    # Setting up default plot layout.
    if fig is None:
        fig, axes = pl.subplots(ndim, 2, figsize=(12, 3*ndim))
    else:
        fig, axes = fig
        try:
            axes.reshape(ndim, 2)
        except ValueError:
            raise ValueError("Provided axes do not match the required shape "
                             "for plotting samples.")

    # Plotting.
    for i, x in enumerate(samples):

        # Plot trace.

        # Establish axes.
        if np.shape(samples)[0] == 1:
            ax = axes[1]
        else:
            ax = axes[i, 0]
        # Set color(s)/colormap(s).
        if trace_color is not None:
            if isinstance(trace_color, str_type):
                color = trace_color
            else:
                color = trace_color[i]
        else:
            color = wts
        if isinstance(trace_cmap, str_type):
            cmap = trace_cmap
        else:
            cmap = trace_cmap[i]
        # Setup axes.
        ax.set_xlim([0., -min(logvol)])
        ax.set_ylim([min(x), max(x)])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks))
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.yaxis.set_major_formatter(sf)
        ax.set_xlabel(r"$-\ln X$", **label_kwargs)
        ax.set_ylabel(labels[i], **label_kwargs)
        # Generate scatter plot.
        ax.scatter(-logvol, x, c=color, cmap=cmap, **trace_kwargs)
        if connect:
            # Add lines highlighting specific particle paths.
            for j in ids:
                sel = (samples_id == j)
                ax.plot(-logvol[sel], x[sel], color=connect_color,
                        **connect_kwargs)
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            try:
                [ax.axhline(t, color=truth_color, **truth_kwargs)
                 for t in truths[i]]
            except TypeError:
                ax.axhline(truths[i], color=truth_color, **truth_kwargs)

        # Plot marginalized 1-D posterior.

        # Establish axes.
        if np.shape(samples)[0] == 1:
            ax = axes[0]
        else:
            ax = axes[i, 1]
        # Set color(s).
        if isinstance(post_color, str_type):
            color = post_color
        else:
            color = post_color[i]
        # Setup axes
        ax.set_xlim(span[i])
        if max_n_ticks == 0:
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())
        else:
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks))
            ax.yaxis.set_major_locator(NullLocator())
        # Label axes.
        sf = ScalarFormatter(useMathText=use_math_text)
        ax.xaxis.set_major_formatter(sf)
        ax.set_xlabel(labels[i], **label_kwargs)
        # Generate distribution.
        s = smooth[i]
        if isinstance(s, int_type):
            # If `s` is an integer, plot a weighted histogram with
            # `s` bins within the provided bounds.
            n, b, _ = ax.hist(x, bins=s, weights=weights, color=color,
                              range=np.sort(span[i]), **post_kwargs)
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
        else:
            # If `s` is a float, oversample the data relative to the
            # smoothing filter by a factor of 10, then use a Gaussian
            # filter to smooth the results.
            if kde:
                bins = int(round(10. / s))
                n, b = np.histogram(x, bins=bins, weights=weights,
                                    range=np.sort(span[i]))
                x0 = 0.5 * (b[1:] + b[:-1])
                n = norm_kde(n, 10.)
                y0 = n
                ax.fill_between(x0, y0, color=color, **post_kwargs)
            else:
                bins = 40
                n, b = np.histogram(x, bins=bins, weights=weights,
                                    range=np.sort(span[i]))
                x0 = 0.5 * (b[1:] + b[:-1])
                y0 = n
                ax.fill_between(x0, y0, color=color, **post_kwargs)
        ax.set_ylim([0., max(y0) * 1.05])
        # Plot quantiles.
        if quantiles is not None and len(quantiles) > 0:
            qs = _quantile(x, quantiles, weights=weights)
            for q in qs:
                ax.axvline(q, lw=2, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print(labels[i], [blob for blob in zip(quantiles, qs)])
        # Add truth value(s).
        if truths is not None and truths[i] is not None:
            try:
                [ax.axvline(t, color=truth_color, **truth_kwargs)
                 for t in truths[i]]
            except TypeError:
                ax.axvline(truths[i], color=truth_color, **truth_kwargs)
        # Set titles.
        if show_titles:
            title = None
            if title_fmt is not None:
                ql, qm, qh = _quantile(x, [0.025, 0.5, 0.975], weights=weights)
                q_minus, q_plus = qm - ql, qh - qm
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
                title = "{0} = {1}".format(labels[i], title)
                ax.set_title(title, **title_kwargs)

    return fig, axes
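A minimal end-to-end sketch of calling `traceplot`, assuming a working dynesty installation and a toy 2-parameter Gaussian likelihood (the sampler settings here are illustrative, not from the original source):

import numpy as np
import dynesty

def loglike(theta):
    return -0.5 * np.sum(theta**2)   # standard-normal log-likelihood

def ptform(u):
    return 10. * u - 5.              # flat prior on [-5, 5] in each dimension

sampler = dynesty.NestedSampler(loglike, ptform, 2)
sampler.run_nested()
fig, axes = traceplot(sampler.results, show_titles=True)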
Example #6
def plot_prior(ax, mod, par, max, runname, nsamp=100000, ssfr_prior=None):
    """plot prior by using information from Prospector prior object
    """
    
    # for some of these we custom sample a prior
    # for most, we use the built-in Prospector prior object
    parsamp = None
    if par == 'ssfr_100':
        prior = ssfr_prior
    elif par == 'mean_age':

        if runname == 'vis':

            # grab zfraction prior, sample
            logmass = 1 # doesn't matter, but needs definition
            logsfr_prior = mod._config_dict['logsfr_ratios']['prior']
            agebins = mod.params['agebins']
            mass = np.zeros(shape=(agebins.shape[0], nsamp))
            for n in range(nsamp):
                mass[:, n] = vis_params.logmass_to_masses(
                    logmass=logmass, logsfr_ratios=logsfr_prior.sample(),
                    agebins=agebins)

            # final conversion
            # convert to sSFR or mean age
            time_per_bin = 10**agebins[0,1] - 10**agebins[0,0]
            age_in_bin = np.sum(10**agebins,axis=-1)/2.
            prior = ((age_in_bin[:,None] * mass).mean(axis=0) / mass.mean(axis=0))/1e9

        elif runname == 'vis_expsfh':

            tage_prior = mod._config_dict['tage']['prior']
            logtau_prior = mod._config_dict['logtau']['prior']

            tage_samp = tage_prior.distribution.rvs(
                *tage_prior.args, size=nsamp,
                loc=tage_prior.loc, scale=tage_prior.scale)
            tau_samp = 10**logtau_prior.distribution.rvs(
                *logtau_prior.args, size=nsamp,
                loc=logtau_prior.loc, scale=logtau_prior.scale)
            prior = np.array([prosp_dutils.linexp_decl_sfh_avg_age(ta,tau) for (ta,tau) in zip(tage_samp,tau_samp)])

    elif par == 'dust1_fraction':

        # sample, then multiply dust2 by dust1_fraction prior 
        d2_prior = mod._config_dict['dust2']['prior']
        d1_prior = mod._config_dict['dust1_fraction']['prior']

        prior = (d2_prior.distribution.rvs(*d2_prior.args, size=nsamp,
                                           loc=d2_prior.loc, scale=d2_prior.scale) *
                 d1_prior.distribution.rvs(*d1_prior.args, size=nsamp,
                                           loc=d1_prior.loc, scale=d1_prior.scale))

    elif par == 'fagn':

        parsamp = np.array([limits[par][0], limits[par][1]])
        fagn_lim = np.log10(mod._config_dict['fagn']['prior'].range)
        priorsamp = np.repeat(1./(fagn_lim[1]-fagn_lim[0]),2)

    else:
        prior = mod._config_dict[par]['prior']

        # sample PDF at regular intervals
        parsamp = np.linspace(prior.range[0], prior.range[1], nsamp)
        priorsamp = prior.distribution.pdf(parsamp, *prior.args,
                                           loc=prior.loc, scale=prior.scale)


    # build our own prior histogram if we don't have fancy built-in Prospector objects
    if parsamp is None:
        if par == 'ssfr_100':
            bins = int(round(10. / 0.02))
            n, b = np.histogram(prior, bins=bins,
                                range=[prior.min(), prior.max()], density=True)
            priorsamp = norm_kde(n, 10.)
            parsamp = 0.5 * (b[1:] + b[:-1])
        else:
            priorsamp, bins = np.histogram(prior, bins=20, density=True,
                                           range=limits.get(par, None))
            parsamp = (bins[1:] + bins[:-1]) / 2.

    # plot prior
    if max is None:
        max = priorsamp.max()

    ax.plot(parsamp, priorsamp/max, color=colors['prior'], lw=lw,
            linestyle='--', label='prior')

    if par in limits.keys():
        ax.set_xlim(limits[par])

    # simple y-axis labels
    ax.yaxis.set_major_formatter(FormatStrFormatter('%i'))
    ax.yaxis.set_major_locator(plt.MultipleLocator(1))
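The `prior.distribution.rvs(...)` calls above follow scipy.stats conventions (`distribution` is a scipy distribution, with `args`/`loc`/`scale` as its shape parameters). A standalone sketch of the same sample-then-histogram pattern with a plain scipy prior and no Prospector objects (all names here are hypothetical stand-ins):

import numpy as np
from scipy import stats

# stand-in for a Prospector prior: a truncated normal on dust2
dist, args, loc, scale = stats.truncnorm, (0.0, 4.0), 0.3, 1.0
samples = dist.rvs(*args, loc=loc, scale=scale, size=100000)

priorsamp, bins = np.histogram(samples, bins=20, density=True)
parsamp = 0.5 * (bins[1:] + bins[:-1])   # bin centers for plotting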
Example #7
def bin_pdfs_distred(data,
                     cdf=False,
                     ebv=False,
                     dist_type='distance_modulus',
                     lndistprior=None,
                     coord=None,
                     avlim=(0., 6.),
                     rvlim=(1., 8.),
                     parallaxes=None,
                     parallax_errors=None,
                     Nr=100,
                     bins=(750, 300),
                     span=None,
                     smooth=0.01,
                     rstate=None,
                     verbose=False):
    """
    Generate binned versions of the 2-D posteriors for the distance and
    reddening.

    Parameters
    ----------
    data : 3-tuple or 4-tuple containing `~numpy.ndarray`s of shape `(Nsamps)`
        The data that will be plotted. Either a collection of
        `(dists, reds, dreds)` that were saved, or a collection of
        `(scales, avs, rvs, covs_sar)` that will be used to regenerate
        `(dists, reds)` in conjunction with any applied distance
        and/or parallax priors.

    cdf : bool, optional
        Whether to compute the CDF along the reddening axis instead of the
        PDF. Useful when evaluating the MAP LOS fit. Default is `False`.

    ebv : bool, optional
        If provided, will convert from Av to E(B-V) when plotting using
        the provided Rv values. Default is `False`.

    dist_type : str, optional
        The distance format to be plotted. Options include `'parallax'`,
        `'scale'`, `'distance'`, and `'distance_modulus'`.
        Default is `'distance_modulus'`.

    lndistprior : func, optional
        The log-distance prior function used. If not provided, the galactic
        model from Green et al. (2014) will be assumed.

    coord : 2-tuple, optional
        The galactic `(l, b)` coordinates for the object, which is passed to
        `lndistprior` when re-generating the fits.

    avlim : 2-tuple, optional
        The Av limits used to truncate results. Default is `(0., 6.)`.

    rvlim : 2-tuple, optional
        The Rv limits used to truncate results. Default is `(1., 8.)`.

    parallaxes : `~numpy.ndarray` of shape `(Nobj,)`, optional
        The parallax estimates for the sources.

    parallax_errors : `~numpy.ndarray` of shape `(Nobj,)`, optional
        The parallax errors for the sources.

    Nr : int, optional
        The number of Monte Carlo realizations used when sampling using the
        provided parallax prior. Default is `100`.

    bins : int or list of ints with length `(ndim,)`, optional
        The number of bins to be used in each dimension. Default is
        `(750, 300)`.

    span : iterable with shape `(ndim, 2)`, optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds. If not provided, the x-axis will use the
        provided Av bounds while the y-axis will span `(4., 19.)` in
        distance modulus (both appropriately transformed).

    smooth : float or list of floats with shape `(ndim,)`, optional
        The standard deviation (either a single value or a different value for
        each subplot) for the Gaussian kernel used to smooth the 2-D
        marginalized posteriors, expressed as a fraction of the span.
        Default is `0.01` (1% smoothing).

    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    verbose : bool, optional
        Whether to print progress to `~sys.stderr`. Default is `False`.

    Returns
    -------
    binned_vals : `~numpy.ndarray` of shape `(Nobj, Nxbin, Nybin)`
        Binned versions of the PDFs or CDFs.

    xedges : `~numpy.ndarray` of shape `(Nxbin+1,)`
        The edges defining the bins in distance.

    yedges : `~numpy.ndarray` of shape `(Nybin+1,)`
        The edges defining the bins in reddening.

    """

    # Initialize values.
    nobjs, nsamps = data[0].shape
    if rstate is None:
        try:
            # Attempt to use intel-specific version.
            rstate = np.random_intel
        except AttributeError:
            # Fall back to default if not present.
            rstate = np.random
    if lndistprior is None:
        lndistprior = gal_lnprior
    if parallaxes is None:
        parallaxes = np.full(nobjs, np.nan)
    if parallax_errors is None:
        parallax_errors = np.full(nobjs, np.nan)

    # Set up bins.
    if dist_type not in ['parallax', 'scale', 'distance', 'distance_modulus']:
        raise ValueError("The provided `dist_type` is not valid.")
    if span is None:
        avlims = avlim
        dlims = 10**(np.array([4., 19.]) / 5. - 2.)
    else:
        avlims, dlims = span
    try:
        xbin, ybin = bins
    except TypeError:
        # A single bin count was provided for both dimensions.
        xbin = ybin = bins
    # Whether plotting Av or E(B-V), use the Av limits
    # (default Rv goes from [1., 8.] -> min(Rv) = 1, so E(B-V) <= Av).
    ylims = avlims
    if dist_type == 'scale':
        xlims = (1. / dlims[::-1])**2
    elif dist_type == 'parallax':
        xlims = 1. / dlims[::-1]
    elif dist_type == 'distance':
        xlims = dlims
    elif dist_type == 'distance_modulus':
        xlims = 5. * np.log10(dlims) + 10.
    xbins = np.linspace(xlims[0], xlims[1], xbin + 1)
    ybins = np.linspace(ylims[0], ylims[1], ybin + 1)
    dx, dy = xbins[1] - xbins[0], ybins[1] - ybins[0]
    xspan, yspan = xlims[1] - xlims[0], ylims[1] - ylims[0]

    # Set smoothing.
    try:
        if smooth[0] < 1:
            xsmooth = smooth[0] * xspan
        else:
            xsmooth = smooth[0] * dx
        if smooth[1] < 1:
            ysmooth = smooth[1] * yspan
        else:
            ysmooth = smooth[1] * dy
    except TypeError:
        # A single smoothing value was provided for both dimensions.
        if smooth < 1:
            xsmooth, ysmooth = smooth * xspan, smooth * yspan
        else:
            xsmooth, ysmooth = smooth * dx, smooth * dy

    # Compute binned PDFs.
    binned_vals = np.zeros((nobjs, xbin, ybin), dtype='float32')
    try:
        # Grab (distance, reddening (Av), differential reddening (Rv)) samples.
        ddraws, adraws, rdraws = copy.deepcopy(data)
        pdraws = 1. / ddraws
        sdraws = pdraws**2
        dmdraws = 5. * np.log10(ddraws) + 10.

        # Grab relevant draws.
        ydraws = adraws
        if ebv:
            ydraws /= rdraws
        if dist_type == 'scale':
            xdraws = sdraws
        elif dist_type == 'parallax':
            xdraws = pdraws
        elif dist_type == 'distance':
            xdraws = ddraws
        elif dist_type == 'distance_modulus':
            xdraws = dmdraws

        # Bin draws.
        for i, (xs, ys) in enumerate(zip(xdraws, ydraws)):
            # Print progress.
            if verbose:
                sys.stderr.write('\rBinning object {0}/{1}'.format(
                    i + 1, nobjs))
            H, xedges, yedges = np.histogram2d(xs, ys, bins=(xbins, ybins))
            binned_vals[i] = H / nsamps
    except ValueError:
        # `data` unpacked into 4 arrays instead of 3: regenerate
        # distance and reddening samples from the inputs.
        scales, avs, rvs, covs_sar = copy.deepcopy(data)

        if lndistprior is None and coord is None:
            raise ValueError("`coord` must be passed if the default distance "
                             "prior was used.")

        # Generate parallax and Av realizations.
        for i, stuff in enumerate(
                zip(scales, avs, rvs, covs_sar, parallaxes, parallax_errors,
                    coord)):
            (scales_obj, avs_obj, rvs_obj, covs_sar_obj, parallax,
             parallax_err, crd) = stuff

            # Print progress.
            if verbose:
                sys.stderr.write('\rBinning object {0}/{1}'.format(
                    i + 1, nobjs))

            # Draw random samples.
            sdraws, adraws, rdraws = draw_sar(scales_obj,
                                              avs_obj,
                                              rvs_obj,
                                              covs_sar_obj,
                                              ndraws=Nr,
                                              avlim=avlim,
                                              rvlim=rvlim,
                                              rstate=rstate)
            pdraws = np.sqrt(sdraws)
            ddraws = 1. / pdraws
            dmdraws = 5. * np.log10(ddraws) + 10.

            # Re-apply distance and parallax priors to realizations.
            lnp_draws = lndistprior(ddraws, crd)
            if parallax is not None and parallax_err is not None:
                lnp_draws += parallax_lnprior(pdraws, parallax, parallax_err)
            lnp = logsumexp(lnp_draws, axis=1)
            weights = np.exp(lnp_draws - lnp[:, None])
            weights /= weights.sum(axis=1)[:, None]
            weights = weights.flatten()

            # Grab draws.
            ydraws = adraws.flatten()
            if ebv:
                ydraws /= rdraws.flatten()
            if dist_type == 'scale':
                xdraws = sdraws.flatten()
            elif dist_type == 'parallax':
                xdraws = pdraws.flatten()
            elif dist_type == 'distance':
                xdraws = ddraws.flatten()
            elif dist_type == 'distance_modulus':
                xdraws = dmdraws.flatten()

            # Generate 2-D histogram.
            H, xedges, yedges = np.histogram2d(xdraws,
                                               ydraws,
                                               bins=(xbins, ybins),
                                               weights=weights)
            binned_vals[i] = H / nsamps

    # Apply smoothing.
    for i, (H, parallax, parallax_err) in enumerate(
            zip(binned_vals, parallaxes, parallax_errors)):
        # Establish minimum smoothing in distance.
        p1sig = np.array(
            [parallax + parallax_err,
             max(parallax - parallax_err, 1e-10)])
        if dist_type == 'scale':
            x_min_smooth = abs(np.diff(p1sig**2)) / 2.
        elif dist_type == 'parallax':
            x_min_smooth = abs(np.diff(p1sig)) / 2.
        elif dist_type == 'distance':
            x_min_smooth = abs(np.diff(1. / p1sig)) / 2.
        elif dist_type == 'distance_modulus':
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")  # ignore bad values
                x_min_smooth = abs(np.diff(5. * np.log10(1. / p1sig))) / 2.
        if np.isfinite(x_min_smooth):
            xsmooth_t = min(x_min_smooth, xsmooth)
        else:
            xsmooth_t = xsmooth
        try:
            xsmooth_t = xsmooth_t[0]  # catch possible list
        except (TypeError, IndexError):
            pass
        # Smooth 2-D PDF.
        binned_vals[i] = norm_kde(H, (xsmooth_t / dx, ysmooth / dy))

    # Compute CDFs.
    if cdf:
        for i, H in enumerate(binned_vals):
            binned_vals[i] = H.cumsum(axis=0)

    return binned_vals, xedges, yedges
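The heart of the binning step is `np.histogram2d` followed by a Gaussian filter whose sigmas are expressed in bin units. A minimal standalone sketch of that smoothing (toy data and limits, not the defaults above beyond the bin counts):

import numpy as np
from scipy.ndimage import gaussian_filter as norm_kde

xs, ys = np.random.randn(2, 50000)             # toy (distance, reddening) draws
xbins = np.linspace(-4., 4., 750 + 1)          # 750 x-bins
ybins = np.linspace(-4., 4., 300 + 1)          # 300 y-bins
H, xedges, yedges = np.histogram2d(xs, ys, bins=(xbins, ybins))
dx, dy = xbins[1] - xbins[0], ybins[1] - ybins[0]
xsmooth, ysmooth = 0.01 * 8., 0.01 * 8.        # 1% of each span
H = norm_kde(H, (xsmooth / dx, ysmooth / dy))  # sigmas in units of bins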
Example #8
def add_sfh_plot(eout, fig, ax_loc=None,
                 main_color=None, tmin=0.01, smooth_sfh=False,
                 text_size=1, ax_inset=None, lw=1, truth_dict=None):
    """add a small SFH plot at ax_loc
    text_size: multiply font size by this, to accommodate larger/smaller figures
    """

    # set up plotting
    if ax_inset is None:
        if fig is None:
            ax_inset = ax_loc
        else:
            ax_inset = fig.add_axes(ax_loc,zorder=32)
    axfontsize=4*text_size

    xmin, ymin = np.inf, np.inf
    xmax, ymax = -np.inf, -np.inf

    for i, eout_i in enumerate(eout):

        # create master time bin
        min_time = np.max([eout_i['sfh']['t'].min(), tmin])
        max_time = eout_i['sfh']['t'].max()
        tvec = 10**np.linspace(np.log10(min_time), np.log10(max_time), num=50)

        # create median SFH
        perc = np.zeros(shape=(len(tvec), 3))
        for jj in range(len(tvec)):
            # nearest-neighbor 'interpolation'
            # exact answer for binned SFHs
            idx = np.abs(eout_i['sfh']['t'] - tvec[jj]).argmin(axis=-1)
            perc[jj, :] = dyplot._quantile(
                eout_i['sfh']['sfh'][np.arange(idx.shape[0]), idx],
                [0.16, 0.50, 0.84], weights=eout_i['weights'])

        if smooth_sfh:
            for j in range(3):
                perc[:, j] = norm_kde(perc[:, j], 1)

        #### plot SFH
        ax_inset.plot(tvec, perc[:, 1], '-', color=main_color[i], lw=lw)
        ax_inset.fill_between(tvec, perc[:, 0], perc[:, 2],
                              color=main_color[i], alpha=0.3)
        ax_inset.plot(tvec, perc[:, 0], '-', color=main_color[i], alpha=0.3, lw=lw)
        ax_inset.plot(tvec, perc[:, 2], '-', color=main_color[i], alpha=0.3, lw=lw)

        #### update plot ranges
        if 'tage' in eout_i['thetas'].keys():
            xmin = np.min([xmin, tvec.min()])
            xmax = np.max([xmax, tvec.max()])
            ymax = np.max([ymax, perc.max()])
            ymin = ymax * 1e-4
        else:
            xmin = np.min([xmin, tvec.min()])
            xmax = np.max([xmax, tvec.max()])
            ymin = np.min([ymin, perc[perc > 0].min()])
            ymax = np.max([ymax, perc.max()])

    if truth_dict is not None:
        ax_inset.plot(truth_dict['t'],truth_dict['sfh'],':',color=truth_color,lw=lw)

    #### labels, format, scales !
    xmin = np.min(tvec[tvec>0.01])
    ymin = np.clip(ymin,ymax*1e-5,np.inf)

    axlim_sfh=[xmax*1.01, xmin*1.0001, ymin*.7, ymax*1.4]
    ax_inset.axis(axlim_sfh)
    ax_inset.set_ylabel(r'SFR [M$_{\odot}$/yr]',fontsize=axfontsize*3,labelpad=1.5*text_size)
    ax_inset.set_xlabel(r't$_{\mathrm{lookback}}$ [Gyr]',fontsize=axfontsize*3,labelpad=1.5*text_size)
    
    ax_inset.xaxis.set_minor_formatter(FormatStrFormatter('%2.5g'))
    ax_inset.xaxis.set_major_formatter(FormatStrFormatter('%2.5g'))
    ax_inset.set_xscale('log',subsx=([3]))
    ax_inset.set_yscale('log',subsy=([3]))
    ax_inset.tick_params('both', length=lw*3, width=lw*.6, which='both',labelsize=axfontsize*3)
    for axis in ['top','bottom','left','right']: ax_inset.spines[axis].set_linewidth(lw*.6)

    ax_inset.yaxis.set_major_formatter(FormatStrFormatter('%2.5g'))
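The nearest-neighbor lookup in the percentile loop maps each point of the regular `tvec` grid onto the closest native SFH time bin, which is exact for piecewise-constant (binned) SFHs. A toy illustration of the indexing with hypothetical 1-D time bins:

import numpy as np

t_native = np.array([0.05, 0.3, 1.0, 5.0, 13.7])   # native bin centers (Gyr)
sfh_draws = np.random.rand(1000, t_native.size)    # (Ndraw, Nbin) SFR chains

tvec = 10**np.linspace(np.log10(0.05), np.log10(13.7), num=50)
idx = np.abs(t_native[None, :] - tvec[:, None]).argmin(axis=1)
sfh_on_grid = sfh_draws[:, idx]                    # (Ndraw, 50) resampled SFHs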
Example #9
def subcorner(res, eout, parnames, outname=None, maxprob=False, truth_dict=None, truths=None, **opts):
    """ wrapper around dyplot.cornerplot()
    adds in a star formation history and marginalized parameters
    for some key outputs (stellar mass, SFR, sSFR, half-mass time)
    """

    # write down some keywords
    title_kwargs = {'fontsize':fs*.7}
    label_kwargs = {'fontsize':fs*.7}

    if truth_dict is not None:
        truths = []
        for par in parnames:
            if par in truth_dict.keys():
                truths += [truth_dict[par]]
            else:
                truths += [np.nan]

    if maxprob and (truths is None):
        truths = res['samples'][eout['sample_idx'][0],:]

    # create dynesty plot
    # maximum probability solution in red
    fig, axes = dyplot.cornerplot(res, show_titles=True, labels=parnames, truths=truths,
                                  truth_color=truth_color,
                                  label_kwargs=label_kwargs, title_kwargs=title_kwargs)
    for ax in axes.ravel():
        ax.xaxis.set_tick_params(labelsize=tick_fs*.7)
        ax.yaxis.set_tick_params(labelsize=tick_fs*.7)

    # extra parameters
    eout_toplot = ['stellar_mass','sfr_100', 'ssfr_100', 'avg_age', 'H alpha 6563', 'H alpha/H beta']
    not_log = ['half_time','H alpha/H beta']
    ptitle = [r'log(M$_*$)',r'log(SFR$_{\mathrm{100 Myr}}$)',
              r'log(sSFR$_{\mathrm{100 Myr}}$)',r'log(t$_{\mathrm{avg}}$) [Gyr]',
              r'log(EW$_{\mathrm{H \alpha}}$)',r'Balmer decrement']

    # either we create a new figure for extra parameters
    # or add to old figure
    # depending on dimensionality of model (and thus of the plot)
    #if axes.shape[0] <= 10:
    #    label_kwargs['fontsize'] *= 0.7
    #    title_kwargs['fontsize'] *= 0.7


    if (axes.shape[0] <= 6):

        # only plot a subset of parameters
        eout_toplot, ptitle = eout_toplot[:4], ptitle[:4]

        # generate fake results file for dynesty
        nsamp, nvar = eout['weights'].shape[0], len(eout_toplot)
        fres = {'samples': np.empty(shape=(nsamp,nvar)), 'weights': eout['weights']}
        for i in range(nvar): 
            the_chain = eout['extras'][eout_toplot[i]]['chain']
            if eout_toplot[i] in not_log:
                fres['samples'][:,i] = the_chain
            else:
                fres['samples'][:,i] = np.log10(the_chain)

        fig2, axes2 = dyplot.cornerplot(fres, show_titles=True, labels=ptitle, label_kwargs=label_kwargs, title_kwargs=title_kwargs)
       
        # add SFH plot
        sfh_ax = fig2.add_axes([0.7,0.7,0.25,0.25],zorder=32)
        add_sfh_plot([eout], fig2,
                     main_color = ['black'],
                     ax_inset=sfh_ax,
                     text_size=1.5,lw=2,truth_dict=truth_dict)
        fig2.savefig('{0}.corner.extra.pdf'.format(outname))
        plt.close(fig2)

    else:

        # add SFH plot
        sfh_ax = fig.add_axes([0.75,0.435,0.22,0.22],zorder=32)
        add_sfh_plot([eout], fig, main_color = ['black'], ax_inset=sfh_ax, text_size=2,lw=4,truth_dict=truth_dict)

        # create extra parameters
        axis_size = fig.get_axes()[0].get_position().size
        xs, ys = 0.4, 1.0-axis_size[1]*1.3
        xdelta, ydelta = axis_size[0]*1.2, axis_size[1]*1.8
        plotloc = 0
        for jj, ename in enumerate(eout_toplot):

            # pull out chain, quantiles
            weights = eout['weights']
            if 'H alpha' not in ename:
                pchain = eout['extras'][ename]['chain']
                qvalues = [eout['extras'][ename]['q16'],
                           eout['extras'][ename]['q50'],
                           eout['extras'][ename]['q84']]
            elif '6563' in ename:
                pchain = eout['obs']['elines'][ename]['ew']['chain']
                qvalues = [eout['obs']['elines'][ename]['ew']['q16'],
                           eout['obs']['elines'][ename]['ew']['q50'],
                           eout['obs']['elines'][ename]['ew']['q84']]
            else:
                pchain = eout['obs']['elines']['H alpha 6563']['flux']['chain'] / eout['obs']['elines']['H beta 4861']['flux']['chain']
                qvalues = dyplot._quantile(pchain,np.array([0.16, 0.50, 0.84]),weights=weights)

            # logify. 
            if ename not in not_log:
                pchain = np.log10(pchain)
                qvalues = np.log10(qvalues)

            # make sure we're not producing infinities.
            # if we are, replace them with minimum.
            # if everything is infinity, skip and don't add the axis!
            # one failure mode here: if qvalues include an infinity!
            infty = ~np.isfinite(pchain)
            if infty.sum() == pchain.shape[0]:
                continue
            if infty.sum():
                pchain[infty] = pchain[~infty].min()

            # total obfuscated way to add in axis
            ax = fig.add_axes([xs+(jj%4)*xdelta, ys-int(jj/4)*ydelta, axis_size[0], axis_size[1]])

            # complex smoothing routine to match dynesty
            bins = int(round(10. / 0.02))
            n, b = np.histogram(pchain, bins=bins, weights=weights,
                                range=[pchain.min(),pchain.max()])
            n = norm_kde(n, 10.)
            x0 = 0.5 * (b[1:] + b[:-1])
            y0 = n
            ax.fill_between(x0, y0, color='k', alpha=0.6)

            # plot and show quantiles
            for q in qvalues:
                ax.axvline(q, ls="dashed", color='red')

            q_m = qvalues[1] - qvalues[0]
            q_p = qvalues[2] - qvalues[1]
            fmt = "{{0:{0}}}".format(".2f").format
            title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
            title = title.format(fmt(float(qvalues[1])), fmt(float(q_m)), fmt(float(q_p)))
            #title = "{0}\n={1}".format(ptitle[jj], title)
            ax.set_title(title, va='bottom',**title_kwargs)
            ax.set_xlabel(ptitle[jj],**label_kwargs)

            # look for truth
            pmin, pmax = np.percentile(pchain, 0.5), np.percentile(pchain, 99.5)
            if truth_dict is not None:
                if ename in truth_dict.keys():
                    if ename not in not_log:
                        tplt = np.log10(truth_dict[ename])
                    else:
                        tplt = truth_dict[ename]
                    ax.axvline(tplt, ls=":", color=truth_color, lw=1.5)

                    pmin = np.min([pmin, tplt.min()])
                    pmax = np.max([pmax, tplt.max()])

            if ename in not_log:
                pmin, pmax = pmin * 0.99, pmax * 1.01
            else:
                pmin = pmin - 0.02
                pmax = pmax + 0.02

            # set range
            ax.set_xlim(pmin, pmax)
            ax.set_ylim(0, 1.1 * np.max(n))
            ax.set_yticklabels([])
            ax.xaxis.set_major_locator(MaxNLocator(5))
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            ax.xaxis.set_tick_params(labelsize=label_kwargs['fontsize'])

    fig.savefig('{0}.corner.pdf'.format(outname))
    plt.close(fig)
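The same "smoothing routine from dynesty" recurs in Examples #1, #2, #4, and #9. A hedged consolidation into one helper (the function name and defaults below are my own, not from any of the original sources):

import numpy as np
from scipy.ndimage import gaussian_filter as norm_kde

def smoothed_posterior(samples, weights=None, span=None, smooth=0.02, sigma=10.):
    """Weighted histogram + Gaussian smoothing, dynesty-style.

    Oversamples the histogram by ~10x relative to the smoothing
    fraction `smooth`, then smooths with a Gaussian `sigma` bins wide.
    """
    if span is None:
        span = [samples.min(), samples.max()]
    bins = int(round(10. / smooth))
    n, b = np.histogram(samples, bins=bins, weights=weights,
                        range=np.sort(span))
    n = norm_kde(n, sigma)
    x0 = 0.5 * (b[1:] + b[:-1])
    return x0, n / np.trapz(n, x0)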