Example #1
0
def lnprior(param: np.ndarray,
            bounds: dict,
            param_index: Dict[str, int],
            prior: Optional[Dict[str, Tuple[float, float]]] = None):
    """
    Internal function for calculating the log prior.

    Parameters
    ----------
    param : np.ndarray
        Parameter values.
    bounds : dict
        Dictionary with the parameter boundaries.
    param_index : dict(str, int)
        Dictionary with the parameter indices of ``param``.
    prior : dict(str, tuple(float, float)), None
        Dictionary with Gaussian priors for one or multiple parameters. The prior can be set
        for any of the atmosphere or calibration parameters, e.g.
        ``prior={'teff': (1200., 100.)}``. Additionally, a prior can be set for the mass, e.g.
        ``prior={'mass': (13., 3.)}`` for an expected mass of 13 Mjup with an uncertainty of
        3 Mjup. The parameter is not used if set to ``None``.

    Returns
    -------
    float
        Log prior.
    """

    ln_prior = 0

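    # Uniform priors: check that each parameter falls within its boundaries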
    for key, value in bounds.items():

        if value[0] <= param[param_index[key]] <= value[1]:
            ln_prior += 0.

        else:
            ln_prior = -np.inf
            break

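    # Add the Gaussian prior terms: the log of a normal density, up to a constant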
    if prior is not None:
        for key, value in prior.items():
            if key == 'mass':
                mass = read_util.get_mass(param[param_index['logg']],
                                          param[param_index['radius']])

                ln_prior += -0.5 * (mass - value[0])**2 / value[1]**2

            else:
                ln_prior += -0.5 * (param[param_index[key]] -
                                    value[0])**2 / value[1]**2

    return ln_prior
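
A minimal usage sketch of the function above. The parameter names, values, and boundaries are hypothetical, and the module-level imports (numpy as np, read_util) are assumed to be in place:

# Hypothetical three-parameter fit with a Gaussian prior on teff
param = np.array([1200., 4.0, 1.2])  # teff (K), logg, radius (Rjup)
param_index = {'teff': 0, 'logg': 1, 'radius': 2}
bounds = {'teff': (500., 3000.), 'logg': (3.0, 5.5), 'radius': (0.5, 5.0)}
prior = {'teff': (1200., 100.)}  # mean and 1-sigma width

ln_p = lnprior(param, bounds, param_index, prior=prior)
# Returns 0. plus the Gaussian prior term, or -np.inf if a parameter is out of bounds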
Example #2
0
def lnprior(param,
            bounds,
            modelpar,
            prior):
    """
    Function for the prior probability.

    Parameters
    ----------
    param : numpy.ndarray
        Parameter values.
    bounds : dict
        Parameter boundaries.
    modelpar : list(str, )
        Parameter names.
    prior : tuple(str, float, float)
        Gaussian prior on one of the parameters. Currently only possible for the mass, e.g.
        ('mass', 13., 3.) for an expected mass of 13 Mjup with an uncertainty of 3 Mjup. Not
        used if set to None.

    Returns
    -------
    float
        Log prior probability.
    """

    if prior:

        modeldict = {}
        for i, item in enumerate(modelpar):
            modeldict[item] = param[i]

    for i, item in enumerate(modelpar):

        if bounds[item][0] <= param[i] <= bounds[item][1]:

            if prior is None:
                ln_prior = 0.

            elif prior[0] == 'mass':
                mass = read_util.get_mass(modeldict)
                ln_prior = -0.5*(mass-prior[1])**2/prior[2]**2

        else:
            ln_prior = -np.inf
            break

    return ln_prior
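
A similar usage sketch for this earlier signature, again with hypothetical values. No Gaussian prior is set here because only a mass prior is supported, which would require read_util.get_mass from the species package:

param = np.array([1800., 4.5])  # teff (K), logg
modelpar = ['teff', 'logg']
bounds = {'teff': (500., 3000.), 'logg': (3.0, 5.5)}

ln_p = lnprior(param, bounds, modelpar, None)  # 0. if within bounds, -np.inf otherwise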
Example #3
0
def plot_posterior(tag: str,
                   burnin: Optional[int] = None,
                   title: Optional[str] = None,
                   offset: Optional[Tuple[float, float]] = None,
                   title_fmt: Union[str, List[str]] = '.2f',
                   limits: Optional[List[Tuple[float, float]]] = None,
                   max_posterior: bool = False,
                   inc_luminosity: bool = False,
                   inc_mass: bool = False,
                   output: str = 'posterior.pdf') -> None:
    """
    Function to plot the posterior distribution.

    Parameters
    ----------
    tag : str
        Database tag with the samples.
    burnin : int, None
        Number of burnin steps to exclude. All samples are used if set to ``None``.
    title : str, None
        Plot title. No title is shown if set to ``None``.
    offset : tuple(float, float), None
        Offset of the x- and y-axis label. Default values are used if set to ``None``.
    title_fmt : str, list(str)
        Format of the titles above the 1D distributions. Either a single string, which will be used
        for all parameters, or a list with the title format for each parameter separately (in the
        order as shown in the corner plot).
    limits : list(tuple(float, float), ), None
        Axis limits of all parameters. Automatically set if set to ``None``.
    max_posterior : bool
        Plot the position of the sample with the maximum posterior probability.
    inc_luminosity : bool
        Include the log10 of the luminosity in the posterior plot as calculated from the
        effective temperature and radius.
    inc_mass : bool
        Include the mass in the posterior plot as calculated from the surface gravity and radius.
    output : str
        Output filename.

    Returns
    -------
    NoneType
        None
    """

    mpl.rcParams['font.serif'] = ['Bitstream Vera Serif']
    mpl.rcParams['font.family'] = 'serif'

    plt.rc('axes', edgecolor='black', linewidth=2.2)

    if burnin is None:
        burnin = 0

    species_db = database.Database()
    box = species_db.get_samples(tag, burnin=burnin)

    print('Median sample:')
    for key, value in box.median_sample.items():
        print(f'   - {key} = {value:.2f}')

    samples = box.samples
    ndim = samples.shape[-1]

    if box.prob_sample is not None:
        par_val = tuple(box.prob_sample.values())

        print('Maximum posterior sample:')
        for key, value in box.prob_sample.items():
            print(f'   - {key} = {value:.2f}')

    print(f'Plotting the posterior: {output}...', end='', flush=True)

    if inc_luminosity:
        if 'teff' in box.parameters and 'radius' in box.parameters:
            teff_index = np.argwhere(np.array(box.parameters) == 'teff')[0]
            radius_index = np.argwhere(np.array(box.parameters) == 'radius')[0]

            luminosity = 4. * np.pi * (samples[..., radius_index]*constants.R_JUP)**2 * \
                constants.SIGMA_SB * samples[..., teff_index]**4. / constants.L_SUN

            samples = np.append(samples, np.log10(luminosity), axis=-1)
            box.parameters.append('luminosity')
            ndim += 1

        elif 'teff_0' in box.parameters and 'radius_0' in box.parameters:
            luminosity = 0.

            for i in range(100):
                teff_index = np.argwhere(
                    np.array(box.parameters) == f'teff_{i}')
                radius_index = np.argwhere(
                    np.array(box.parameters) == f'radius_{i}')

                if len(teff_index) > 0 and len(radius_index) > 0:
                    luminosity += 4. * np.pi * (samples[..., radius_index[0]]*constants.R_JUP)**2 \
                        * constants.SIGMA_SB * samples[..., teff_index[0]]**4. / constants.L_SUN

                else:
                    break

            samples = np.append(samples, np.log10(luminosity), axis=-1)
            box.parameters.append('luminosity')
            ndim += 1

    if inc_mass:
        if 'logg' in box.parameters and 'radius' in box.parameters:
            logg_index = np.argwhere(np.array(box.parameters) == 'logg')[0]
            radius_index = np.argwhere(np.array(box.parameters) == 'radius')[0]

            mass_samples = read_util.get_mass(samples[..., logg_index],
                                              samples[..., radius_index])

            samples = np.append(samples, mass_samples, axis=-1)
            box.parameters.append('mass')
            ndim += 1

        else:
            warnings.warn(
                'Samples of log(g) and radius are required for \'inc_mass=True\'.'
            )

    if isinstance(title_fmt, list) and len(title_fmt) != ndim:
        raise ValueError(
            f'The number of items in the list of \'title_fmt\' ({len(title_fmt)}) is '
            f'not equal to the number of dimensions of the samples ({ndim}).')

    labels = plot_util.update_labels(box.parameters)

    # Check if parameter values were fixed

    index_sel = []
    index_del = []

    # Use only last axis for parameter dimensions
    for i in range(ndim):
        if np.amin(samples[..., i]) == np.amax(samples[..., i]):
            index_del.append(i)
        else:
            index_sel.append(i)

    samples = samples[..., index_sel]

    for i in range(len(index_del) - 1, -1, -1):
        del labels[index_del[i]]

    ndim -= len(index_del)

    samples = samples.reshape((-1, ndim))

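    # Build the titles above the 1D histograms from the 16th, 50th, and 84th percentiles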
    hist_titles = []

    for i, item in enumerate(labels):
        unit_start = item.find('(')

        if unit_start == -1:
            param_label = item
            unit_label = None

        else:
            param_label = item[:unit_start]
            # Remove parenthesis from the units
            unit_label = item[unit_start + 1:-1]

        q_16, q_50, q_84 = corner.quantile(samples[:, i], [0.16, 0.5, 0.84])
        q_minus, q_plus = q_50 - q_16, q_84 - q_50

        if isinstance(title_fmt, str):
            fmt = '{{0:{0}}}'.format(title_fmt).format

        elif isinstance(title_fmt, list):
            fmt = '{{0:{0}}}'.format(title_fmt[i]).format

        best_fit = r'${{{0}}}_{{-{1}}}^{{+{2}}}$'
        best_fit = best_fit.format(fmt(q_50), fmt(q_minus), fmt(q_plus))

        if unit_label is None:
            hist_title = f'{param_label} = {best_fit}'

        else:
            hist_title = f'{param_label} = {best_fit} {unit_label}'

        hist_titles.append(hist_title)

    fig = corner.corner(samples,
                        quantiles=[0.16, 0.5, 0.84],
                        labels=labels,
                        label_kwargs={'fontsize': 13},
                        titles=hist_titles,
                        show_titles=True,
                        title_fmt=None,
                        title_kwargs={'fontsize': 12})

    axes = np.array(fig.axes).reshape((ndim, ndim))

    for i in range(ndim):
        for j in range(ndim):
            if i >= j:
                ax = axes[i, j]

                ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
                ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))

                labelleft = j == 0 and i != 0
                labelbottom = i == ndim - 1

                ax.tick_params(axis='both',
                               which='major',
                               colors='black',
                               labelcolor='black',
                               direction='in',
                               width=1,
                               length=5,
                               labelsize=12,
                               top=True,
                               bottom=True,
                               left=True,
                               right=True,
                               labelleft=labelleft,
                               labelbottom=labelbottom,
                               labelright=False,
                               labeltop=False)

                ax.tick_params(axis='both',
                               which='minor',
                               colors='black',
                               labelcolor='black',
                               direction='in',
                               width=1,
                               length=3,
                               labelsize=12,
                               top=True,
                               bottom=True,
                               left=True,
                               right=True,
                               labelleft=labelleft,
                               labelbottom=labelbottom,
                               labelright=False,
                               labeltop=False)

                if limits is not None:
                    ax.set_xlim(limits[j])

                if max_posterior:
                    ax.axvline(par_val[j], color='tomato')

                if i > j:
                    if max_posterior:
                        ax.axhline(par_val[i], color='tomato')
                        ax.plot(par_val[j], par_val[i], 's', color='tomato')

                    if limits is not None:
                        ax.set_ylim(limits[i])

                if offset is not None:
                    ax.get_xaxis().set_label_coords(0.5, offset[0])
                    ax.get_yaxis().set_label_coords(offset[1], 0.5)

                else:
                    ax.get_xaxis().set_label_coords(0.5, -0.26)
                    ax.get_yaxis().set_label_coords(-0.27, 0.5)

    if title:
        fig.suptitle(title, y=1.02, fontsize=16)

    plt.savefig(os.getcwd() + '/' + output, bbox_inches='tight')
    plt.clf()
    plt.close()

    print(' [DONE]')
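
A usage sketch with a hypothetical database tag. It assumes that posterior samples have already been stored in the species database under that tag:

plot_posterior(tag='betapic_b',
               burnin=500,
               title_fmt='.2f',
               inc_luminosity=True,
               inc_mass=True,
               output='posterior.pdf')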
Example #4
0
def plot_posterior(
    tag: str,
    burnin: Optional[int] = None,
    title: Optional[str] = None,
    offset: Optional[Tuple[float, float]] = None,
    title_fmt: Union[str, List[str]] = ".2f",
    limits: Optional[List[Tuple[float, float]]] = None,
    max_prob: bool = False,
    vmr: bool = False,
    inc_luminosity: bool = False,
    inc_mass: bool = False,
    inc_pt_param: bool = False,
    inc_loglike: bool = False,
    output: Optional[str] = "posterior.pdf",
) -> None:
    """
    Function to plot the posterior distribution of the fitted parameters.

    Parameters
    ----------
    tag : str
        Database tag with the samples.
    burnin : int, None
        Number of burnin steps to exclude. All samples are used if set to ``None``.
    title : str, None
        Plot title. No title is shown if set to ``None``.
    offset : tuple(float, float), None
        Offset of the x- and y-axis label. Default values are used if set to ``None``.
    title_fmt : str, list(str)
        Format of the titles above the 1D distributions. Either a single string, which will be used
        for all parameters, or a list with the title format for each parameter separately (in the
        order as shown in the corner plot).
    limits : list(tuple(float, float), ), None
        Axis limits of all parameters. Automatically set if set to ``None``.
    max_prob : bool
        Plot the position of the sample with the maximum posterior probability.
    vmr : bool
        Plot the volume mixing ratios (i.e. number fractions) instead of the mass fractions of the
        retrieved species with :class:`~species.analysis.retrieval.AtmosphericRetrieval`.
    inc_luminosity : bool
        Include the log10 of the luminosity in the posterior plot as calculated from the
        effective temperature and radius.
    inc_mass : bool
        Include the mass in the posterior plot as calculated from the surface gravity and radius.
    inc_pt_param : bool
        Include the parameters of the pressure-temperature profile. Only used if the ``tag``
        contains samples obtained with :class:`~species.analysis.retrieval.AtmosphericRetrieval`.
    inc_loglike : bool
        Include the log10 of the likelihood as additional parameter in the corner plot.
    output : str
        Output filename for the plot. The plot is shown in an
        interface window if the argument is set to ``None``.

    Returns
    -------
    NoneType
        None
    """

    mpl.rcParams["font.serif"] = ["Bitstream Vera Serif"]
    mpl.rcParams["font.family"] = "serif"

    plt.rc("axes", edgecolor="black", linewidth=2.2)

    if burnin is None:
        burnin = 0

    species_db = database.Database()

    box = species_db.get_samples(tag, burnin=burnin)
    samples = box.samples

    ndim = len(box.parameters)

    if not inc_pt_param and box.spectrum == "petitradtrans":
        pt_param = ["tint", "t1", "t2", "t3", "alpha", "log_delta"]

        index_del = []
        item_del = []

        for i in range(100):
            pt_item = f"t{i}"

            if pt_item in box.parameters:
                param_index = np.argwhere(
                    np.array(box.parameters) == pt_item)[0]
                index_del.append(param_index)
                item_del.append(pt_item)

            else:
                break

        for item in pt_param:
            if item in box.parameters and item not in item_del:
                param_index = np.argwhere(np.array(box.parameters) == item)[0]
                index_del.append(param_index)
                item_del.append(item)

        samples = np.delete(samples, index_del, axis=1)
        ndim -= len(index_del)

        for item in item_del:
            box.parameters.remove(item)

    if box.spectrum == "petitradtrans" and box.attributes[
            "chemistry"] == "free":
        box.parameters.append("c_h_ratio")
        box.parameters.append("o_h_ratio")
        box.parameters.append("c_o_ratio")

        ndim += 3

        abund_index = {}
        for i, item in enumerate(box.parameters):
            if item == "CH4":
                abund_index["CH4"] = i

            elif item == "CO":
                abund_index["CO"] = i

            elif item == "CO_all_iso":
                abund_index["CO_all_iso"] = i

            elif item == "CO_all_iso_HITEMP":
                abund_index["CO_all_iso_HITEMP"] = i

            elif item == "CO2":
                abund_index["CO2"] = i

            elif item == "FeH":
                abund_index["FeH"] = i

            elif item == "H2O":
                abund_index["H2O"] = i

            elif item == "H2O_HITEMP":
                abund_index["H2O_HITEMP"] = i

            elif item == "H2S":
                abund_index["H2S"] = i

            elif item == "Na":
                abund_index["Na"] = i

            elif item == "NH3":
                abund_index["NH3"] = i

            elif item == "K":
                abund_index["K"] = i

            elif item == "PH3":
                abund_index["PH3"] = i

            elif item == "TiO":
                abund_index["TiO"] = i

            elif item == "TiO_all_Exomol":
                abund_index["TiO_all_Exomol"] = i

            elif item == "VO":
                abund_index["VO"] = i

            elif item == "VO_Plez":
                abund_index["VO_Plez"] = i

        c_h_ratio = np.zeros(samples.shape[0])
        o_h_ratio = np.zeros(samples.shape[0])
        c_o_ratio = np.zeros(samples.shape[0])

        for i, item in enumerate(samples):
            abund = {}

            if "CH4" in box.parameters:
                abund["CH4"] = item[abund_index["CH4"]]

            if "CO" in box.parameters:
                abund["CO"] = item[abund_index["CO"]]

            if "CO_all_iso" in box.parameters:
                abund["CO_all_iso"] = item[abund_index["CO"]]

            if "CO_all_iso_HITEMP" in box.parameters:
                abund["CO_all_iso_HITEMP"] = item[
                    abund_index["CO_all_iso_HITEMP"]]

            if "CO2" in box.parameters:
                abund["CO2"] = item[abund_index["CO2"]]

            if "FeH" in box.parameters:
                abund["FeH"] = item[abund_index["FeH"]]

            if "H2O" in box.parameters:
                abund["H2O"] = item[abund_index["H2O"]]

            if "H2O_HITEMP" in box.parameters:
                abund["H2O_HITEMP"] = item[abund_index["H2O_HITEMP"]]

            if "H2S" in box.parameters:
                abund["H2S"] = item[abund_index["H2S"]]

            if "Na" in box.parameters:
                abund["Na"] = item[abund_index["Na"]]

            if "K" in box.parameters:
                abund["K"] = item[abund_index["K"]]

            if "NH3" in box.parameters:
                abund["NH3"] = item[abund_index["NH3"]]

            if "PH3" in box.parameters:
                abund["PH3"] = item[abund_index["PH3"]]

            if "TiO" in box.parameters:
                abund["TiO"] = item[abund_index["TiO"]]

            if "TiO_all_Exomol" in box.parameters:
                abund["TiO_all_Exomol"] = item[abund_index["TiO_all_Exomol"]]

            if "VO" in box.parameters:
                abund["VO"] = item[abund_index["VO"]]

            if "VO_Plez" in box.parameters:
                abund["VO_Plez"] = item[abund_index["VO_Plez"]]

            c_h_ratio[i], o_h_ratio[i], c_o_ratio[
                i] = retrieval_util.calc_metal_ratio(abund)

    if (vmr and box.spectrum == "petitradtrans"
            and box.attributes["chemistry"] == "free"):
        print("Changing mass fractions to number fractions...",
              end="",
              flush=True)

        # Get all available line species
        line_species = retrieval_util.get_line_species()

        # Get the atomic and molecular masses
        masses = retrieval_util.atomic_masses()

        # Create array for the updated samples
        updated_samples = np.zeros(samples.shape)

        for i, samples_item in enumerate(samples):
            # Initiate a dictionary for the log10 mass fraction of the metals
            log_x_abund = {}

            for param_item in box.parameters:
                if param_item in line_species:
                    # Get the index of the parameter
                    param_index = box.parameters.index(param_item)

                    # Store log10 mass fraction in the dictionary
                    log_x_abund[param_item] = samples_item[param_index]

            # Create a dictionary with all mass fractions, including H2 and He
            x_abund = retrieval_util.mass_fractions(log_x_abund)

            # Calculate the mean molecular weight from the input mass fractions
            mmw = retrieval_util.mean_molecular_weight(x_abund)

            for param_item in box.parameters:
                if param_item in line_species:
                    # Get the index of the parameter
                    param_index = box.parameters.index(param_item)

                    # Overwrite the sample with the log10 number fraction
                    samples_item[param_index] = np.log10(
                        10.0**samples_item[param_index] * mmw /
                        masses[param_item])

            # Store the updated sample to the array
            updated_samples[i, :] = samples_item

        # Overwrite the samples in the SamplesBox
        box.samples = updated_samples

        print(" [DONE]")

    print("Median sample:")
    for key, value in box.median_sample.items():
        print(f"   - {key} = {value:.2e}")

    if "gauss_mean" in box.parameters:
        param_index = np.argwhere(np.array(box.parameters) == "gauss_mean")[0]
        samples[:, param_index] *= 1e3  # (um) -> (nm)

    if "gauss_sigma" in box.parameters:
        param_index = np.argwhere(np.array(box.parameters) == "gauss_sigma")[0]
        samples[:, param_index] *= 1e3  # (um) -> (nm)

    if box.prob_sample is not None:
        par_val = tuple(box.prob_sample.values())

        print("Maximum posterior sample:")
        for key, value in box.prob_sample.items():
            print(f"   - {key} = {value:.2e}")

    for item in box.parameters:
        if item[0:11] == "wavelength_":
            param_index = box.parameters.index(item)

            # (um) -> (nm)
            box.samples[:, param_index] *= 1e3

    if output is None:
        print("Plotting the posterior...", end="", flush=True)
    else:
        print(f"Plotting the posterior: {output}...", end="", flush=True)

    if "H2O" in box.parameters or "H2O_HITEMP" in box.parameters:
        samples = np.column_stack((samples, c_h_ratio, o_h_ratio, c_o_ratio))

    if inc_luminosity:
        if "teff" in box.parameters and "radius" in box.parameters:
            teff_index = np.argwhere(np.array(box.parameters) == "teff")[0]
            radius_index = np.argwhere(np.array(box.parameters) == "radius")[0]

            lum_planet = (4.0 * np.pi *
                          (samples[..., radius_index] * constants.R_JUP)**2 *
                          constants.SIGMA_SB * samples[..., teff_index]**4.0 /
                          constants.L_SUN)

            if "disk_teff" in box.parameters and "disk_radius" in box.parameters:
                teff_index = np.argwhere(
                    np.array(box.parameters) == "disk_teff")[0]
                radius_index = np.argwhere(
                    np.array(box.parameters) == "disk_radius")[0]

                lum_disk = (4.0 * np.pi *
                            (samples[..., radius_index] * constants.R_JUP)**2 *
                            constants.SIGMA_SB *
                            samples[..., teff_index]**4.0 / constants.L_SUN)

                samples = np.append(samples,
                                    np.log10(lum_planet + lum_disk),
                                    axis=-1)
                box.parameters.append("luminosity")
                ndim += 1

                samples = np.append(samples, lum_disk / lum_planet, axis=-1)
                box.parameters.append("luminosity_disk_planet")
                ndim += 1

            else:
                samples = np.append(samples, np.log10(lum_planet), axis=-1)
                box.parameters.append("luminosity")
                ndim += 1

        elif "teff_0" in box.parameters and "radius_0" in box.parameters:
            luminosity = 0.0

            for i in range(100):
                teff_index = np.argwhere(
                    np.array(box.parameters) == f"teff_{i}")
                radius_index = np.argwhere(
                    np.array(box.parameters) == f"radius_{i}")

                if len(teff_index) > 0 and len(radius_index) > 0:
                    luminosity += (
                        4.0 * np.pi *
                        (samples[..., radius_index[0]] * constants.R_JUP)**2 *
                        constants.SIGMA_SB * samples[..., teff_index[0]]**4.0 /
                        constants.L_SUN)

                else:
                    break

            samples = np.append(samples, np.log10(luminosity), axis=-1)
            box.parameters.append("luminosity")
            ndim += 1

    if inc_mass:
        if "logg" in box.parameters and "radius" in box.parameters:
            logg_index = np.argwhere(np.array(box.parameters) == "logg")[0]
            radius_index = np.argwhere(np.array(box.parameters) == "radius")[0]

            mass_samples = read_util.get_mass(samples[..., logg_index],
                                              samples[..., radius_index])

            samples = np.append(samples, mass_samples, axis=-1)
            box.parameters.append("mass")
            ndim += 1

        else:
            warnings.warn(
                "Samples with the log(g) and radius are required for 'inc_mass=True'."
            )

    if inc_loglike:
        # Get ln(L) of the samples
        ln_prob = box.ln_prob[..., np.newaxis]

        # Normalize by the maximum ln(L)
        ln_prob -= np.amax(ln_prob)

        # Convert ln(L) to log10(L)
        log_prob = ln_prob / np.log(10.0)

        # Convert log10(L) to L
        prob = 10.0**log_prob

        # Normalize to an integrated probability of 1
        prob /= np.sum(prob)

        samples = np.append(samples, np.log10(prob), axis=-1)
        box.parameters.append("log_prob")
        ndim += 1

    labels = plot_util.update_labels(box.parameters)

    # Check if parameter values were fixed

    index_sel = []
    index_del = []

    for i in range(ndim):
        if np.amin(samples[:, i]) == np.amax(samples[:, i]):
            index_del.append(i)
        else:
            index_sel.append(i)

    samples = samples[:, index_sel]

    for i in range(len(index_del) - 1, -1, -1):
        del labels[index_del[i]]

    ndim -= len(index_del)

    samples = samples.reshape((-1, ndim))

    if isinstance(title_fmt, list) and len(title_fmt) != ndim:
        raise ValueError(
            f"The number of items in the list of 'title_fmt' ({len(title_fmt)}) is "
            f"not equal to the number of dimensions of the samples ({ndim}).")

    hist_titles = []

    for i, item in enumerate(labels):
        unit_start = item.find("(")

        if unit_start == -1:
            param_label = item
            unit_label = None

        else:
            param_label = item[:unit_start]
            # Remove parenthesis from the units
            unit_label = item[unit_start + 1:-1]

        q_16, q_50, q_84 = corner.quantile(samples[:, i], [0.16, 0.5, 0.84])
        q_minus, q_plus = q_50 - q_16, q_84 - q_50

        if isinstance(title_fmt, str):
            fmt = "{{0:{0}}}".format(title_fmt).format

        elif isinstance(title_fmt, list):
            fmt = "{{0:{0}}}".format(title_fmt[i]).format

        best_fit = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
        best_fit = best_fit.format(fmt(q_50), fmt(q_minus), fmt(q_plus))

        if unit_label is None:
            hist_title = f"{param_label} = {best_fit}"

        else:
            hist_title = f"{param_label} = {best_fit} {unit_label}"

        hist_titles.append(hist_title)

    fig = corner.corner(
        samples,
        quantiles=[0.16, 0.5, 0.84],
        labels=labels,
        label_kwargs={"fontsize": 13},
        titles=hist_titles,
        show_titles=True,
        title_fmt=None,
        title_kwargs={"fontsize": 12},
    )

    axes = np.array(fig.axes).reshape((ndim, ndim))

    for i in range(ndim):
        for j in range(ndim):
            if i >= j:
                ax = axes[i, j]

                ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
                ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))

                labelleft = j == 0 and i != 0
                labelbottom = i == ndim - 1

                ax.tick_params(
                    axis="both",
                    which="major",
                    colors="black",
                    labelcolor="black",
                    direction="in",
                    width=1,
                    length=5,
                    labelsize=12,
                    top=True,
                    bottom=True,
                    left=True,
                    right=True,
                    labelleft=labelleft,
                    labelbottom=labelbottom,
                    labelright=False,
                    labeltop=False,
                )

                ax.tick_params(
                    axis="both",
                    which="minor",
                    colors="black",
                    labelcolor="black",
                    direction="in",
                    width=1,
                    length=3,
                    labelsize=12,
                    top=True,
                    bottom=True,
                    left=True,
                    right=True,
                    labelleft=labelleft,
                    labelbottom=labelbottom,
                    labelright=False,
                    labeltop=False,
                )

                if limits is not None:
                    ax.set_xlim(limits[j])

                if max_prob:
                    ax.axvline(par_val[j], color="tomato")

                if i > j:
                    if max_prob:
                        ax.axhline(par_val[i], color="tomato")
                        ax.plot(par_val[j], par_val[i], "s", color="tomato")

                    if limits is not None:
                        ax.set_ylim(limits[i])

                if offset is not None:
                    ax.get_xaxis().set_label_coords(0.5, offset[0])
                    ax.get_yaxis().set_label_coords(offset[1], 0.5)

                else:
                    ax.get_xaxis().set_label_coords(0.5, -0.26)
                    ax.get_yaxis().set_label_coords(-0.27, 0.5)

    if title:
        fig.suptitle(title, y=1.02, fontsize=16)

    print(" [DONE]")

    if output is None:
        plt.show()
    else:
        plt.savefig(output, bbox_inches="tight")

    plt.clf()
    plt.close()
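
A usage sketch of this extended interface with a hypothetical retrieval tag. Setting output=None opens an interactive window instead of writing a file:

plot_posterior(tag='retrieval_pt',
               vmr=True,
               inc_loglike=True,
               inc_pt_param=False,
               output=None)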
Example #5
0
    def get_model(self,
                  model_param: Dict[str, float],
                  spec_res: Optional[float] = None,
                  wavel_resample: Optional[np.ndarray] = None,
                  magnitude: bool = False,
                  smooth: bool = False) -> box.ModelBox:
        """
        Function for extracting a model spectrum by linearly interpolating the model grid.

        Parameters
        ----------
        model_param : dict
            Dictionary with the model parameters and values. The values should be within the
            boundaries of the grid. The grid boundaries of the spectra in the database can be
            obtained with :func:`~species.read.read_model.ReadModel.get_bounds()`.
        spec_res : float, None
            Spectral resolution that is used for smoothing the spectrum with a Gaussian kernel
            when ``smooth=True`` and/or resampling the spectrum when ``wavel_range`` of
            ``FitModel`` is not ``None``. The original wavelength points are used if both
            ``spec_res`` and ``wavel_resample`` are set to ``None``, or if ``smooth`` is set to
            ``True``.
        wavel_resample : np.ndarray, None
            Wavelength points (um) to which the spectrum is resampled. In that case, ``spec_res``
            can still be used for smoothing the spectrum with a Gaussian kernel.
        magnitude : bool
            Normalize the spectrum with a flux calibrated spectrum of Vega and return the magnitude
            instead of flux density.
        smooth : bool
            If ``True``, the spectrum is smoothed with a Gaussian kernel to the spectral resolution
            of ``spec_res``. This requires either a uniform spectral resolution of the input
            spectra (fast) or a uniform wavelength spacing of the input spectra (slow).

        Returns
        -------
        species.core.box.ModelBox
            Box with the model spectrum.
        """

        if smooth and spec_res is None:
            warnings.warn('The \'spec_res\' argument is required for smoothing the spectrum when '
                          '\'smooth\' is set to True.')

        grid_bounds = self.get_bounds()

        extra_param = ['radius', 'distance', 'mass', 'luminosity', 'lognorm_radius',
                       'lognorm_sigma', 'lognorm_ext', 'ism_ext', 'ism_red', 'powerlaw_max',
                       'powerlaw_exp', 'powerlaw_ext']

        for key in self.get_parameters():
            if key not in model_param.keys():
                raise ValueError(f'The \'{key}\' parameter is required by \'{self.model}\'. '
                                 f'The mandatory parameters are {self.get_parameters()}.')

            if model_param[key] < grid_bounds[key][0]:
                raise ValueError(f'The input value of \'{key}\' is smaller than the lower '
                                 f'boundary of the model grid ({model_param[key]} < '
                                 f'{grid_bounds[key][0]}).')

            if model_param[key] > grid_bounds[key][1]:
                raise ValueError(f'The input value of \'{key}\' is larger than the upper '
                                 f'boundary of the model grid ({model_param[key]} > '
                                 f'{grid_bounds[key][1]}).')

        for key in model_param.keys():
            if key not in self.get_parameters() and key not in extra_param:
                warnings.warn(f'The \'{key}\' parameter is not required by \'{self.model}\' so '
                              f'the parameter will be ignored. The mandatory parameters are '
                              f'{self.get_parameters()}.')

        if 'mass' in model_param and 'radius' not in model_param:
            mass = 1e3 * model_param['mass'] * constants.M_JUP  # (g)
            radius = math.sqrt(1e3 * constants.GRAVITY * mass / (10.**model_param['logg']))  # (cm)
            model_param['radius'] = 1e-2 * radius / constants.R_JUP  # (Rjup)

        if self.spectrum_interp is None:
            self.interpolate_model()

        if self.wavel_range is None:
            wl_points = self.get_wavelengths()
            self.wavel_range = (wl_points[0], wl_points[-1])

        parameters = []

        if 'teff' in model_param:
            parameters.append(model_param['teff'])

        if 'logg' in model_param:
            parameters.append(model_param['logg'])

        if 'feh' in model_param:
            parameters.append(model_param['feh'])

        if 'co' in model_param:
            parameters.append(model_param['co'])

        if 'fsed' in model_param:
            parameters.append(model_param['fsed'])

        flux = self.spectrum_interp(parameters)[0]

        if 'radius' in model_param:
            model_param['mass'] = read_util.get_mass(model_param['logg'], model_param['radius'])

            if 'distance' in model_param:
                scaling = (model_param['radius']*constants.R_JUP)**2 / \
                          (model_param['distance']*constants.PARSEC)**2

                flux *= scaling

        if smooth:
            flux = read_util.smooth_spectrum(wavelength=self.wl_points,
                                             flux=flux,
                                             spec_res=spec_res)

        if wavel_resample is not None:
            flux = spectres.spectres(wavel_resample,
                                     self.wl_points,
                                     flux,
                                     spec_errs=None,
                                     fill=np.nan,
                                     verbose=True)

        elif spec_res is not None and not smooth:
            index = np.where(np.isnan(flux))[0]

            if index.size > 0:
                raise ValueError('Flux values should not contain NaNs. Please make sure that '
                                 'the parameter values and the wavelength range are within '
                                 'the grid boundaries as stored in the database.')

            wavel_resample = read_util.create_wavelengths(
                (self.wl_points[0], self.wl_points[-1]), spec_res)

            indices = np.where((wavel_resample > self.wl_points[0]) &
                               (wavel_resample < self.wl_points[-2]))[0]

            wavel_resample = wavel_resample[indices]

            flux = spectres.spectres(wavel_resample,
                                     self.wl_points,
                                     flux,
                                     spec_errs=None,
                                     fill=np.nan,
                                     verbose=True)

        if magnitude:
            quantity = 'magnitude'

            with h5py.File(self.database, 'r') as h5_file:
                try:
                    h5_file['spectra/calibration/vega']

                except KeyError:
                    h5_file.close()
                    species_db = database.Database()
                    species_db.add_spectrum('vega')
                    h5_file = h5py.File(self.database, 'r')

            readcalib = read_calibration.ReadCalibration('vega', filter_name=None)
            calibbox = readcalib.get_spectrum()

            if wavel_resample is not None:
                new_spec_wavs = wavel_resample
            else:
                new_spec_wavs = self.wl_points

            flux_vega, _ = spectres.spectres(new_spec_wavs,
                                             calibbox.wavelength,
                                             calibbox.flux,
                                             spec_errs=calibbox.error,
                                             fill=np.nan,
                                             verbose=True)

            flux = -2.5*np.log10(flux/flux_vega)

        else:
            quantity = 'flux'

        if np.isnan(np.sum(flux)):
            warnings.warn(f'The resampled spectrum contains {np.sum(np.isnan(flux))} NaNs, '
                          f'probably because the original wavelength range does not fully '
                          f'encompass the new wavelength range. This happened with the '
                          f'following parameters: {model_param}.')

        if wavel_resample is None:
            wavelength = self.wl_points
        else:
            wavelength = wavel_resample

        model_box = box.create_box(boxtype='model',
                                   model=self.model,
                                   wavelength=wavelength,
                                   flux=flux,
                                   parameters=model_param,
                                   quantity=quantity)

        if 'lognorm_radius' in model_param and 'lognorm_sigma' in model_param and \
                'lognorm_ext' in model_param:

            model_box.flux = self.apply_lognorm_ext(model_box.wavelength,
                                                    model_box.flux,
                                                    model_param['lognorm_radius'],
                                                    model_param['lognorm_sigma'],
                                                    model_param['lognorm_ext'])

        if 'powerlaw_max' in model_param and 'powerlaw_exp' in model_param and \
                'powerlaw_ext' in model_param:

            model_box.flux = self.apply_powerlaw_ext(model_box.wavelength,
                                                     model_box.flux,
                                                     model_param['powerlaw_max'],
                                                     model_param['powerlaw_exp'],
                                                     model_param['powerlaw_ext'])

        if 'ism_ext' in model_param and 'ism_red' in model_param:

            model_box.flux = self.apply_ism_ext(model_box.wavelength,
                                                model_box.flux,
                                                model_param['ism_ext'],
                                                model_param['ism_red'])

        if 'radius' in model_box.parameters:
            model_box.parameters['luminosity'] = 4. * np.pi * (
                model_box.parameters['radius'] * constants.R_JUP)**2 * constants.SIGMA_SB * \
                model_box.parameters['teff']**4. / constants.L_SUN  # (Lsun)

        return model_box
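
A usage sketch, assuming that this method belongs to a model reader such as species.read.read_model.ReadModel. The model name and parameter values are hypothetical:

read_obj = read_model.ReadModel('drift-phoenix', wavel_range=(1., 5.))

model_box = read_obj.get_model(model_param={'teff': 1500., 'logg': 4.0, 'feh': 0.,
                                            'radius': 1.2, 'distance': 20.},
                               spec_res=100.,
                               smooth=True)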
Example #6
0
        def lnlike_multinest(cube, n_dim: int, n_param: int) -> np.float64:
            """
            Function for the logarithm of the likelihood, computed from the parameter cube.

            Parameters
            ----------
            cube : pymultinest.run.LP_c_double
                Unit cube.
            n_dim : int
                Number of dimensions.
            n_param : int
                Number of parameters.

            Returns
            -------
            float
                Log likelihood.
            """

            param_dict = {}
            spec_scaling = {}
            err_offset = {}
            corr_len = {}
            corr_amp = {}
            dust_param = {}

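            # Sort the parameter values from the cube into the model,
            # calibration, and dust dictionaries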
            for item in self.bounds:
                if item[:8] == 'scaling_' and item[8:] in self.spectrum:
                    spec_scaling[item[8:]] = cube[cube_index[item]]

                elif item[:6] == 'error_' and item[6:] in self.spectrum:
                    err_offset[item[6:]] = cube[cube_index[item]]  # log10(W m-2 um-1)

                elif item[:9] == 'corr_len_' and item[9:] in self.spectrum:
                    corr_len[item[9:]] = 10.**cube[cube_index[item]]  # (um)

                elif item[:9] == 'corr_amp_' and item[9:] in self.spectrum:
                    corr_amp[item[9:]] = cube[cube_index[item]]

                elif item[:8] == 'lognorm_':
                    dust_param[item] = cube[cube_index[item]]

                elif item[:9] == 'powerlaw_':
                    dust_param[item] = cube[cube_index[item]]

                elif item[:4] == 'ism_':
                    dust_param[item] = cube[cube_index[item]]

                else:
                    param_dict[item] = cube[cube_index[item]]

            if self.model == 'planck':
                param_dict['distance'] = self.distance[0]

            else:
                flux_scaling = (param_dict['radius']*constants.R_JUP)**2 / \
                               (self.distance[0]*constants.PARSEC)**2

                # The scaling is applied manually because of the interpolation
                del param_dict['radius']

            for item in self.spectrum:
                if item not in spec_scaling:
                    spec_scaling[item] = 1.

                if item not in err_offset:
                    err_offset[item] = None

            ln_like = 0.

            if self.model == 'planck' and self.n_planck > 1:
                for i in range(self.n_planck - 1):
                    if param_dict[f'teff_{i+1}'] > param_dict[f'teff_{i}']:
                        return -np.inf

                    if param_dict[f'radius_{i}'] > param_dict[f'radius_{i+1}']:
                        return -np.inf

            if prior is not None:
                for key, value in prior.items():
                    if key == 'mass':
                        mass = read_util.get_mass(cube[cube_index['logg']],
                                                  cube[cube_index['radius']])

                        ln_like += -0.5 * (mass - value[0])**2 / value[1]**2

                    else:
                        ln_like += -0.5 * (cube[cube_index[key]] -
                                           value[0])**2 / value[1]**2

            if 'lognorm_ext' in dust_param:
                cross_tmp = self.cross_sections['Generic/Bessell.V'](
                    dust_param['lognorm_sigma'],
                    10.**dust_param['lognorm_radius'])[0]

                n_grains = dust_param[
                    'lognorm_ext'] / cross_tmp / 2.5 / np.log10(np.exp(1.))

            elif 'powerlaw_ext' in dust_param:
                cross_tmp = self.cross_sections['Generic/Bessell.V'](
                    dust_param['powerlaw_exp'],
                    10.**dust_param['powerlaw_max'])

                n_grains = dust_param[
                    'powerlaw_ext'] / cross_tmp / 2.5 / np.log10(np.exp(1.))

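            # Add the likelihood contribution of the photometric fluxes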
            for i, obj_item in enumerate(self.objphot):
                if self.model == 'planck':
                    readplanck = read_planck.ReadPlanck(
                        filter_name=self.modelphot[i].filter_name)
                    phot_flux = readplanck.get_flux(
                        param_dict, synphot=self.modelphot[i])[0]

                else:
                    phot_flux = self.modelphot[i].spectrum_interp(
                        list(param_dict.values()))[0][0]
                    phot_flux *= flux_scaling

                if 'lognorm_ext' in dust_param:
                    cross_tmp = self.cross_sections[
                        self.modelphot[i].filter_name](
                            dust_param['lognorm_sigma'],
                            10.**dust_param['lognorm_radius'])[0]

                    phot_flux *= np.exp(-cross_tmp * n_grains)

                elif 'powerlaw_ext' in dust_param:
                    cross_tmp = self.cross_sections[
                        self.modelphot[i].filter_name](
                            dust_param['powerlaw_exp'],
                            10.**dust_param['powerlaw_max'])[0]

                    phot_flux *= np.exp(-cross_tmp * n_grains)

                elif 'ism_ext' in dust_param:
                    read_filt = read_filter.ReadFilter(
                        self.modelphot[i].filter_name)
                    filt_wavel = np.array([read_filt.mean_wavelength()])

                    ext_filt = dust_util.ism_extinction(
                        dust_param['ism_ext'], dust_param['ism_red'],
                        filt_wavel)

                    phot_flux *= 10.**(-0.4 * ext_filt[0])

                if obj_item.ndim == 1:
                    ln_like += -0.5 * (obj_item[0] -
                                       phot_flux)**2 / obj_item[1]**2

                else:
                    for j in range(obj_item.shape[1]):
                        ln_like += -0.5 * (obj_item[0, j] -
                                           phot_flux)**2 / obj_item[1, j]**2

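            # Add the likelihood contribution of each spectrum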
            for i, item in enumerate(self.spectrum.keys()):
                data_flux = spec_scaling[item] * self.spectrum[item][0][:, 1]

                if err_offset[item] is None:
                    data_var = self.spectrum[item][0][:, 2]**2
                else:
                    data_var = (self.spectrum[item][0][:, 2] +
                                10.**err_offset[item])**2

                if self.spectrum[item][2] is not None:
                    if err_offset[item] is None:
                        data_cov_inv = self.spectrum[item][2]

                    else:
                        # Ratio of the inflated and original uncertainties
                        sigma_ratio = np.sqrt(
                            data_var) / self.spectrum[item][0][:, 2]
                        sigma_j, sigma_i = np.meshgrid(sigma_ratio,
                                                       sigma_ratio)

                        # Calculate the inverse of the inflated covariance matrix
                        data_cov_inv = np.linalg.inv(self.spectrum[item][1] *
                                                     sigma_i * sigma_j)

                if self.model == 'planck':
                    readplanck = read_planck.ReadPlanck(
                        (0.9 * self.spectrum[item][0][0, 0],
                         1.1 * self.spectrum[item][0][-1, 0]))

                    model_box = readplanck.get_spectrum(param_dict,
                                                        1000.,
                                                        smooth=True)

                    model_flux = spectres.spectres(
                        self.spectrum[item][0][:, 0], model_box.wavelength,
                        model_box.flux)

                else:
                    model_flux = self.modelspec[i].spectrum_interp(
                        list(param_dict.values()))[0, :]
                    model_flux *= flux_scaling

                if 'lognorm_ext' in dust_param:
                    for j, cross_item in enumerate(self.cross_sections[item]):
                        cross_tmp = cross_item(
                            dust_param['lognorm_sigma'],
                            10.**dust_param['lognorm_radius'])[0]

                        model_flux[j] *= np.exp(-cross_tmp * n_grains)

                elif 'powerlaw_ext' in dust_param:
                    for j, cross_item in enumerate(self.cross_sections[item]):
                        cross_tmp = cross_item(
                            dust_param['powerlaw_exp'],
                            10.**dust_param['powerlaw_max'])[0]

                        model_flux[j] *= np.exp(-cross_tmp * n_grains)

                elif 'ism_ext' in dust_param:
                    ext_filt = dust_util.ism_extinction(
                        dust_param['ism_ext'], dust_param['ism_red'],
                        self.spectrum[item][0][:, 0])

                    model_flux *= 10.**(-0.4 * ext_filt)

                if self.spectrum[item][2] is not None:
                    # Use the inverted covariance matrix
                    dot_tmp = np.dot(
                        data_flux - model_flux,
                        np.dot(data_cov_inv, data_flux - model_flux))

                    ln_like += -0.5 * dot_tmp - 0.5 * np.nansum(
                        np.log(2. * np.pi * data_var))

                else:
                    if item in self.fit_corr:
                        # Covariance model (Wang et al. 2020)
                        wavel = self.spectrum[item][0][:, 0]  # (um)
                        wavel_j, wavel_i = np.meshgrid(wavel, wavel)

                        error = np.sqrt(data_var)  # (W m-2 um-1)
                        error_j, error_i = np.meshgrid(error, error)

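                        # Squared-exponential kernel for the correlated
                        # noise plus an uncorrelated (diagonal) term,
                        # weighted by the correlation amplitude and length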
                        cov_matrix = corr_amp[item]**2 * error_i * error_j * \
                            np.exp(-(wavel_i-wavel_j)**2 / (2.*corr_len[item]**2)) + \
                            (1.-corr_amp[item]**2) * np.eye(wavel.shape[0])*error_i**2

                        dot_tmp = np.dot(
                            data_flux - model_flux,
                            np.dot(np.linalg.inv(cov_matrix),
                                   data_flux - model_flux))

                        ln_like += -0.5 * dot_tmp - 0.5 * np.nansum(
                            np.log(2. * np.pi * data_var))

                    else:
                        # Calculate the chi-square without a covariance matrix
                        ln_like += np.nansum(
                            -0.5 * (data_flux - model_flux)**2 / data_var -
                            0.5 * np.log(2. * np.pi * data_var))

            return ln_like
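The correlated-noise branch above follows the covariance model of Wang et al. (2020): a squared-exponential kernel scaled by the fitted amplitude plus an uncorrelated diagonal term. Below is a minimal, self-contained sketch of that likelihood with made-up wavelengths, fluxes, and kernel parameters; none of these values come from the example above.

import numpy as np

# Toy data: wavelengths (um), observed and model fluxes (W m-2 um-1), errors
wavel = np.linspace(1.0, 2.0, 50)
model_flux = np.full(50, 1e-15)
data_flux = model_flux * (1.0 + 0.05 * np.random.randn(50))
error = np.full(50, 5e-17)
data_var = error**2

# Hypothetical kernel parameters: correlation amplitude (0-1) and length (um)
corr_amp = 0.5
corr_len = 0.05

wavel_j, wavel_i = np.meshgrid(wavel, wavel)
error_j, error_i = np.meshgrid(error, error)

# Squared-exponential kernel plus an uncorrelated (diagonal) term
cov_matrix = corr_amp**2 * error_i * error_j * \
    np.exp(-(wavel_i - wavel_j)**2 / (2. * corr_len**2)) + \
    (1. - corr_amp**2) * np.eye(wavel.size) * error_i**2

# Gaussian log-likelihood with the full covariance matrix
residual = data_flux - model_flux
ln_like = -0.5 * residual @ np.linalg.inv(cov_matrix) @ residual \
          - 0.5 * np.nansum(np.log(2. * np.pi * data_var))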
Example #7
    def get_model(self, model_par, specres=None):
        """
        Parameters
        ----------
        model_par : dict
            Model parameter values.
        specres : float
            Spectral resolution, achieved by smoothing with a Gaussian kernel. The original
            wavelength points are used if set to None. Using a high spectral resolution is
            computationally faster if the original wavelength grid has a fine sampling.

        Returns
        -------
        species.core.box.ModelBox
            Box with the model spectrum.
        """

        if 'mass' in model_par:
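            # Radius from the mass and surface gravity, R = sqrt(G M / g),
            # evaluated in cgs units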
            mass = 1e3 * model_par['mass'] * constants.M_JUP  # [g]
            radius = math.sqrt(1e3 * constants.GRAVITY * mass /
                               (10.**model_par['logg']))  # [cm]
            model_par['radius'] = 1e-2 * radius / constants.R_JUP  # [Rjup]

        if self.spectrum_interp is None:
            self.interpolate()

        if self.wavelength is None:
            wl_points = self.get_wavelength()
            self.wavelength = (wl_points[0], wl_points[-1])

        if self.model in ('drift-phoenix', 'bt-nextgen',
                          'petitcode_warm_clear'):
            parameters = [
                model_par['teff'], model_par['logg'], model_par['feh']
            ]

        elif self.model in ('bt-settl', 'ames-dusty', 'ames-cond'):
            parameters = [model_par['teff'], model_par['logg']]

        elif self.model == 'petitcode_warm_cloudy':
            parameters = [
                model_par['teff'], model_par['logg'], model_par['feh'],
                model_par['fsed']
            ]

        elif self.model == 'petitcode_hot_clear':
            parameters = [
                model_par['teff'], model_par['logg'], model_par['feh'],
                model_par['co']
            ]

        elif self.model == 'petitcode_hot_cloudy':
            parameters = [
                model_par['teff'], model_par['logg'], model_par['feh'],
                model_par['co'], model_par['fsed']
            ]

        flux = self.spectrum_interp(parameters)[0]

        if 'radius' in model_par:
            model_par['mass'] = read_util.get_mass(model_par)

            if 'distance' in model_par:
                scaling = (model_par['radius']*constants.R_JUP)**2 / \
                          (model_par['distance']*constants.PARSEC)**2

                flux *= scaling

        if specres is not None:
            index = np.where(np.isnan(flux))[0]

            if index.size > 0:
                raise ValueError('Flux values should not contain NaNs.')

            flux = read_util.smooth_spectrum(wavelength=self.wl_points,
                                             flux=flux,
                                             specres=specres,
                                             size=11)

        return box.create_box(boxtype='model',
                              model=self.model,
                              wavelength=self.wl_points,
                              flux=flux,
                              parameters=model_par)
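A rough usage sketch for this method follows. The ReadModel constructor arguments, the 'drift-phoenix' grid name, and the parameter values are assumptions for illustration; the actual interface may differ between versions of species.

from species.read import read_model

# Hypothetical setup; the constructor signature is an assumption
readmodel = read_model.ReadModel(model='drift-phoenix',
                                 wavel_range=(1.0, 2.5))

# Atmospheric parameters plus radius (Rjup) and distance (pc) for flux scaling
model_par = {'teff': 1500., 'logg': 4.0, 'feh': 0.0,
             'radius': 1.2, 'distance': 20.}

# Interpolate the grid and smooth to a spectral resolution of 100
modelbox = readmodel.get_model(model_par, specres=100.)

print(modelbox.wavelength, modelbox.flux)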
Example #8
    def get_model(
        self,
        model_param: Dict[str, float],
        quenching: Optional[str] = None,
        spec_res: Optional[float] = None,
        wavel_resample: Optional[np.ndarray] = None,
        plot_contribution: Optional[Union[bool, str]] = False,
        temp_nodes: Optional[int] = None,
    ) -> box.ModelBox:
        """
        Function for calculating a model spectrum with
        ``petitRADTRANS``.

        Parameters
        ----------
        model_param : dict
            Dictionary with the model parameters and values.
        quenching : str, None
            Quenching type for CO/CH4/H2O abundances. Either the
            quenching pressure (bar) is a free parameter
            (``quenching='pressure'``) or the quenching pressure is
            calculated from the mixing and chemical timescales
            (``quenching='diffusion'``). The quenching is not applied
            if the argument is set to ``None``.
        spec_res : float, None
            Spectral resolution, achieved by smoothing with a Gaussian
            kernel. No smoothing is applied when the argument is set to
            ``None``.
        wavel_resample : np.ndarray, None
            Wavelength points (um) to which the spectrum will be
            resampled. The original wavelength points will be used if
            the argument is set to ``None``.
        plot_contribution : bool, str, None
            Filename for the plot with the emission contribution. The
            plot is not created if the argument is set to ``False`` or
            ``None``. If set to ``True``, the plot is shown in an
            interface window instead of written to a file.
        temp_nodes : int, None
            Number of free temperature nodes.

        Returns
        -------
        species.core.box.ModelBox
            Box with the petitRADTRANS model spectrum.
        """

        # Set chemistry type

        if "metallicity" in model_param and "c_o_ratio" in model_param:
            chemistry = "equilibrium"

        else:
            chemistry = "free"

            # Check if all line species from the Radtrans object
            # are also present in the model_param dictionary

            for item in self.line_species:
                if item not in model_param:
                    raise RuntimeError(f"The abundance of {item} is not found "
                                       f"in the 'model_param' dictionary. "
                                       f"Please add the log10 mass fraction "
                                       f"of {item}.")

        # Check quenching parameter

        if not hasattr(self, "quenching"):
            self.quenching = quenching

        if self.quenching is not None and chemistry != "equilibrium":
            raise ValueError(
                "The 'quenching' parameter can only be used in combination with "
                "chemistry='equilibrium'.")

        if self.quenching is not None and self.quenching not in [
                "pressure",
                "diffusion",
        ]:
            raise ValueError(
                "The argument of 'quenching' should be one of the following: "
                "'pressure', 'diffusion', or None.")

        # C/O and [Fe/H]

        if chemistry == "equilibrium":
            # Equilibrium chemistry
            metallicity = model_param["metallicity"]
            c_o_ratio = model_param["c_o_ratio"]

            log_x_abund = None

        elif chemistry == "free":
            # Free chemistry

            # TODO Set [Fe/H] = 0 for Molliere P-T profile and
            # cloud condensation profiles
            metallicity = 0.0

            # Create a dictionary with the mass fractions

            log_x_abund = {}

            for item in self.line_species:
                log_x_abund[item] = model_param[item]

            _, _, c_o_ratio = retrieval_util.calc_metal_ratio(log_x_abund)

        # Create the P-T profile

        if self.pressure_grid == "manual":
            temp = self.pt_manual[:, 1]

        elif ("tint" in model_param and "log_delta" in model_param
              and "alpha" in model_param):
            temp, _, _ = retrieval_util.pt_ret_model(
                np.array(
                    [model_param["t1"], model_param["t2"], model_param["t3"]]),
                10.0**model_param["log_delta"],
                model_param["alpha"],
                model_param["tint"],
                self.pressure,
                metallicity,
                c_o_ratio,
            )

        elif "tint" in model_param and "log_delta" in model_param:
            tau = self.pressure * 1e6 * 10.0**model_param["log_delta"]
            temp = (0.75 * model_param["tint"]**4.0 * (2.0 / 3.0 + tau))**0.25

        else:
            if temp_nodes is None:
                temp_nodes = 0

                for i in range(100):
                    if f"t{i}" in model_param:
                        temp_nodes += 1
                    else:
                        break

            knot_press = np.logspace(np.log10(self.pressure[0]),
                                     np.log10(self.pressure[-1]), temp_nodes)

            knot_temp = []
            for i in range(temp_nodes):
                knot_temp.append(model_param[f"t{i}"])

            knot_temp = np.asarray(knot_temp)

            if "pt_smooth" in model_param:
                pt_smooth = model_param["pt_smooth"]

            else:
                pt_smooth = None

            temp = retrieval_util.pt_spline_interp(
                knot_press,
                knot_temp,
                self.pressure,
                pt_smooth=pt_smooth,
            )

        # Set the log quenching pressure, log(P/bar)

        if self.quenching == "pressure":
            p_quench = 10.0**model_param["log_p_quench"]

        elif self.quenching == "diffusion":
            p_quench = retrieval_util.quench_pressure(
                self.pressure,
                temp,
                model_param["metallicity"],
                model_param["c_o_ratio"],
                model_param["logg"],
                model_param["log_kzz"],
            )

        else:
            if "log_p_quench" in model_param:
                warnings.warn("The 'model_param' dictionary contains the "
                              "'log_p_quench' parameter but 'quenching=None'. "
                              "The quenching pressure from the dictionary is "
                              "therefore ignored.")

            p_quench = None

        if (len(self.cloud_species) > 0 or "log_kappa_0" in model_param
                or "log_kappa_gray" in model_param
                or "log_kappa_abs" in model_param):

            tau_cloud = None
            log_x_base = None

            if ("log_kappa_0" in model_param or "log_kappa_gray" in model_param
                    or "log_kappa_abs" in model_param):
                if "log_tau_cloud" in model_param:
                    tau_cloud = 10.0**model_param["log_tau_cloud"]

                elif "tau_cloud" in model_param:
                    tau_cloud = model_param["tau_cloud"]

            elif chemistry == "equilibrium":
                # Create the dictionary with the mass fractions of the
                # clouds relative to the maximum values allowed from
                # elemental abundances

                cloud_fractions = {}

                for item in self.cloud_species:

                    if f"{item[:-3].lower()}_fraction" in model_param:
                        cloud_fractions[item] = model_param[
                            f"{item[:-3].lower()}_fraction"]

                    elif f"{item[:-3].lower()}_tau" in model_param:
                        # Import the chemistry module here because it is slow

                        from poor_mans_nonequ_chem.poor_mans_nonequ_chem import (
                            interpol_abundances, )

                        # Interpolate the abundances, following chemical equilibrium

                        abund_in = interpol_abundances(
                            np.full(self.pressure.size, c_o_ratio),
                            np.full(self.pressure.size, metallicity),
                            temp,
                            self.pressure,
                            Pquench_carbon=p_quench,
                        )

                        # Extract the mean molecular weight

                        mmw = abund_in["MMW"]

                        # Calculate the scaled mass fraction of the clouds

                        cloud_fractions[
                            item] = retrieval_util.scale_cloud_abund(
                                model_param,
                                self.rt_object,
                                self.pressure,
                                temp,
                                mmw,
                                "equilibrium",
                                abund_in,
                                item,
                                model_param[f"{item[:-3].lower()}_tau"],
                                pressure_grid=self.pressure_grid,
                            )

                if "log_tau_cloud" in model_param:
                    # Set the log mass fraction to zero and use the
                    # optical depth parameter to scale the cloud mass
                    # fraction with petitRADTRANS

                    tau_cloud = 10.0**model_param["log_tau_cloud"]

                elif "tau_cloud" in model_param:
                    # Set the log mass fraction to zero and use the
                    # optical depth parameter to scale the cloud mass
                    # fraction with petitRADTRANS

                    tau_cloud = model_param["tau_cloud"]

                if tau_cloud is not None:
                    for i, item in enumerate(self.cloud_species):
                        if i == 0:
                            cloud_fractions[item] = 0.0

                        else:
                            cloud_1 = item[:-3].lower()
                            cloud_2 = self.cloud_species[0][:-3].lower()

                            cloud_fractions[item] = model_param[
                                f"{cloud_1}_{cloud_2}_ratio"]

                # Create a dictionary with the log mass fractions at the cloud base

                log_x_base = retrieval_util.log_x_cloud_base(
                    c_o_ratio, metallicity, cloud_fractions)

            elif chemistry == "free":
                # Add the log10 mass fractions of the clouds to the dictionary

                log_x_base = {}

                if "log_tau_cloud" in model_param:
                    # Set the log mass fraction to zero and use the
                    # optical depth parameter to scale the cloud mass
                    # fraction with petitRADTRANS

                    tau_cloud = 10.0**model_param["log_tau_cloud"]

                elif "tau_cloud" in model_param:
                    # Set the log mass fraction to zero and use the
                    # optical depth parameter to scale the cloud mass
                    # fraction with petitRADTRANS

                    tau_cloud = model_param["tau_cloud"]

                if tau_cloud is None:
                    for item in self.cloud_species:
                        # Set the log10 of the mass fractions at the
                        # cloud base equal to the value from the
                        # parameter dictionary
                        log_x_base[item[:-3]] = model_param[item]

                else:
                    # Set the log10 of the mass fractions with the
                    # ratios from the parameter dictionary and
                    # scale to the actual mass fractions with
                    # tau_cloud that is used in calc_spectrum_clouds
                    for i, item in enumerate(self.cloud_species):
                        if i == 0:
                            log_x_base[item[:-3]] = 0.0

                        else:
                            cloud_1 = item[:-3].lower()
                            cloud_2 = self.cloud_species[0][:-3].lower()

                            log_x_base[item[:-3]] = model_param[
                                f"{cloud_1}_{cloud_2}_ratio"]

            # Calculate the petitRADTRANS spectrum
            # for a cloudy atmosphere

            if "fsed_1" in model_param and "fsed_2" in model_param:
                cloud_dict = model_param.copy()
                cloud_dict["fsed"] = cloud_dict["fsed_1"]

                (
                    wavelength,
                    flux_1,
                    emission_contr_1,
                    _,
                ) = retrieval_util.calc_spectrum_clouds(
                    self.rt_object,
                    self.pressure,
                    temp,
                    c_o_ratio,
                    metallicity,
                    p_quench,
                    log_x_abund,
                    log_x_base,
                    cloud_dict,
                    model_param["logg"],
                    chemistry=chemistry,
                    pressure_grid=self.pressure_grid,
                    plotting=False,
                    contribution=True,
                    tau_cloud=tau_cloud,
                    cloud_wavel=self.cloud_wavel,
                )

                cloud_dict = model_param.copy()
                cloud_dict["fsed"] = cloud_dict["fsed_2"]

                (
                    wavelength,
                    flux_2,
                    emission_contr_2,
                    _,
                ) = retrieval_util.calc_spectrum_clouds(
                    self.rt_object,
                    self.pressure,
                    temp,
                    c_o_ratio,
                    metallicity,
                    p_quench,
                    log_x_abund,
                    log_x_base,
                    cloud_dict,
                    model_param["logg"],
                    chemistry=chemistry,
                    pressure_grid=self.pressure_grid,
                    plotting=False,
                    contribution=True,
                    tau_cloud=tau_cloud,
                    cloud_wavel=self.cloud_wavel,
                )

                flux = (model_param["f_clouds"] * flux_1 +
                        (1.0 - model_param["f_clouds"]) * flux_2)

                emission_contr = (
                    model_param["f_clouds"] * emission_contr_1 +
                    (1.0 - model_param["f_clouds"]) * emission_contr_2)

            else:
                (
                    wavelength,
                    flux,
                    emission_contr,
                    _,
                ) = retrieval_util.calc_spectrum_clouds(
                    self.rt_object,
                    self.pressure,
                    temp,
                    c_o_ratio,
                    metallicity,
                    p_quench,
                    log_x_abund,
                    log_x_base,
                    model_param,
                    model_param["logg"],
                    chemistry=chemistry,
                    pressure_grid=self.pressure_grid,
                    plotting=False,
                    contribution=True,
                    tau_cloud=tau_cloud,
                    cloud_wavel=self.cloud_wavel,
                )

        elif chemistry == "equilibrium":
            # Calculate the petitRADTRANS spectrum for a clear atmosphere

            wavelength, flux, emission_contr = retrieval_util.calc_spectrum_clear(
                self.rt_object,
                self.pressure,
                temp,
                model_param["logg"],
                model_param["c_o_ratio"],
                model_param["metallicity"],
                p_quench,
                None,
                pressure_grid=self.pressure_grid,
                chemistry=chemistry,
                contribution=True,
            )

        elif chemistry == "free":
            log_x_abund = {}

            for ab_item in self.rt_object.line_species:
                log_x_abund[ab_item] = model_param[ab_item]

            wavelength, flux, emission_contr = retrieval_util.calc_spectrum_clear(
                self.rt_object,
                self.pressure,
                temp,
                model_param["logg"],
                None,
                None,
                None,
                log_x_abund,
                chemistry=chemistry,
                pressure_grid=self.pressure_grid,
                contribution=True,
            )

        if "radius" in model_param:
            # Calculate the planet mass from log(g) and radius

            model_param["mass"] = read_util.get_mass(model_param["logg"],
                                                     model_param["radius"])

            # Scale the flux to the observer

            if "parallax" in model_param:
                scaling = (model_param["radius"] * constants.R_JUP)**2 / (
                    1e3 * constants.PARSEC / model_param["parallax"])**2

                flux *= scaling

            elif "distance" in model_param:
                scaling = (model_param["radius"] * constants.R_JUP)**2 / (
                    model_param["distance"] * constants.PARSEC)**2

                flux *= scaling

        # Apply ISM extinction

        if "ism_ext" in model_param:
            if "ism_red" in model_param:
                ism_reddening = model_param["ism_red"]

            else:
                # Use default ISM reddening (R_V = 3.1) if ism_red is not provided
                ism_reddening = 3.1

            flux = dust_util.apply_ism_ext(wavelength, flux,
                                           model_param["ism_ext"],
                                           ism_reddening)

        # Plot 2D emission contribution

        if plot_contribution:
            # Calculate the total optical depth (line and continuum opacities)
            # self.rt_object.calc_opt_depth(10.**model_param['logg'])

            # From Paul: The first axis of total_tau is the coordinate
            # of the cumulative opacity distribution function (ranging
            # from 0 to 1). A correct average is obtained by
            # multiplying the first axis with self.w_gauss, then
            # summing them. This is then the actual wavelength-mean.

            if self.scattering:
                # From petitRADTRANS: Only use 0 index for species
                # because for lbl or test_ck_shuffle_comp = True
                # everything has been moved into the 0th index
                w_gauss = self.rt_object.w_gauss[..., np.newaxis, np.newaxis]
                optical_depth = np.sum(w_gauss *
                                       self.rt_object.total_tau[:, :, 0, :],
                                       axis=0)

            else:
                # TODO Is this correct?
                w_gauss = self.rt_object.w_gauss[..., np.newaxis, np.newaxis,
                                                 np.newaxis]
                optical_depth = np.sum(w_gauss *
                                       self.rt_object.total_tau[:, :, :, :],
                                       axis=0)

                # Sum over all species
                optical_depth = np.sum(optical_depth, axis=1)

            mpl.rcParams["font.serif"] = ["Bitstream Vera Serif"]
            mpl.rcParams["font.family"] = "serif"

            plt.rc("axes", edgecolor="black", linewidth=2.5)

            plt.figure(1, figsize=(8.0, 4.0))
            gridsp = mpl.gridspec.GridSpec(1, 1)
            gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)

            ax = plt.subplot(gridsp[0, 0])

            ax.tick_params(
                axis="both",
                which="major",
                colors="black",
                labelcolor="black",
                direction="in",
                width=1,
                length=5,
                labelsize=12,
                top=True,
                bottom=True,
                left=True,
                right=True,
            )

            ax.tick_params(
                axis="both",
                which="minor",
                colors="black",
                labelcolor="black",
                direction="in",
                width=1,
                length=3,
                labelsize=12,
                top=True,
                bottom=True,
                left=True,
                right=True,
            )

            ax.set_xlabel(r"Wavelength ($\mu$m)", fontsize=13)
            ax.set_ylabel("Pressure (bar)", fontsize=13)

            ax.get_xaxis().set_label_coords(0.5, -0.09)
            ax.get_yaxis().set_label_coords(-0.07, 0.5)

            ax.set_yscale("log")

            ax.xaxis.set_major_locator(MultipleLocator(1.0))
            ax.xaxis.set_minor_locator(MultipleLocator(0.2))

            press_bar = 1e-6 * self.rt_object.press  # (Ba) -> (bar)

            xx_grid, yy_grid = np.meshgrid(wavelength, press_bar)

            ax.pcolormesh(
                xx_grid,
                yy_grid,
                emission_contr,
                cmap=plt.cm.bone_r,
                shading="gouraud",
            )

            photo_press = np.zeros(wavelength.shape[0])

            for i in range(photo_press.shape[0]):
                press_interp = interp1d(optical_depth[i, :],
                                        self.rt_object.press)
                photo_press[i] = press_interp(1.0) * 1e-6  # cgs to (bar)

            ax.plot(wavelength, photo_press, lw=0.5, color="gray")

            ax.set_xlim(np.amin(wavelength), np.amax(wavelength))
            ax.set_ylim(np.amax(press_bar), np.amin(press_bar))

            if isinstance(plot_contribution, str):
                plt.savefig(plot_contribution, bbox_inches="tight")
            else:
                plt.show()

            plt.clf()
            plt.close()

        # Convolve the spectrum with a Gaussian LSF

        if spec_res is not None:
            flux = retrieval_util.convolve(wavelength, flux, spec_res)

        # Resample the spectrum

        if wavel_resample is not None:
            flux = spectres.spectres(
                wavel_resample,
                wavelength,
                flux,
                spec_errs=None,
                fill=np.nan,
                verbose=True,
            )

            wavelength = wavel_resample

        if hasattr(self.rt_object, "h_bol"):
            pressure = 1e-6 * self.rt_object.press  # (bar)
            f_bol = -4.0 * np.pi * self.rt_object.h_bol
            f_bol *= 1e-3  # (erg s-1 cm-2) -> (W m-2)
            bol_flux = np.column_stack((pressure, f_bol))

        else:
            bol_flux = None

        return box.create_box(
            boxtype="model",
            model="petitradtrans",
            wavelength=wavelength,
            flux=flux,
            parameters=model_param,
            quantity="flux",
            contribution=emission_contr,
            bol_flux=bol_flux,
        )
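A rough usage sketch of this method follows. The ReadRadtrans constructor arguments, the line species names, and the parameter values are assumptions for illustration; see the species and petitRADTRANS documentation for the actual interfaces.

from species.read import read_radtrans

# Hypothetical setup of the radiative transfer object (assumed signature)
radtrans = read_radtrans.ReadRadtrans(line_species=['H2O', 'CO', 'CH4'],
                                      scattering=False,
                                      wavel_range=(1.0, 5.0),
                                      pressure_grid='smaller')

# Eddington-type P-T profile with equilibrium chemistry and no clouds
model_param = {'tint': 900., 'log_delta': -6., 'logg': 4.5,
               'metallicity': 0.0, 'c_o_ratio': 0.55,
               'radius': 1.2, 'parallax': 50.}

modelbox = radtrans.get_model(model_param,
                              quenching=None,
                              spec_res=500.)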