import numpy as np
import pytest
import astropy.units as u
from astropy import uncertainty as unc
# _get_values_from_distribution is the private helper under test,
# imported from the module that defines it


def test_get_values_from_distribution():
    center, std = 1, 0.2
    np.random.seed(12345)
    n_distr = unc.normal(center * u.kpc, std=std * u.kpc, n_samples=100)
    result = _get_values_from_distribution(n_distr)
    assert center == pytest.approx(result['value'], abs=1e-2)
    assert std == pytest.approx(result['error'], abs=1e-2)
    assert 'kpc' == result['unit']

    np.random.seed(12345)
    n_distr = unc.normal(center, std=std, n_samples=100)
    result = _get_values_from_distribution(n_distr)
    assert center == pytest.approx(result['value'], abs=1e-2)
    assert std == pytest.approx(result['error'], abs=1e-2)
    assert result.get('unit', None) is None

    result = _get_values_from_distribution(n_distr, unit='kpc')
    assert 'kpc' == result['unit']

    distr = [
        1.20881063, 0.93766121, 1.20136033, 1.11122468, 0.88140548, 0.98529047,
        0.83750181, 0.95603778, 0.90262727, 0.76719971, 0.96954131, 0.83957612,
        1.05208742, 0.9203976, 0.5388856, 0.82028187, 0.99002746, 0.99821842,
        1.08264829, 0.88236597, 1.07393172, 0.68800062, 0.95087714, 0.95349601,
        1.20331926, 1.1427941, 1.13346843, 1.12862014, 1.32770298
    ]
    result = _get_values_from_distribution(distr)
    assert center == pytest.approx(result['value'], abs=5e-2)
    assert std == pytest.approx(result['error'], abs=5e-2)
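
For context, a minimal sketch of a helper that would satisfy this test. The real _get_values_from_distribution may differ; everything below is reconstructed from the assertions above and should be read as an assumption, not the actual implementation.

def _get_values_from_distribution(distribution, unit=None):
    # hypothetical reconstruction from the test above, not the actual helper
    if not isinstance(distribution, unc.Distribution):
        # wrap plain sequences of samples into a Distribution
        distribution = unc.Distribution(np.asarray(distribution))
    value = distribution.pdf_mean()
    error = distribution.pdf_std()
    result = {}
    if hasattr(value, 'unit'):
        # Quantity distributions carry their own unit
        result['unit'] = str(value.unit)
        value, error = value.value, error.value
    elif unit is not None:
        result['unit'] = unit
    result['value'] = float(value)
    result['error'] = float(error)
    return result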
Example 2
        # fragment from a loop over sightlines (index k): read the emcee chain
        # (discarding burn-in), then build A(V), E(B-V), and R(V) distributions
        nsteps, nwalkers = reader.get_log_prob().shape
        samples = reader.get_chain(discard=int(mcmc_burnfrac * nsteps),
                                   flat=True)

        avs_dist = unc.Distribution(samples[:, -1])
        av_per = avs_dist.pdf_percentiles([16.0, 50.0, 84.0])
        avs[k] = av_per[1]
        avs_unc[1, k] = av_per[2] - av_per[1]
        avs_unc[0, k] = av_per[1] - av_per[0]
        # print(avs_dist.pdf_percentiles([33., 50., 87.]))

        (indxs, ) = np.where((cext.waves["BAND"] > 0.4 * u.micron)
                             & (cext.waves["BAND"] < 0.5 * u.micron))
        ebvs_dist = unc.normal(
            cext.exts["BAND"][indxs[0]],
            std=cext.uncs["BAND"][indxs[0]],
            n_samples=avs_dist.n_samples,
        )
        ebvs[k] = ebvs_dist.pdf_mean()
        ebvs_unc[k] = ebvs_dist.pdf_std()

        rvs_dist = avs_dist / ebvs_dist
        rv_per = rvs_dist.pdf_percentiles([16.0, 50.0, 84.0])
        rvs[k] = rv_per[1]
        rvs_unc[1, k] = rv_per[2] - rv_per[1]
        rvs_unc[0, k] = rv_per[1] - rv_per[0]

        (indxs, ) = np.where((cext.waves["BAND"] > 2.1 * u.micron)
                             & (cext.waves["BAND"] < 2.3 * u.micron))
        # print(cext.waves["BAND"][indxs[0]])
        # E(K-V) distribution for the 2.1-2.3 micron band, built like ebvs_dist above
        ekvs_dist = unc.normal(
            cext.exts["BAND"][indxs[0]],
            std=cext.uncs["BAND"][indxs[0]],
            n_samples=avs_dist.n_samples,
        )
Example 3
def fit_spex_ext(
    starpair,
    path,
    functype="pow",
    dense=False,
    profile="drude_asym",
    exclude=None,
    bootstrap=False,
    fixed=False,
):
    """
    Fit the observed SpeX NIR extinction curve

    Parameters
    ----------
    starpair : string
        Name of the star pair for which to fit the extinction curve, in the format "reddenedstarname_comparisonstarname" (no spaces), or "average" to fit the average extinction curve

    path : string
        Path to the data files

    functype : string [default="pow"]
        Fitting function type ("pow" for powerlaw or "pol" for polynomial)

    dense : boolean [default=False]
        Whether or not to fit the features around 3 and 3.4 micron

    profile : string [default="drude_asym"]
        Profile to use for the features if dense = True (options are "gauss", "drude", "lorentz", "gauss_asym", "drude_asym", "lorentz_asym")

    exclude : list of tuples [default=None]
        list of tuples (min,max) with wavelength regions (in micron) that need to be excluded from the fitting, e.g. [(0.8,1.2),(2.2,5)]

    bootstrap : boolean [default=False]
        Whether or not to do a quick bootstrap fitting to get more realistic uncertainties on the fitting results

    fixed : boolean [default=False]
        Whether or not to add a fixed feature around 3 micron (for diffuse sightlines)

    Returns
    -------
    Updates extdata.model["type", "waves", "exts", "residuals", "chi2", "params"] and extdata.columns["AV"] with the fitting results:
        - type: string with the type of model (e.g. "pow_elx_Drude")
        - waves: np.ndarray with the SpeX wavelengths
        - exts: np.ndarray with the fitted model to the extinction curve at "waves" wavelengths
        - residuals: np.ndarray with the residuals, i.e. data-fit, at "waves" wavelengths
        - chi2 : float with the chi square of the fitting
        - params: list with output Parameter objects
    """
    # retrieve the SpeX data to be fitted, and sort the curve from short to long wavelengths
    filename = "%s%s_ext.fits" % (path, starpair.lower())
    if fixed:
        # substitute only the extension, so a "." elsewhere in the path is untouched
        filename = filename.replace(".fits", "_ice.fits")
    extdata = ExtData(filename)
    (waves, exts, exts_unc) = extdata.get_fitdata(["SpeX_SXD", "SpeX_LXD"])
    indx = np.argsort(waves)
    waves = waves[indx].value
    exts = exts[indx]
    exts_unc = exts_unc[indx]

    # exclude wavelength regions if requested
    if exclude:
        mask = np.full_like(waves, False, dtype=bool)
        for region in exclude:
            mask |= (waves > region[0]) & (waves < region[1])
        waves = waves[~mask]
        exts = exts[~mask]
        exts_unc = exts_unc[~mask]

    # get a quick estimate of A(V)
    if extdata.type == "elx":
        extdata.calc_AV()
        AV_guess = extdata.columns["AV"]
    else:
        AV_guess = None

    # convert to A(lambda)/A(1 micron)
    # ind1 = np.abs(waves - 1).argmin()
    # exts = exts / exts[ind1]
    # exts_unc = exts_unc / exts[ind1]

    # obtain the function to fit
    if "SpeX_LXD" not in extdata.waves.keys():
        dense = False
        fixed = False
    func = fit_function(
        dattype=extdata.type,
        functype=functype,
        dense=dense,
        profile=profile,
        AV_guess=AV_guess,
        fixed=fixed,
    )

    # for dense sightlines, add more weight to the feature region
    weights = 1 / exts_unc
    if dense:
        mask_ice = (waves > 2.88) & (waves < 3.19)
        mask_tail = (waves > 3.4) & (waves < 4)
        weights[mask_ice | mask_tail] *= 2

    # use the Levenberg-Marquardt algorithm to fit the data with the model
    fit = LevMarLSQFitter()
    fit_result_lev = fit(func, waves, exts, weights=weights, maxiter=10000)

    # set up the backend to save the samples for the emcee runs
    emcee_samples_file = path + "Fitting_results/" + starpair + "_emcee_samples.h5"

    # do the fitting again, with MCMC, using the results from the first fitting as input
    fit2 = EmceeFitter(nsteps=10000, burnfrac=0.1, save_samples=emcee_samples_file)

    # add parameter bounds
    for param in fit_result_lev.param_names:
        if "amplitude" in param:
            getattr(fit_result_lev, param).bounds = (0, 2)
        elif "alpha" in param:
            getattr(fit_result_lev, param).bounds = (0, 4)
        elif "Av" in param:
            getattr(fit_result_lev, param).bounds = (0, 10)

    fit_result_mcmc = fit2(fit_result_lev, waves, exts, weights=weights)

    # create standard MCMC plots
    fit2.plot_emcee_results(
        fit_result_mcmc, filebase=path + "Fitting_results/" + starpair
    )

    # choose the fit result to save
    fit_result = fit_result_mcmc
    # fit_result = fit_result_lev
    print(fit_result)

    # determine the wavelengths at which to evaluate and save the fitted model curve: all SpeX wavelengths, sorted from short to long (to avoid problems with overlap between SXD and LXD), and shortest and longest wavelength should have data
    if "SpeX_LXD" not in extdata.waves.keys():
        full_waves = extdata.waves["SpeX_SXD"].value
        full_npts = extdata.npts["SpeX_SXD"]
    else:
        full_waves = np.concatenate(
            (extdata.waves["SpeX_SXD"].value, extdata.waves["SpeX_LXD"].value)
        )
        full_npts = np.concatenate((extdata.npts["SpeX_SXD"], extdata.npts["SpeX_LXD"]))
    # sort the wavelengths
    indxs_sort = np.argsort(full_waves)
    full_waves = full_waves[indxs_sort]
    full_npts = full_npts[indxs_sort]
    # cut the wavelength region
    indxs = np.logical_and(full_waves >= np.min(waves), full_waves <= np.max(waves))
    full_waves = full_waves[indxs]
    full_npts = full_npts[indxs]

    # calculate the residuals and put them in an array of the same length as "full_waves" for plotting
    residuals = exts - fit_result(waves)
    # use float dtype in case full_npts is an integer array (np.nan needs floats)
    full_res = np.full_like(full_npts, np.nan, dtype=float)
    if exclude:
        mask = np.full_like(full_waves, False, dtype=bool)
        for region in exclude:
            mask |= (full_waves > region[0]) & (full_waves < region[1])
        full_res[(full_npts > 0) & ~mask] = residuals
    else:
        full_res[full_npts > 0] = residuals

    # bootstrap to get more realistic uncertainties on the parameter results
    if bootstrap:
        red_star = StarData(extdata.red_file, path=path, use_corfac=True)
        comp_star = StarData(extdata.comp_file, path=path, use_corfac=True)
        red_V_unc = red_star.data["BAND"].get_band_mag("V")[1]
        comp_V_unc = comp_star.data["BAND"].get_band_mag("V")[1]
        unc_V = np.sqrt(red_V_unc ** 2 + comp_V_unc ** 2)
        fit_result_mcmc_low = fit2(fit_result_lev, waves, exts - unc_V, weights=weights)
        fit_result_mcmc_high = fit2(
            fit_result_lev, waves, exts + unc_V, weights=weights
        )

    # save the fitting results to the fits file
    if dense:
        functype += "_" + profile
    extdata.model["type"] = functype + "_" + extdata.type
    extdata.model["waves"] = full_waves
    extdata.model["exts"] = fit_result(full_waves)
    extdata.model["residuals"] = full_res
    extdata.model["chi2"] = np.sum((residuals / exts_unc) ** 2)
    print("Chi2", extdata.model["chi2"])
    extdata.model["params"] = []
    for param in fit_result.param_names:
        # update the uncertainties when bootstrapping
        if bootstrap:
            min_val = min(
                getattr(fit_result_mcmc, param).value,
                getattr(fit_result_mcmc_low, param).value,
                getattr(fit_result_mcmc_high, param).value,
            )
            max_val = max(
                getattr(fit_result_mcmc, param).value,
                getattr(fit_result_mcmc_low, param).value,
                getattr(fit_result_mcmc_high, param).value,
            )
            sys_unc = (max_val - min_val) / 2
            getattr(fit_result, param).unc_minus = np.sqrt(
                getattr(fit_result, param).unc_minus ** 2 + sys_unc ** 2
            )
            getattr(fit_result, param).unc_plus = np.sqrt(
                getattr(fit_result, param).unc_plus ** 2 + sys_unc ** 2
            )

        extdata.model["params"].append(getattr(fit_result, param))

        # save the column information (A(V), E(B-V) and R(V))
        if "Av" in param:
            extdata.columns["AV"] = (
                getattr(fit_result, param).value,
                getattr(fit_result, param).unc_minus,
                getattr(fit_result, param).unc_plus,
            )
            # calculate the distribution of R(V) and 1/R(V) from the distributions of A(V) and E(B-V)
            nsamples = getattr(fit_result, param).posterior.n_samples
            av_dist = unc.normal(
                extdata.columns["AV"][0],
                std=(extdata.columns["AV"][1] + extdata.columns["AV"][2]) / 2,
                n_samples=nsamples,
            )
            b_indx = np.abs(extdata.waves["BAND"] - 0.438 * u.micron).argmin()
            ebv_dist = unc.normal(
                extdata.exts["BAND"][b_indx],
                std=extdata.uncs["BAND"][b_indx],
                n_samples=nsamples,
            )
            ebv_per = ebv_dist.pdf_percentiles([16.0, 50.0, 84.0])
            extdata.columns["EBV"] = (
                ebv_per[1],
                ebv_per[1] - ebv_per[0],
                ebv_per[2] - ebv_per[1],
            )
            rv_dist = av_dist / ebv_dist
            rv_per = rv_dist.pdf_percentiles([16.0, 50.0, 84.0])
            extdata.columns["RV"] = (
                rv_per[1],
                rv_per[1] - rv_per[0],
                rv_per[2] - rv_per[1],
            )
            inv_rv_dist = ebv_dist / av_dist
            inv_rv_per = inv_rv_dist.pdf_percentiles([16.0, 50.0, 84.0])
            extdata.columns["IRV"] = (
                inv_rv_per[1],
                inv_rv_per[1] - inv_rv_per[0],
                inv_rv_per[2] - inv_rv_per[1],
            )
            print(extdata.columns)

    # save the fits file
    extdata.save(filename)

    # print information about the ice feature
    if fixed:
        print(
            "Ice feature strength: ",
            extdata.model["params"][3].value,
            extdata.model["params"][3].unc_minus,
            extdata.model["params"][3].unc_plus,
        )
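
A minimal usage sketch for fit_spex_ext; the star pair name and data path below are placeholders, not values from the source.

# hypothetical call: fit the NIR extinction curve of a (placeholder) star pair,
# including the 3 and 3.4 micron features and bootstrapped uncertainties
fit_spex_ext(
    "reddenedstar_comparisonstar",  # placeholder starpair name
    "/path/to/data/",               # placeholder data directory
    functype="pow",
    dense=True,
    profile="gauss_asym",
    bootstrap=True,
)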
Example 4
from astropy.cosmology import FlatLambdaCDM, Planck15
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.uncertainty as aun
import astropy.constants as ac
from uncertainties import ufloat

coords = SkyCoord('09h47m40.156s -30d56m55.44s', frame='fk5')

# from page 64 of
# https://ui.adsabs.harvard.edu/abs/2003AJ....126.2268W/abstract
z = ((2544 * u.km / u.s) / ac.c).to('')
deltaz = ((12 * u.km / u.s) / ac.c).to('')

# from
# http://simbad.u-strasbg.fr/simbad/sim-id?Ident=MCG-5-23-16
z2 = ((2466.09 * u.km / u.s) / ac.c).to('')
deltaz2 = ((47.97 * u.km / u.s) / ac.c).to('')

# consistency of the two redshift measurements (the second term should use
# the SIMBAD values z2/deltaz2; ufloat takes plain floats)
compat = ufloat(z.value, deltaz.value) - ufloat(z2.value, deltaz2.value)

# cosmo = FlatLambdaCDM(H0=67.8, Om0=.308)
cosmo = Planck15

z_pdf = aun.normal(z.value, std=deltaz.value, n_samples=10_000)

dist = aun.Distribution(cosmo.comoving_distance(z_pdf.distribution))
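
The distance distribution can then be summarized with percentiles, mirroring the other examples on this page (pdf_percentiles is standard astropy.uncertainty API; the print format is illustrative):

# median and 1-sigma credible interval of the comoving distance
d_lo, d_med, d_hi = dist.pdf_percentiles([16.0, 50.0, 84.0])
print(f"D_C = {d_med:.1f} (+{(d_hi - d_med):.1f} / -{(d_med - d_lo):.1f})")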
Example 5
# assumed imports for this snippet: numpy as np, astropy.units as u,
# astropy.uncertainty as unc, astropy.constants as const,
# astropy.coordinates.SkyCoord; `cluster_centre` is a SkyCoord defined elsewhere
def ESO280_params(PRINT=True):
    params_dict = {
        "eso_280_m_M_V": 17.011,
        "eso_280_e_m_M_V": 0.045,
        "eso_280_ebv": 0.141,
        "eso_280_e_ebv": 0.006,
        "rv": 94.644,
        "e_rv": 0.476,
        "std_rv": 2.305,
        "e_std_rv": 0.363,
        "r_t": [0.14693 * u.deg, 0.04126 * u.deg],
        "r_c": [0.00410 * u.deg, 0.00009 * u.deg]
    }
    n_samples = 10000
    eso_280_m_M_V_dist = unc.normal(params_dict['eso_280_m_M_V'],
                                    std=params_dict['eso_280_e_m_M_V'],
                                    n_samples=n_samples)
    eso_280_ebv_dist = unc.normal(params_dict['eso_280_ebv'],
                                  std=params_dict['eso_280_e_ebv'],
                                  n_samples=n_samples)
    eso_280_m_M_0_dist = eso_280_m_M_V_dist - 3.1 * eso_280_ebv_dist
    eso_280_dist_dist = unc.Distribution(
        10**(1 + eso_280_m_M_0_dist / 5).distribution * u.pc)

    # Hardcoded values. Calculated using velocity_estimate.py
    rv_dist = unc.normal(params_dict['rv'] * u.km / u.s,
                         std=params_dict['e_rv'] * u.km / u.s,
                         n_samples=n_samples)
    rv_std_dist = unc.normal(params_dict['std_rv'] * u.km / u.s,
                             std=params_dict['e_std_rv'] * u.km / u.s,
                             n_samples=n_samples)
    # Size values from ASteCA
    r_0_dist = unc.normal(params_dict['r_c'][0],
                          std=params_dict['r_c'][1],
                          n_samples=n_samples)
    r_t_dist = unc.normal(params_dict['r_t'][0],
                          std=params_dict['r_t'][1],
                          n_samples=n_samples)
    size_dist = (np.tan(r_0_dist) * eso_280_dist_dist)
    tidal_dist = (np.tan(r_t_dist) * eso_280_dist_dist)
    cluster_mass = ((7.5 * rv_std_dist**2 * 4 / 3 * size_dist) / const.G)

    sc_best = SkyCoord(ra=cluster_centre.ra,
                       dec=cluster_centre.dec,
                       radial_velocity=rv_dist.pdf_mean(),
                       distance=eso_280_dist_dist.pdf_mean(),
                       pm_ra_cosdec=-0.548 * u.mas / u.yr,
                       pm_dec=-2.688 * u.mas / u.yr)
    eso_280_pmra_dist = unc.normal(sc_best.pm_ra_cosdec,
                                   std=0.073 * u.mas / u.yr,
                                   n_samples=n_samples)
    eso_280_pmdec_dist = unc.normal(sc_best.pm_dec,
                                    std=0.052 * u.mas / u.yr,
                                    n_samples=n_samples)
    sc_dist = SkyCoord(
        ra=np.ones(eso_280_dist_dist.n_samples) * cluster_centre.ra,
        dec=np.ones(eso_280_dist_dist.n_samples) * cluster_centre.dec,
        radial_velocity=rv_dist.distribution,
        distance=eso_280_dist_dist.distribution,
        pm_ra_cosdec=eso_280_pmra_dist.distribution,
        pm_dec=eso_280_pmdec_dist.distribution)
    if PRINT:
        print(
            rf"$r_c$ & ${params_dict['r_c'][0].to(u.arcsec).value:0.2f}\pm{params_dict['r_c'][1].to(u.arcsec).value:0.2f}$~arcsec\\"
        )
        print(
            rf"$r_t$ & ${params_dict['r_t'][0].to(u.arcmin).value:0.2f}\pm{params_dict['r_t'][1].to(u.arcmin).value:0.2f}$~arcmin\\"
        )
        print(
            rf"$(m-M)_V$ & ${params_dict['eso_280_m_M_V']:0.2f}\pm{params_dict['eso_280_e_m_M_V']:0.2f}$\\"
        )
        print(
            rf"$\ebv$ & ${params_dict['eso_280_ebv']:0.2f}\pm{params_dict['eso_280_e_ebv']:0.2f}$\\"
        )
        # pdf_mean and pdf_std are methods, so they must be called with ()
        print(
            rf"$(m-M)_0$ & ${eso_280_m_M_0_dist.pdf_mean():0.2f}\pm{eso_280_m_M_0_dist.pdf_std():0.2f}$\\"
        )
        print(
            rf"$d_\odot$ & ${eso_280_dist_dist.pdf_mean().to(u.kpc).value:0.1f}\pm{eso_280_dist_dist.pdf_std().to(u.kpc).value:0.1f}$~kpc\\"
        )
        print(
            rf"$r_c$ & ${size_dist.pdf_mean().to(u.pc).value:0.2f}\pm{size_dist.pdf_std().to(u.pc).value:0.2f}$~pc\\"
        )
        print(
            rf"$r_t$ & ${tidal_dist.pdf_mean().to(u.pc).value:0.1f}\pm{tidal_dist.pdf_std().to(u.pc).value:0.1f}$~pc\\"
        )
        print(
            rf"Mass & $({cluster_mass.pdf_mean().to(u.solMass).value/1000:0.1f}\pm{cluster_mass.pdf_std().to(u.solMass).value/1000:0.1f})\times10^3$~M$_\odot$\\"
        )
        print(
            rf"$v_r$ & ${params_dict['rv']:0.2f}\pm{params_dict['e_rv']:0.2f}$\kms\\"
        )
        print(
            rf"$\sigma_r$ & ${params_dict['std_rv']:0.2f}\pm{params_dict['e_std_rv']:0.2f}$\kms\\"
        )
    return params_dict, sc_best, sc_dist
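
A quick usage sketch, assuming cluster_centre and the imports noted above are in scope:

# retrieve the parameter table plus the best-fit and sampled SkyCoord objects
params_dict, sc_best, sc_dist = ESO280_params(PRINT=False)
print(sc_best)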
Example 6
# ## Constants [constants](https://docs.astropy.org/en/stable/constants/)

from astropy import constants as const

const.c

const.hbar

print(const.c)

print(const.hbar)

from astropy import uncertainty as unc
import astropy.units as u

a = unc.normal([1, 2] * u.kpc, std=[30, 50] * u.pc, n_samples=100000)

a

a.pdf_mean()

a.pdf_std()
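
# percentiles are also available directly on distributions
# (same astropy.uncertainty API used in the other examples on this page)
a.pdf_percentiles([16.0, 50.0, 84.0])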

# # Tables [Table](https://docs.astropy.org/en/stable/table/)

from astropy.table import Table

t = Table.read('../data/AF.csv')
t

t[0]
                if sname != "DIFFUS":
                    mcmcfile = bfile.replace(".fits", ".h5")
                    reader = emcee.backends.HDFBackend(mcmcfile)
                    nsteps, nwalkers = reader.get_log_prob().shape
                    samples = reader.get_chain(discard=int(mcmc_burnfrac *
                                                           nsteps),
                                               flat=True)

                    # R(V) calc
                    avs_dist = unc.Distribution(samples[:, -1])
                    (indxs,) = np.where(
                        (edata.waves["BAND"] > 0.4 * u.micron)
                        & (edata.waves["BAND"] < 0.5 * u.micron)
                    )
                    ebvs_dist = unc.normal(
                        edata.exts["BAND"][indxs[0]],
                        std=edata.uncs["BAND"][indxs[0]],
                        n_samples=avs_dist.n_samples,
                    )

                    rvs_dist = avs_dist / ebvs_dist
                    rv_per = rvs_dist.pdf_percentiles([16.0, 50.0, 84.0])
                    val = rv_per[1]
                    punc = rv_per[2] - rv_per[1]
                    munc = rv_per[1] - rv_per[0]
                else:
                    (indxs,) = np.where(
                        (edata.waves["BAND"] > 0.4 * u.micron)
                        & (edata.waves["BAND"] < 0.5 * u.micron)
                    )
                    # diffuse average: R(V) = 1 / (A(B)/A(V) - 1), no uncertainty
                    val = 1.0 / (edata.exts["BAND"][indxs[0]] - 1)
                    punc, munc = 0.0, 0.0