def test_get_fitdata():
    data_path = pkg_resources.resource_filename("measure_extinction", "data/")

    # read in the observed data of the stars
    redstar = StarData("hd229238.dat", path=data_path)
    compstar = StarData("hd204172.dat", path=data_path)

    # calculate the extinction curve
    ext = ExtData()
    ext.calc_elx(redstar, compstar)

    # once wavelength units saved, update FITS file and use this line instead
    # of the 4 lines above
    # ext = ExtData(filename=data_path + "hd283809_hd064802_ext.fits")

    wave, y, unc = ext.get_fitdata(
        ["BAND", "IUE"], remove_uvwind_region=True, remove_lya_region=True
    )

    # fitting routines often cannot handle units: the wavelengths keep their units,
    # but the extinction values and uncertainties should be plain arrays
    assert isinstance(wave, u.Quantity)
    assert not isinstance(y, u.Quantity)
    assert not isinstance(unc, u.Quantity)
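# Illustrative sketch (not a test): downstream fitting scripts usually strip the
# units from the wavelengths returned by get_fitdata by converting to inverse
# microns, since most fitters only accept plain numbers. The extinction file name
# below is one of the curves shipped with the package data and is used here
# purely for illustration.
def example_strip_units():
    import astropy.units as u
    import pkg_resources
    from measure_extinction.extdata import ExtData

    data_path = pkg_resources.resource_filename("measure_extinction", "data/")
    ext = ExtData(filename=data_path + "hd229238_hd204172_ext.fits")
    wave, y, unc = ext.get_fitdata(
        ["BAND", "IUE"], remove_uvwind_region=True, remove_lya_region=True
    )
    # convert the wavelengths to unitless inverse microns for the fitter
    x = wave.to(1.0 / u.micron, equivalencies=u.spectral()).value
    return x, y, unc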
def SNR_ext(data_path, plot_path, starpair_list, plot=False):
    """
    - Calculate the median SNR of the extinction curves in certain wavelength regions
    - Plot the SNR of the extinction curves if requested

    Parameters
    ----------
    data_path : string
        Path to the data files

    plot_path : string
        Path to save the plots

    starpair_list : list of strings
        List of star pairs for which to calculate (and plot) the SNR, in the format "reddenedstarname_comparisonstarname" (no spaces)

    plot : boolean [default=False]
        Whether or not to plot the SNR vs. wavelength for every curve

    Returns
    -------
    Median SNRs in certain wavelength regions (printed)
    Plots of the SNR vs. wavelength (if requested)
    """
    meds = np.zeros((3, len(starpair_list)))
    for j, starpair in enumerate(starpair_list):
        # obtain the extinction curve data
        extdata = ExtData("%s%s_ext.fits" % (data_path, starpair.lower()))

        # transform the curve from E(lambda-V) to A(lambda)/A(V)
        extdata.trans_elv_alav()

        # obtain flat arrays
        waves, exts, uncs = extdata.get_fitdata(["SpeX_SXD", "SpeX_LXD"])

        # calculate the median SNR in certain wavelength regions
        ranges = [
            (0.79, 2.54),
            (2.85, 4.05),
            (4.55, 5.5),
        ]
        SNR = exts / uncs
        for i, wave_range in enumerate(ranges):
            mask = (waves.value > wave_range[0]) & (waves.value < wave_range[1])
            meds[i][j] = np.median(np.abs(SNR[mask]))

        # plot SNR vs. wavelength if requested
        if plot:
            fig, ax = plt.subplots()
            ax.scatter(waves, SNR, s=1)
            plt.savefig(plot_path + starpair + "_SNR.pdf")
            plt.close(fig)

    print(ranges[0], np.nanmin(meds[0]), np.nanmax(meds[0]))
    print(ranges[1], np.nanmin(meds[1]), np.nanmax(meds[1]))
    print(ranges[2], np.nanmin(meds[2]), np.nanmax(meds[2]))
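# Hypothetical usage sketch: the paths below are placeholders, and the
# corresponding "<starpair>_ext.fits" files must already exist in data_path
# (e.g. created with calc_extinction). The star-pair names are only examples.
def example_SNR_ext():
    starpairs = ["HD283809_HD064802", "HD029647_HD195986"]  # placeholder pairs
    SNR_ext("/path/to/exts/", "/path/to/plots/", starpairs, plot=True)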
def calc_ave_ext(starpair_list, path, min_number=1):
    """
    Calculate and save the average extinction curve of the star pairs in
    starpair_list (format "reddenedstarname_comparisonstarname"), requiring at
    least min_number curves per wavelength; the result is written to
    path + "average_ext.fits".
    """
    extdatas = []
    for starpair in starpair_list:
        extdata = ExtData("%s%s_ext.fits" % (path, starpair.lower()))
        extdatas.append(extdata)
    average = AverageExtData(extdatas, min_number=min_number)
    average.save(path + "average_ext.fits")
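# Hypothetical usage sketch: averages the extinction curves of a few (placeholder)
# star pairs; the resulting "average_ext.fits" is what plot_average and the
# R(V)-dependence fitting expect to find in the same path.
def example_calc_ave_ext():
    path = "/path/to/exts/"  # placeholder path containing the *_ext.fits files
    starpairs = ["HD283809_HD064802", "HD029647_HD195986"]  # placeholder pairs
    calc_ave_ext(starpairs, path, min_number=2)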
def test_fit_band_ext(): # only for alax=False (for now) # get the location of the data files data_path = pkg_resources.resource_filename("measure_extinction", "data/") # read in the extinction curve data extdata = ExtData(data_path + "hd229238_hd204172_ext.fits") # fit the extinction curve with a powerlaw based on the band data extdata.fit_band_ext() # test the fitting results waves, exts, res = np.loadtxt( data_path + "fit_band_ext_result_hd229238_hd204172.txt", unpack=True ) np.testing.assert_almost_equal(extdata.model["waves"], waves) np.testing.assert_almost_equal(extdata.model["exts"], exts) np.testing.assert_almost_equal(extdata.model["residuals"], res) np.testing.assert_almost_equal( extdata.model["params"], (0.7593262393303228, 1.345528276482045, 2.6061368004634025), ) np.testing.assert_almost_equal(extdata.columns["AV"], 2.6061368004634025)
def test_fit_spex_ext(): # only for alax=False (for now) # get the location of the data files data_path = pkg_resources.resource_filename("measure_extinction", "data/") # read in the extinction curve data extdata = ExtData(data_path + "hd229238_hd204172_ext.fits") # fit the extinction curve with a powerlaw based on the SpeX data extdata.fit_spex_ext() # test the fitting results waves, exts, res = np.loadtxt( data_path + "fit_spex_ext_result_hd229238_hd204172.txt", unpack=True ) np.testing.assert_almost_equal(extdata.model["waves"], waves) np.testing.assert_almost_equal(extdata.model["exts"], exts) np.testing.assert_almost_equal(extdata.model["residuals"], res) np.testing.assert_almost_equal( extdata.model["params"], (0.8680132704511972, 2.023865293614347, 2.5626900237367805), ) np.testing.assert_almost_equal(extdata.columns["AV"], 2.5626900237367805)
def test_calc_ext(): # get the location of the data files data_path = pkg_resources.resource_filename("measure_extinction", "data/") # read in the observed data of the stars redstar = StarData("hd229238.dat", path=data_path) compstar = StarData("hd204172.dat", path=data_path) # calculate the extinction curve ext = ExtData() ext.calc_elx(redstar, compstar) # test that the quantities have units (or not as appropriate) for cursrc in ext.waves.keys(): assert isinstance(ext.waves[cursrc], u.Quantity) assert not isinstance(ext.exts[cursrc], u.Quantity) assert not isinstance(ext.uncs[cursrc], u.Quantity) assert not isinstance(ext.npts[cursrc], u.Quantity) # check that the wavelengths can be converted to microns for cursrc in ext.waves.keys(): twave = ext.waves[cursrc].to(u.micron) assert twave.unit == u.micron
def calc_extinction(redstarname, compstarname, path):
    """
    Calculate the E(lambda-V) extinction curve of the reddened star
    "redstarname" relative to the comparison star "compstarname" (both read
    from their .dat files in "path"), and save it as
    "<redstarname>_<compstarname>_ext.fits".
    """
    # read in the observed data for both stars
    redstarobs = StarData("%s.dat" % redstarname.lower(), path=path)
    compstarobs = StarData("%s.dat" % compstarname.lower(), path=path)

    # calculate the extinction curve
    extdata = ExtData()
    extdata.calc_elx(redstarobs, compstarobs)
    extdata.save(path + "%s_%s_ext.fits" % (redstarname.lower(), compstarname.lower()))
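# Hypothetical usage sketch: the star names and path are placeholders; the
# corresponding "<starname>.dat" files (and the spectra they point to) must be
# present in "path". The result is saved as "hd229238_hd204172_ext.fits".
def example_calc_extinction():
    calc_extinction("HD229238", "HD204172", "/path/to/data/")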
def test_calc_AV_RV(): # get the location of the data files data_path = pkg_resources.resource_filename("measure_extinction", "data/") # read in the observed data of the stars redstar = StarData("hd229238.dat", path=data_path) compstar = StarData("hd204172.dat", path=data_path) # calculate the extinction curve ext = ExtData() ext.calc_elx(redstar, compstar) # calculate A(V) ext.calc_AV() np.testing.assert_almost_equal(ext.columns["AV"], 2.5626900237367805) # calculate R(V) ext.calc_RV() np.testing.assert_almost_equal(ext.columns["RV"], 2.614989769244703)
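# Consistency sketch (assumption: columns["AV"] and columns["RV"] are stored as
# scalars, as in the test above): R(V) is defined as A(V)/E(B-V), so the asserted
# values imply E(B-V) = A(V)/R(V) ~ 2.5627 / 2.6150 ~ 0.98 mag for this sightline.
def example_implied_ebv(ext):
    av = ext.columns["AV"]
    rv = ext.columns["RV"]
    ebv = av / rv  # E(B-V) implied by the definition R(V) = A(V)/E(B-V)
    return ebv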
def plot_extinction( starpair, path, alax=False, extmodels=False, fitmodel=False, HI_lines=False, range=None, exclude=[], log=False, pdf=False, ): """ Plot the extinction curve of a star Parameters ---------- starpair : string Name of the star pair for which to plot the extinction curve, in the format "reddenedstarname_comparisonstarname" (no spaces) path : string Path to the data files alax : boolean [default=False] Whether or not to plot A(lambda)/A(X) instead of E(lambda-X) extmodels: boolean [default=False] Whether or not to overplot Milky Way extinction curve models fitmodel: boolean [default=False] Whether or not to overplot a fitted model HI_lines : boolean [default=False] Whether or not to indicate the HI-lines in the plot range : list of 2 floats [default=None] Wavelength range to be plotted (in micron) - [min,max] exclude : list of strings [default=[]] List of data type(s) to exclude from the plot (e.g., IRS) log : boolean [default=False] Whether or not to plot the wavelengths on a log scale pdf : boolean [default=False] Whether or not to save the figure as a pdf file Returns ------- Figure with extinction curve """ # plotting setup for easier to read plots fontsize = 18 font = {"size": fontsize} plt.rc("font", **font) plt.rc("lines", linewidth=1) plt.rc("axes", linewidth=2) plt.rc("xtick.major", width=2, size=10) plt.rc("xtick.minor", width=1, size=5) plt.rc("ytick.major", width=2, size=10) plt.rc("ytick.minor", width=1, size=5) plt.rc("axes.formatter", min_exponent=2) # create the plot fig, ax = plt.subplots(figsize=(13, 10)) # read in and plot the extinction curve data for this star extdata = ExtData("%s%s_ext.fits" % (path, starpair.lower())) extdata.plot(ax, alax=alax, exclude=exclude, color="k") # define the output name outname = "%s_ext_%s.pdf" % (starpair.lower(), extdata.type) # plot Milky Way extinction models if requested if extmodels: plot_extmodels(extdata, alax) # overplot a fitted model if requested if fitmodel: plot_fitmodel(extdata, res=True) # plot HI-lines if requested if HI_lines: plot_HI(path, ax) # zoom in on a specific region if requested if range is not None: zoom(ax, range) outname = outname.replace(".pdf", "_zoom.pdf") # finish configuring the plot ax.set_title(starpair.split("_")[0], fontsize=50) ax.text( 0.99, 0.95, "comparison: " + starpair.split("_")[1], fontsize=25, horizontalalignment="right", transform=ax.transAxes, ) if log: ax.set_xscale("log") plt.xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.5 * fontsize) ax.set_ylabel(extdata._get_ext_ytitle(ytype=extdata.type), fontsize=1.5 * fontsize) # show the figure or save it to a pdf file if pdf: fig.savefig(path + outname, bbox_inches="tight") plt.close() else: plt.show()
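# Hypothetical usage sketch: plot the extinction curve of a single (placeholder)
# star pair, zoomed on 0.75-5.5 micron with a log wavelength axis, and save it as
# a pdf next to the data; the path is a placeholder and the *_ext.fits file must exist.
def example_plot_extinction():
    plot_extinction(
        "HD283809_HD064802",  # placeholder star pair
        "/path/to/exts/",     # placeholder path
        range=[0.75, 5.5],
        log=True,
        pdf=True,
    )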
format="ascii.basic", data_start=1) ax[1].plot( a["lambda"], a["alk"], "b-.", label="Cyg OB-12: Hensley & Draine 2020", lw=2, alpha=0.25, ) # New measurements avefilename = "data/all_ext_14oct20_diffuse_ave.fits" # get G20_MWAvg G20 = ExtData() G20.read(avefilename) G20_wave = G20.waves["BAND"].value G20_ext = G20.exts["BAND"] G20_ext_uncs = G20.uncs["BAND"] gindxs_IRS = np.where(G20.npts["IRS"] > 0) G20_IRS_wave = G20.waves["IRS"][gindxs_IRS].value G20_IRS_ext = G20.exts["IRS"][gindxs_IRS] G20_IRS_uncs = G20.uncs["IRS"][gindxs_IRS] # photometry G20_y, ji, ki = get_elkejk_from_alav(G20_wave, G20_ext) ax[1].errorbar(
parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true") parser.add_argument("--path", help="path for the extinction curves") args = parser.parse_args() if args.path: locpath = args.path + "/" else: locpath = "" file = args.file ofile = file.replace(".fits", "_POWLAW2DRUDE.fits") # read in the observed E(l-V) or A(l)/A(V) extinction curve obsext = ExtData(filename=locpath + file) # get an observed extinction curve to fit (wave, y, y_unc) = obsext.get_fitdata(["BAND", "IRS"]) # remove units as fitting routines often cannot take numbers with units x = wave.to(1.0 / u.micron, equivalencies=u.spectral()).value if obsext.type == "elx": # determine the initial guess at the A(V) values # just use the average at wavelengths > 5 # limit as lambda -> inf, E(lamda-V) -> -A(V) (indxs, ) = np.where(1.0 / x > 5.0) av_guess = -1.0 * np.average(y[indxs]) if not np.isfinite(av_guess): av_guess = 1.0
def fit_plot_inv_rv_dep(inpath, plot_path, table_path, starpair_list_diff, starpair_list_dense, norm="V"): """ Fit and plot the relationship between A(lambda)/A(V) and 1/R(V) Parameters ---------- inpath : string Path to the input data files plot_path : string Path to save the plots table_path : string Path to save the table starpair_list_diffuse : list of strings List of diffuse star pairs to include in the fitting, in the format "reddenedstarname_comparisonstarname" (no spaces) starpair_list_dense : list of strings List of dense star pairs to include in the fitting, in the format "reddenedstarname_comparisonstarname" (no spaces) norm : string [default="V"] Band or wavelength for the normalization Returns ------- Several plots related to the R(V)-dependence """ # collect the data to be fitted inv_RVs, AVs, alavs, alav_uncs, waves, dense_bool = get_data( inpath, starpair_list_diff, starpair_list_dense, norm) inv_RV_vals = inv_RVs[:, 0] inv_RV_uncs = (inv_RVs[:, 1] + inv_RVs[:, 2]) / 2 AV_vals = AVs[:, 0] AV_uncs = (AVs[:, 1] + AVs[:, 2]) / 2 # for every wavelength, fit a straight line through the A(lambda)/A(V) vs. 1/R(V)-1/3.1 data fit = fitting.LinearLSQFitter() line_func = models.Linear1D() slopes, intercepts, stds = np.full((3, len(waves)), np.nan) corr_list = [] for j, wave in enumerate(waves): mask = ~np.isnan(alavs[j]) npts = np.sum(mask) # require at least 5 data points for the fitting if npts < 5: continue fitted_line = fit( line_func, (inv_RV_vals[mask] - 1 / 3.1), alavs[j][mask], weights=1 / alav_uncs[j][mask], ) # calculate the standard deviation about the fit # the "residuals" in the fit_info is the sum of the squared residuals # std = np.sqrt(fit.fit_info["residuals"] / (npts - 2)) # this does not work when using weights in the fitting # dividing by npts-2 is needed, because there are npts-2 degrees of freedom (subtract 1 for the slope and 1 for the intercept) std = np.sqrt( np.sum((fitted_line(inv_RV_vals[mask] - 1 / 3.1) - alavs[j][mask]) **2) / (npts - 2)) slopes[j] = fitted_line.slope.value intercepts[j] = fitted_line.intercept.value stds[j] = std # calculate the correlation coefficient between A(lambda)/A(V) and 1/R(V) for every sightline, and find out the range in correlation coefficients and the median value over all sightlines and all wavelengths corr = calc_corr( alavs[j][mask], alav_uncs[j][mask], inv_RV_vals[mask], inv_RV_uncs[mask], AV_vals[mask], AV_uncs[mask], ) for value in corr: corr_list.append(value) # print information about the correlation coefficients print( "Minimum, median and maximum correlation coefficients: ", np.min(corr_list), np.median(corr_list), np.max(corr_list), ) # plot A(lambda)/A(V) vs. 1/R(V) at certain wavelengths plot_waves = [0.89864296, 1.6499686, 2.4527225, 3.5002365, 4.697073] plot_inv_rv_dep( plot_path, inv_RVs, alavs, alav_uncs, waves, plot_waves, slopes, intercepts, dense_bool, norm=norm, ) # plot the slopes, intercepts and standard deviations vs. 
wavelength # color the data points at wavelengths > 4.03 gray fig, ax = plt.subplots(3, figsize=(9, 9), sharex=True) short_waves = (waves > 0.809) & (waves < 4.02) ms = 0.8 ax[0].scatter(waves, intercepts, color="k", s=ms) ax[1].scatter(waves[short_waves], slopes[short_waves], color="k", s=ms) ax[1].scatter(waves[~short_waves], slopes[~short_waves], color="gray", s=ms) ax[2].scatter(waves[short_waves], stds[short_waves], color="k", s=ms) ax[2].scatter(waves[~short_waves], stds[~short_waves], color="gray", s=ms) for wave in plot_waves: indx = np.abs(waves - wave).argmin() ax[0].scatter(wave, intercepts[indx], color="lime", s=50, marker="x", zorder=3) ax[1].scatter(wave, slopes[indx], color="lime", s=50, marker="x", zorder=3) ax[2].scatter(wave, stds[indx], color="lime", s=50, marker="x", zorder=3) # fit the slopes, intercepts and standard deviations vs. wavelength, and add the fit to the plot ( spline_wave, spline_slope, spline_std, fit_slopes, fit_intercepts, fit_stds, ) = fit_slopes_intercepts(slopes, intercepts, stds, waves, norm) ax[0].plot( waves, fit_intercepts(waves), color="crimson", ls="--", alpha=0.9, label=r"$%5.3f \ \lambda ^{-%5.2f}$" % (fit_intercepts.amplitude.value, fit_intercepts.alpha.value), ) slope_spline = interpolate.splev(waves[short_waves], fit_slopes) ax[1].scatter(spline_wave, spline_slope, color="r", marker="d", s=15) ax[1].plot( waves[short_waves], slope_spline, color="crimson", ls="--", alpha=0.9, ) std_spline = interpolate.splev(waves[short_waves], fit_stds) ax[2].scatter(spline_wave, spline_std, color="r", marker="d", s=10) ax[2].plot( waves[short_waves], std_spline, color="crimson", ls="--", alpha=0.9, ) # finalize and save the plot ax[0].legend(fontsize=fs) plt.xlabel(r"$\lambda\ [\mu m$]", fontsize=1.2 * fs) plt.xlim(0.75, 5.2) ax[0].set_ylim(-0.03, 0.6) ax[1].set_ylim(-1.1, 0.2) ax[2].set_ylim(0.005, 0.075) ax[0].set_ylabel(r"$a$", fontsize=1.2 * fs) ax[1].set_ylabel(r"$b$", fontsize=1.2 * fs) ax[2].set_ylabel(r"$\sigma$", fontsize=1.2 * fs) ax[0].axhline(ls="--", color="k", lw=1, alpha=0.6) ax[1].axhline(ls="--", color="k", lw=1, alpha=0.6) plt.subplots_adjust(hspace=0) plt.savefig(plot_path + "inv_RV_slope_inter" + str(norm) + ".pdf", bbox_inches="tight") # plot the residuals of the intercepts and add the residuals from the average extinction curve fitting res_inter = intercepts - fit_intercepts(waves) average = ExtData(inpath + "average_ext.fits") wave_ave = average.model["waves"] res_ave = average.model["residuals"] fig, ax = plt.subplots(figsize=(10, 7)) ax.scatter(waves, res_inter, s=1.5, color="k", alpha=0.8, label="intercepts") ax.scatter(wave_ave, res_ave, s=1.5, color="r", alpha=0.8, label="average") ax.axhline() plt.xlabel(r"$\lambda\ [\mu m$]") plt.ylabel(r"residual extinction") plt.ylim(-0.026, 0.026) plt.legend() plt.savefig(plot_path + "inv_inter_res" + str(norm) + ".pdf", bbox_inches="tight") # compare R(V)-dependent extinction curves to literature curves # only useful for extinction curves that are normalized to A(V) if norm == "V": plot_inv_RV_lit(plot_path, fit_slopes, fit_intercepts, fit_stds)
def table_inv_rv_dep(outpath, table_waves, fit_slopes, fit_intercepts, fit_stds, norm="V"): """ Create tables with the slopes, intercepts and standard deviations at wavelengths "table_waves", and the measured and fitted average extinction curve Parameters ---------- outpath : string Path to save the table table_waves : list List with wavelengths to be included in the table fit_slopes : tuple The interpolated spline for the slopes fit_intercepts : astropy model The fitted model for the intercepts fit_stds : tuple The interpolated spline for the standard deviations norm : string [default="V"] Band or wavelength for the normalization Returns ------- Tables of the R(V)-dependent relationship at wavelengths "table_waves": - in aaxtex format for the paper - in ascii format """ # obtain the slopes, intercepts and standard deviations at the table wavelengths table_slopes = interpolate.splev(table_waves, fit_slopes) table_intercepts = fit_intercepts(table_waves) table_stds = interpolate.splev(table_waves, fit_stds) # obtain the measured average extinction curve average = ExtData(inpath + "average_ext.fits") (ave_waves, exts, exts_unc) = average.get_fitdata(["SpeX_SXD", "SpeX_LXD"]) indx = np.argsort(ave_waves) ave_waves = ave_waves[indx].value exts = exts[indx] exts_unc = exts_unc[indx] # create wavelength bins and calculate the binned median extinction and uncertainty bin_edges = np.insert(table_waves + 0.025, 0, table_waves[0] - 0.025) meds, edges, indices = stats.binned_statistic( ave_waves, (exts, exts_unc), statistic="median", bins=bin_edges, ) # obtain the fitted average extinction curve ave_fit = average.model["params"][0] * table_waves**( -average.model["params"][2]) # obtain the measured average extinction in a few photometric bands bands = ["J", "H", "K", "WISE1", "L", "IRAC1"] band_waves = [1.22, 1.63, 2.19, 3.35, 3.45, 3.52] band_ave = get_phot(ave_waves, exts, bands) band_ave_unc = get_phot(ave_waves, exts_unc, bands) # obtain the fitted average extinction in a few photometric bands all_waves = np.arange(0.8, 4.05, 0.001) ave_fit_all = average.model["params"][0] * all_waves**( -average.model["params"][2]) band_ave_fit = get_phot(all_waves, ave_fit_all, bands) # obtain the slopes, intercepts and standard deviations in a few photometric bands band_slopes = get_phot(all_waves, -interpolate.splev(all_waves, fit_slopes), bands) band_intercepts = get_phot(all_waves, fit_intercepts(all_waves), bands) band_stds = get_phot(all_waves, interpolate.splev(all_waves, fit_stds), bands) # create the table table = Table( [ np.concatenate((band_waves, table_waves)), np.concatenate((band_ave, meds[0])), np.concatenate((band_ave_unc, meds[1])), np.concatenate((band_ave_fit, ave_fit)), np.concatenate((band_intercepts, table_intercepts)), np.concatenate((-band_slopes, table_slopes)), np.concatenate((band_stds, table_stds)), ], names=( "wavelength[micron]", "ave", "ave_unc", "ave_fit", "intercept", "slope", "std", ), ) # save it in ascii format table.write( outpath + "inv_RV_dep" + str(norm) + ".txt", format="ascii.commented_header", overwrite=True, ) # save it in aastex format table.write( outpath + "inv_RV_dep" + str(norm) + ".tex", format="aastex", names=( r"$\lambda\ [\micron]$", r"$\frac{A(\lambda)}{A(V)}$", "unc", "fit", r"$a(\lambda$)", r"$b(\lambda$)", r"$\sigma(\lambda)$", ), formats={ r"$\lambda\ [\micron]$": "{:.2f}", r"$\frac{A(\lambda)}{A(V)}$": "{:.3f}", "unc": "{:.3f}", "fit": "{:.3f}", r"$a(\lambda$)": "{:.3f}", r"$b(\lambda$)": "{:.3f}", r"$\sigma(\lambda)$": "{:.3f}", }, latexdict={ 
"col_align": "c|ccc|ccc", "tabletype": "deluxetable", "caption": r"Average diffuse Milky Way extinction curve and parameters of the linear relationship between extinction $A(\lambda)/A(V)$ and $1/R(V)$. \label{tab:RV_dep}", }, fill_values=[("nan", r"\nodata")], overwrite=True, )
def get_data(inpath, starpair_list_diff, starpair_list_dense, norm="V"): """ Obtain the required data for all stars in the star pair lists: - A(lambda)/A(V) - 1/R(V) - A(V) Parameters ---------- inpath : string Path to the input data files starpair_list_diffuse : list of strings List of diffuse star pairs to include in the fitting, in the format "reddenedstarname_comparisonstarname" (no spaces) starpair_list_dense : list of strings List of dense star pairs to include in the fitting, in the format "reddenedstarname_comparisonstarname" (no spaces) norm : string [default="V"] Band or wavelength for the normalization Returns ------- 1/R(V) with uncertainties, A(V) with uncertainties, A(lambda)/A(V) with uncertainties, wavelengths, boolean for dense/diffuse """ starpair_list = starpair_list_diff + starpair_list_dense inv_RVs = np.zeros((len(starpair_list), 3)) AVs = np.zeros((len(starpair_list), 3)) # determine the wavelengths at which to retrieve the extinction data extdata_model = ExtData("%s%s_ext.fits" % (inpath, starpair_list[0].lower())) waves = np.sort( np.concatenate(( extdata_model.waves["SpeX_SXD"].value, extdata_model.waves["SpeX_LXD"].value, ))) alavs = np.full((len(waves), len(starpair_list)), np.nan) alav_uncs = np.full((len(waves), len(starpair_list)), np.nan) dense_bool = np.full(len(starpair_list), False) # retrieve the information for all stars for i, starpair in enumerate(starpair_list): # retrieve 1/R(V) and A(V) (with uncertainties) extdata = ExtData("%s%s_ext.fits" % (inpath, starpair.lower())) inv_RVs[i] = np.array(extdata.columns["IRV"]) AVs[i] = np.array(extdata.columns["AV"]) # transform the curve from E(lambda-V) to A(lambda)/A(V) extdata.trans_elv_alav() # get the good data in flat arrays (flat_waves, flat_exts, flat_exts_unc) = extdata.get_fitdata(["SpeX_SXD", "SpeX_LXD"]) # convert extinction from A(lambda)/A(V) to A(lambda)/A(norm) if norm is not "V" if norm != "V": ind1 = np.abs(flat_waves.value - norm).argmin() flat_exts = flat_exts / flat_exts[ind1] flat_exts_unc = flat_exts_unc / flat_exts[ind1] # retrieve A(lambda)/A(V) at all wavelengths for j, wave in enumerate(waves): if wave in flat_waves.value: alavs[j][i] = flat_exts[flat_waves.value == wave] alav_uncs[j][i] = flat_exts_unc[flat_waves.value == wave] # flag the dense sightlines if starpair in dense: dense_bool[i] = True return inv_RVs, AVs, alavs, alav_uncs, waves, dense_bool
avefilenames = [ "data/all_ext_14oct20_diffuse_ave_POWLAW2DRUDE.fits", "fits/hd283809_hd064802_ext_POWLAW2DRUDE.fits", "fits/hd029647_hd195986_ext_POWLAW2DRUDE.fits", ] if not args.dense: avefilenames = avefilenames[0:1] pcol = ["k", "b", "m"] psym = ["o", "s", "^"] pline = ["-", "-.", ":"] plabel = ["diffuse", "HD283809", "HD029647"] for i, avefilename in enumerate(avefilenames): # IR Fit obsext = ExtData() obsext.read(avefilename) # UV fit obsext2 = ExtData() if plabel[i] == "diffuse": obsext2.read(avefilename.replace("POWLAW2DRUDE", "FM90")) obsext_wave = obsext.waves["BAND"].value obsext_ext = obsext.exts["BAND"] obsext_ext_uncs = obsext.uncs["BAND"] gindxs_IRS = np.where(obsext.npts["IRS"] > 0) obsext_IRS_wave = obsext.waves["IRS"][gindxs_IRS].value obsext_IRS_ext = obsext.exts["IRS"][gindxs_IRS] obsext_IRS_uncs = obsext.uncs["IRS"][gindxs_IRS] else:
type=int, default=100, help="# of steps in MCMC chain") parser.add_argument("--png", help="save figure as a png file", action="store_true") parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true") args = parser.parse_args() # get a saved extnction curve file = args.extfile # file = '/home/kgordon/Python_git/spitzer_mir_ext/fits/hd147889_hd064802_ext.fits' ofile = file.replace(".fits", "_P92.fits") extdata = ExtData(filename=file) # get an observed extinction curve to fit (wave, y, y_unc) = extdata.get_fitdata(["BAND", "IRS"], remove_uvwind_region=True, remove_lya_region=True, remove_irsblue=True) # ["BAND", "IUE", "IRS"], remove_uvwind_region=True, remove_lya_region=True # remove data affected by Ly-alpha absorption/emission gindxs = wave > (1.0 / 8.0) * u.micron wave = wave[gindxs] y = y[gindxs] y_unc = y_unc[gindxs] # remove units as fitting routines often cannot take numbers with units x = wave.to(1.0 / u.micron, equivalencies=u.spectral()).value
def fit_spex_ext( starpair, path, functype="pow", dense=False, profile="drude_asym", exclude=None, bootstrap=False, fixed=False, ): """ Fit the observed SpeX NIR extinction curve Parameters ---------- starpair : string Name of the star pair for which to fit the extinction curve, in the format "reddenedstarname_comparisonstarname" (no spaces), or "average" to fit the average extinction curve path : string Path to the data files functype : string [default="pow"] Fitting function type ("pow" for powerlaw or "pol" for polynomial) dense : boolean [default=False] Whether or not to fit the features around 3 and 3.4 micron profile : string [default="drude_asym"] Profile to use for the features if dense = True (options are "gauss", "drude", "lorentz", "gauss_asym", "drude_asym", "lorentz_asym") exclude : list of tuples [default=None] list of tuples (min,max) with wavelength regions (in micron) that need to be excluded from the fitting, e.g. [(0.8,1.2),(2.2,5)] bootstrap : boolean [default=False] Whether or not to do a quick bootstrap fitting to get more realistic uncertainties on the fitting results fixed : boolean [default=False] Whether or not to add a fixed feature around 3 micron (for diffuse sightlines) Returns ------- Updates extdata.model["type", "waves", "exts", "residuals", "chi2", "params"] and extdata.columns["AV"] with the fitting results: - type: string with the type of model (e.g. "pow_elx_Drude") - waves: np.ndarray with the SpeX wavelengths - exts: np.ndarray with the fitted model to the extinction curve at "waves" wavelengths - residuals: np.ndarray with the residuals, i.e. data-fit, at "waves" wavelengths - chi2 : float with the chi square of the fitting - params: list with output Parameter objects """ # retrieve the SpeX data to be fitted, and sort the curve from short to long wavelengths filename = "%s%s_ext.fits" % (path, starpair.lower()) if fixed: filename = filename.replace(".", "_ice.") extdata = ExtData(filename) (waves, exts, exts_unc) = extdata.get_fitdata(["SpeX_SXD", "SpeX_LXD"]) indx = np.argsort(waves) waves = waves[indx].value exts = exts[indx] exts_unc = exts_unc[indx] # exclude wavelength regions if requested if exclude: mask = np.full_like(waves, False, dtype=bool) for region in exclude: mask += (waves > region[0]) & (waves < region[1]) waves = waves[~mask] exts = exts[~mask] exts_unc = exts_unc[~mask] # get a quick estimate of A(V) if extdata.type == "elx": extdata.calc_AV() AV_guess = extdata.columns["AV"] else: AV_guess = None # convert to A(lambda)/A(1 micron) # ind1 = np.abs(waves - 1).argmin() # exts = exts / exts[ind1] # exts_unc = exts_unc / exts[ind1] # obtain the function to fit if "SpeX_LXD" not in extdata.waves.keys(): dense = False fixed = False func = fit_function( dattype=extdata.type, functype=functype, dense=dense, profile=profile, AV_guess=AV_guess, fixed=fixed, ) # for dense sightlines, add more weight to the feature region weights = 1 / exts_unc if dense: mask_ice = (waves > 2.88) & (waves < 3.19) mask_tail = (waves > 3.4) & (waves < 4) weights[mask_ice + mask_tail] *= 2 # use the Levenberg-Marquardt algorithm to fit the data with the model fit = LevMarLSQFitter() fit_result_lev = fit(func, waves, exts, weights=weights, maxiter=10000) # set up the backend to save the samples for the emcee runs emcee_samples_file = path + "Fitting_results/" + starpair + "_emcee_samples.h5" # do the fitting again, with MCMC, using the results from the first fitting as input fit2 = EmceeFitter(nsteps=10000, burnfrac=0.1, save_samples=emcee_samples_file) # add 
parameter bounds for param in fit_result_lev.param_names: if "amplitude" in param: getattr(fit_result_lev, param).bounds = (0, 2) elif "alpha" in param: getattr(fit_result_lev, param).bounds = (0, 4) elif "Av" in param: getattr(fit_result_lev, param).bounds = (0, 10) fit_result_mcmc = fit2(fit_result_lev, waves, exts, weights=weights) # create standard MCMC plots fit2.plot_emcee_results( fit_result_mcmc, filebase=path + "Fitting_results/" + starpair ) # choose the fit result to save fit_result = fit_result_mcmc # fit_result = fit_result_lev print(fit_result) # determine the wavelengths at which to evaluate and save the fitted model curve: all SpeX wavelengths, sorted from short to long (to avoid problems with overlap between SXD and LXD), and shortest and longest wavelength should have data if "SpeX_LXD" not in extdata.waves.keys(): full_waves = extdata.waves["SpeX_SXD"].value full_npts = extdata.npts["SpeX_SXD"] else: full_waves = np.concatenate( (extdata.waves["SpeX_SXD"].value, extdata.waves["SpeX_LXD"].value) ) full_npts = np.concatenate((extdata.npts["SpeX_SXD"], extdata.npts["SpeX_LXD"])) # sort the wavelengths indxs_sort = np.argsort(full_waves) full_waves = full_waves[indxs_sort] full_npts = full_npts[indxs_sort] # cut the wavelength region indxs = np.logical_and(full_waves >= np.min(waves), full_waves <= np.max(waves)) full_waves = full_waves[indxs] full_npts = full_npts[indxs] # calculate the residuals and put them in an array of the same length as "full_waves" for plotting residuals = exts - fit_result(waves) full_res = np.full_like(full_npts, np.nan) if exclude: mask = np.full_like(full_waves, False, dtype=bool) for region in exclude: mask += (full_waves > region[0]) & (full_waves < region[1]) full_res[(full_npts > 0) * ~mask] = residuals else: full_res[(full_npts > 0)] = residuals # bootstrap to get more realistic uncertainties on the parameter results if bootstrap: red_star = StarData(extdata.red_file, path=path, use_corfac=True) comp_star = StarData(extdata.comp_file, path=path, use_corfac=True) red_V_unc = red_star.data["BAND"].get_band_mag("V")[1] comp_V_unc = comp_star.data["BAND"].get_band_mag("V")[1] unc_V = np.sqrt(red_V_unc ** 2 + comp_V_unc ** 2) fit_result_mcmc_low = fit2(fit_result_lev, waves, exts - unc_V, weights=weights) fit_result_mcmc_high = fit2( fit_result_lev, waves, exts + unc_V, weights=weights ) # save the fitting results to the fits file if dense: functype += "_" + profile extdata.model["type"] = functype + "_" + extdata.type extdata.model["waves"] = full_waves extdata.model["exts"] = fit_result(full_waves) extdata.model["residuals"] = full_res extdata.model["chi2"] = np.sum((residuals / exts_unc) ** 2) print("Chi2", extdata.model["chi2"]) extdata.model["params"] = [] for param in fit_result.param_names: # update the uncertainties when bootstrapping if bootstrap: min_val = min( getattr(fit_result_mcmc, param).value, getattr(fit_result_mcmc_low, param).value, getattr(fit_result_mcmc_high, param).value, ) max_val = max( getattr(fit_result_mcmc, param).value, getattr(fit_result_mcmc_low, param).value, getattr(fit_result_mcmc_high, param).value, ) sys_unc = (max_val - min_val) / 2 getattr(fit_result, param).unc_minus = np.sqrt( getattr(fit_result, param).unc_minus ** 2 + sys_unc ** 2 ) getattr(fit_result, param).unc_plus = np.sqrt( getattr(fit_result, param).unc_plus ** 2 + sys_unc ** 2 ) extdata.model["params"].append(getattr(fit_result, param)) # save the column information (A(V), E(B-V) and R(V)) if "Av" in param: extdata.columns["AV"] = ( 
getattr(fit_result, param).value, getattr(fit_result, param).unc_minus, getattr(fit_result, param).unc_plus, ) # calculate the distrubtion of R(V) and 1/R(V) from the distributions of A(V) and E(B-V) nsamples = getattr(fit_result, param).posterior.n_samples av_dist = unc.normal( extdata.columns["AV"][0], std=(extdata.columns["AV"][1] + extdata.columns["AV"][2]) / 2, n_samples=nsamples, ) b_indx = np.abs(extdata.waves["BAND"] - 0.438 * u.micron).argmin() ebv_dist = unc.normal( extdata.exts["BAND"][b_indx], std=extdata.uncs["BAND"][b_indx], n_samples=nsamples, ) ebv_per = ebv_dist.pdf_percentiles([16.0, 50.0, 84.0]) extdata.columns["EBV"] = ( ebv_per[1], ebv_per[1] - ebv_per[0], ebv_per[2] - ebv_per[1], ) rv_dist = av_dist / ebv_dist rv_per = rv_dist.pdf_percentiles([16.0, 50.0, 84.0]) extdata.columns["RV"] = ( rv_per[1], rv_per[1] - rv_per[0], rv_per[2] - rv_per[1], ) inv_rv_dist = ebv_dist / av_dist inv_rv_per = inv_rv_dist.pdf_percentiles([16.0, 50.0, 84.0]) extdata.columns["IRV"] = ( inv_rv_per[1], inv_rv_per[1] - inv_rv_per[0], inv_rv_per[2] - inv_rv_per[1], ) print(extdata.columns) # save the fits file extdata.save(filename) # print information about the ice feature if fixed: print( "Ice feature strength: ", extdata.model["params"][3].value, extdata.model["params"][3].unc_minus, extdata.model["params"][3].unc_plus, )
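# Hypothetical usage sketch: fit a powerlaw to the SpeX extinction curve of a
# diffuse (placeholder) sightline, a powerlaw plus asymmetric Drude features to a
# dense one, and the "average" curve; the path and star-pair names are placeholders
# and the corresponding *_ext.fits files must already exist.
def example_fit_spex_ext():
    path = "/path/to/exts/"  # placeholder path
    fit_spex_ext("HD229238_HD204172", path)              # diffuse sightline
    fit_spex_ext("HD283809_HD064802", path, dense=True)  # dense sightline (3 and 3.4 micron features)
    fit_spex_ext("average", path)                        # average extinction curve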
"XO": r"{[$\micron^{-1}$]}", "GAMMA": r"{[$\micron^{-1}$]}", } mval = { "C1": 1, "C2": 1, "C3": 1, "C4": 1, "XO": 1, "GAMMA": 1, } okeys = ["C1", "C2", "C3", "C4", "XO", "GAMMA"] for k, bfile in enumerate(files): edata = ExtData(filename=bfile.replace(".fits", "_FM90.fits")) spos = names[k].find("_") sname = names[k][:spos].upper() pstr = f"{sname} & " for k, ckey in enumerate(okeys): if first_line: hstr += fr"\colhead{phead[ckey]} & " hstr2 += fr"\colhead{phead2[ckey]} & " val, punc, munc = edata.fm90_p50_fit[ckey] cmval = float(mval[ckey]) if sname == "DIFFUS": pstr += f"${cmval*val:.4f}^{{+{cmval*punc:.4f}}}_{{-{cmval*munc:.4f}}}$ & " else: pstr += f"${cmval*val:.3f}^{{+{cmval*punc:.3f}}}_{{-{cmval*munc:.3f}}}$ & "
def plot_multi_extinction( starpair_list, path, alax=False, average=False, extmodels=False, fitmodel=False, HI_lines=False, range=None, spread=False, exclude=[], log=False, text_offsets=[], text_angles=[], pdf=False, ): """ Plot the extinction curves of multiple stars in the same plot Parameters ---------- starpair_list : list of strings List of star pairs for which to plot the extinction curve, in the format "reddenedstarname_comparisonstarname" (no spaces) path : string Path to the data files alax : boolean [default=False] Whether or not to plot A(lambda)/A(X) instead of E(lambda-X) average : boolean [default=False] Whether or not to plot the average extinction curve extmodels: boolean [default=False] Whether or not to overplot Milky Way extinction curve models fitmodel: boolean [default=False] Whether or not to overplot a fitted model HI_lines : boolean [default=False] Whether or not to indicate the HI-lines in the plot range : list of 2 floats [default=None] Wavelength range to be plotted (in micron) - [min,max] spread : boolean [default=False] Whether or not to spread the extinction curves out by adding a vertical offset to each curve exclude : list of strings [default=[]] List of data type(s) to exclude from the plot (e.g., IRS) log : boolean [default=False] Whether or not to plot the wavelengths on a log-scale text_offsets : list of floats [default=[]] List of the same length as starpair_list with offsets for the annotated text text_angles : list of integers [default=[]] List of the same length as starpair_list with rotation angles for the annotated text pdf : boolean [default=False] Whether or not to save the figure as a pdf file Returns ------- Figure with extinction curves of multiple stars """ # plotting setup for easier to read plots fontsize = 18 font = {"size": fontsize} plt.rc("font", **font) plt.rc("lines", linewidth=1) plt.rc("axes", linewidth=2) plt.rc("xtick.major", width=2, size=10) plt.rc("xtick.minor", width=1, size=5) plt.rc("ytick.major", width=2, size=10) plt.rc("ytick.minor", width=1, size=5) plt.rc("axes.formatter", min_exponent=2) # create the plot fig, ax = plt.subplots(figsize=(15, len(starpair_list) * 1.25)) colors = plt.get_cmap("tab10") # set default text offsets and angles if text_offsets == []: text_offsets = np.full(len(starpair_list), 0.2) if text_angles == []: text_angles = np.full(len(starpair_list), 10) for i, starpair in enumerate(starpair_list): # read in the extinction curve data extdata = ExtData("%s%s_ext.fits" % (path, starpair.lower())) # spread out the curves if requested if spread: yoffset = 0.25 * i else: yoffset = 0.0 # determine where to add the name of the star # find the shortest plotted wavelength (waves, exts, ext_uncs) = extdata.get_fitdata(extdata.waves.keys() - exclude) if range is not None: waves = waves[waves.value >= range[0]] min_wave = waves[-1] # find out which data type corresponds with this wavelength for data_type in extdata.waves.keys(): if data_type in exclude: continue used_waves = extdata.waves[data_type][extdata.npts[data_type] > 0] if min_wave in used_waves: ann_key = data_type ann_range = [min_wave, min_wave] * u.micron # plot the extinction curve extdata.plot( ax, color=colors(i % 10), alpha=0.7, alax=alax, exclude=exclude, yoffset=yoffset, annotate_key=ann_key, annotate_wave_range=ann_range, annotate_text=extdata.red_file.split(".")[0].upper(), annotate_yoffset=text_offsets[i], annotate_rotation=text_angles[i], annotate_color=colors(i % 10), ) # overplot a fitted model if requested if fitmodel: 
plot_fitmodel(extdata, yoffset=yoffset) # overplot Milky Way extinction curve models if requested if extmodels: if alax: plot_extmodels(extdata, alax) else: warnings.warn( "Overplotting Milky Way extinction curve models on a figure with multiple observed extinction curves in E(lambda-V) units is disabled, because the model curves in these units are different for every star, and would overload the plot. Please, do one of the following if you want to overplot Milky Way extinction curve models: 1) Use the flag --alax to plot ALL curves in A(lambda)/A(V) units, OR 2) Plot all curves separately by removing the flag --onefig.", stacklevel=2, ) # plot the average extinction curve if requested if average: plot_average( path, ax=ax, extmodels=extmodels, fitmodel=fitmodel, exclude=exclude, spread=spread, annotate_key=ann_key, annotate_wave_range=ann_range, ) # define the output name outname = "all_ext_%s.pdf" % (extdata.type) # plot HI-lines if requested if HI_lines: plot_HI(path, ax) # zoom in on a specific region if requested if range is not None: zoom(ax, range) outname = outname.replace(".pdf", "_zoom.pdf") # finish configuring the plot if log: ax.set_xscale("log") ax.set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.5 * fontsize) ylabel = extdata._get_ext_ytitle(ytype=extdata.type) if spread: ylabel += " + offset" ax.set_ylabel(ylabel, fontsize=1.5 * fontsize) # show the figure or save it to a pdf file if pdf: fig.savefig(path + outname, bbox_inches="tight") else: plt.show() # return the figure and axes for additional manipulations return fig, ax
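# Hypothetical usage sketch: overplot several (placeholder) star pairs with a
# vertical offset between the curves and the average curve added on top; this
# assumes the *_ext.fits files and average_ext.fits already exist in the path.
def example_plot_multi_extinction():
    starpairs = ["HD283809_HD064802", "HD029647_HD195986"]  # placeholder pairs
    fig, ax = plot_multi_extinction(
        starpairs,
        "/path/to/exts/",  # placeholder path
        average=True,
        spread=True,
        log=True,
        pdf=True,
    )
    return fig, ax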
def plot_average( path, filename="average_ext.fits", ax=None, extmodels=False, fitmodel=False, HI_lines=False, range=None, exclude=[], log=False, spread=False, annotate_key=None, annotate_wave_range=None, pdf=False, ): """ Plot the average extinction curve Parameters ---------- path : string Path to the average extinction curve fits file filename : string [default="average_ext.fits"] Name of the average extinction curve fits file ax : AxesSubplot [default=None] Axes of plot on which to add the average extinction curve if pdf=False extmodels: boolean [default=False] Whether or not to overplot Milky Way extinction curve models fitmodel: boolean [default=False] Whether or not to overplot a fitted model HI_lines : boolean [default=False] Whether or not to indicate the HI-lines in the plot range : list of 2 floats [default=None] Wavelength range to be plotted (in micron) - [min,max] exclude : list of strings [default=[]] List of data type(s) to exclude from the plot (e.g., IRS) log : boolean [default=False] Whether or not to plot the wavelengths on a log-scale spread : boolean [default=False] Whether or not to offset the average extinction curve from the other curves (only relevant when pdf=False and ax=None) annotate_key : string [default=None] type of data for which to annotate text (e.g., SpeX_LXD) (only relevant when pdf=False and ax=None) annotate_wave_range : list of 2 floats [default=None] min/max wavelength range for the annotation of the text (only relevant when pdf=False and ax=None) pdf : boolean [default=False] - If False, the average extinction curve will be overplotted on the current plot (defined by ax) - If True, the average extinction curve will be plotted in a separate plot and saved as a pdf Returns ------- Plots the average extinction curve """ # read in the average extinction curve (if it exists) if os.path.isfile(path + filename): average = ExtData(path + filename) else: warnings.warn( "An average extinction curve with the name " + filename + " could not be found in " + path + ". 
Please calculate the average extinction curve first with the calc_ave_ext function in measure_extinction/utils/calc_ext.py.", UserWarning, ) # make a new plot if requested if pdf: # plotting setup for easier to read plots fontsize = 18 font = {"size": fontsize} plt.rc("font", **font) plt.rc("lines", linewidth=1) plt.rc("axes", linewidth=2) plt.rc("xtick.major", width=2, size=10) plt.rc("xtick.minor", width=1, size=5) plt.rc("ytick.major", width=2, size=10) plt.rc("ytick.minor", width=1, size=5) plt.rc("axes.formatter", min_exponent=2) # create the plot fig, ax = plt.subplots(figsize=(13, 10)) average.plot(ax, exclude=exclude, color="k") # plot Milky Way extinction models if requested if extmodels: plot_extmodels(average, alax=True) # overplot a fitted model if requested if fitmodel: plot_fitmodel(average, res=True) # plot HI-lines if requested if HI_lines: plot_HI(path, ax) # zoom in on a specific region if requested if range is not None: zoom(ax, range) # finish configuring the plot if log: ax.set_xscale("log") plt.xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.5 * fontsize) ax.set_ylabel(average._get_ext_ytitle(ytype=average.type), fontsize=1.5 * fontsize) fig.savefig(path + "average_ext.pdf", bbox_inches="tight") # return the figure and axes for additional manipulations return fig, ax else: if spread: yoffset = -0.3 else: yoffset = 0 average.plot( ax, color="k", alpha=0.6, yoffset=yoffset, annotate_key=annotate_key, annotate_wave_range=annotate_wave_range, annotate_text="average", annotate_yoffset=0.05, ) # overplot a fitted model if requested if fitmodel: plot_fitmodel(average, yoffset=yoffset)
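# Hypothetical usage sketch: plot the average curve as a stand-alone figure
# (pdf=True); the path is a placeholder, "average_ext.fits" must exist (e.g. from
# calc_ave_ext), and fitmodel=True further assumes a fitted model has been stored
# in that file (e.g. with fit_spex_ext("average", path)).
def example_plot_average():
    fig, ax = plot_average("/path/to/exts/", fitmodel=True, pdf=True)
    return fig, ax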
horizontalalignment='center', fontsize=fontsize * 0.75, bbox=dict(facecolor='white')) ax[1].set_yscale('linear') ax[1].set_xscale('log') ax[1].set_xlim(3.0, 3.8) ax[1].set_ylim(0.07, 0.0825) ax[1].set_xlabel(r'$\lambda$ [$\mu$m]') ax[1].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) ax[1].get_xaxis().set_minor_formatter(matplotlib.ticker.ScalarFormatter()) ax[1].set_xticks([3, 3.5]) ax[1].set_xticks([3.25, 3.75], minor=True) # read in and plot the spitzer average extinction spitext = ExtData() spitext.read('all_ext_trim_ave.fits') spitext.plot(ax[2], color='r', fontsize=fontsize, legend_key='IRS', legend_label='Average (this work)') wave_bands = [7.0, 10.0, 14.0] frac_width = 0.1 for cwave in wave_bands: bx = [ cwave - frac_width * cwave, cwave - frac_width * cwave, cwave + frac_width * cwave, cwave + frac_width * cwave ] by = [-0.05, -0.025, -0.025, -0.05] ax[2].plot(bx, by, 'k-')
"azv23_azv404_ext.fits", "azv214_azv380_ext.fits", "azv398_azv289_ext.fits", "azv456_azv70_ext.fits", ] nfiles = [ "azv_018_ext.fits", "azv_023_ext.fits", "azv_214_ext.fits", "azv_398_ext.fits", "azv_456_ext.fits", ] pcol = ["r", "b", "g", "c", "m"] for k, ofile in enumerate(ofiles): oext = ExtData(filename=f"prev/{ofile}") next = ExtData(filename=f"fits/{nfiles[k]}") dext = copy.deepcopy(oext) for curtype, csym in zip(ptypes, psym): oext.exts[curtype][oext.npts[curtype] == 0] = np.nan if curtype == "IUE": olabel = "G03" nlabel = "this work" dlabel = "this work - G03" dlabel2 = f"{snames[k]} diff" else: olabel = None nlabel = None dlabel = None
nuv_ceninten = np.full((n_ext), 0.0) nuv_ceninten_unc = np.full((2, n_ext), 0.0) fuv_amp = np.full((n_ext), 0.0) fuv_amp_unc = np.full((n_ext), 0.0) avs = np.full((n_ext), 0.0) avs_unc = np.full((2, n_ext), 0.0) ebvs = np.full((n_ext), 0.0) ebvs_unc = np.full((n_ext), 0.0) rvs = np.full((n_ext), 0.0) rvs_unc = np.full((2, n_ext), 0.0) for k, cname in enumerate(extfnames): # get P92 fits bfile = f"fits_good_18aug20/{cname}" cext = ExtData(filename=bfile) mcmcfile = bfile.replace(".fits", ".h5") reader = emcee.backends.HDFBackend(mcmcfile) nsteps, nwalkers = reader.get_log_prob().shape samples = reader.get_chain(discard=int(mcmc_burnfrac * nsteps), flat=True) avs_dist = unc.Distribution(samples[:, -1]) av_per = avs_dist.pdf_percentiles([16.0, 50.0, 84.0]) avs[k] = av_per[1] avs_unc[1, k] = av_per[2] - av_per[1] avs_unc[0, k] = av_per[1] - av_per[0] # print(avs_dist.pdf_percentiles([33., 50., 87.])) (indxs, ) = np.where((cext.waves["BAND"] > 0.4 * u.micron)
file_lines = list(f) extnames = [] extdatas = [] extdatas_fm90 = [] avs = [] normtype = "IUE" norm_wave_range = [0.25, 0.30] * u.micron normvals = [] for line in file_lines: if (line.find("#") != 0) & (len(line) > 0): name = line.rstrip() extnames.append(name) bfilename = f"fits/{name}" text = ExtData(filename=bfilename) extdatas.append(text) avs.append(text.columns["AV"][0]) # determine the extinction in the near-UV # useful for sorting to make a pretty plot if "IUE" in text.exts.keys(): (gindxs, ) = np.where( (text.npts[normtype] > 0) & ((text.waves[normtype] >= norm_wave_range[0]) & (text.waves[normtype] <= norm_wave_range[1]))) normvals.append( np.average((text.exts[normtype][gindxs]) / float(text.columns["AV"][0]) + 1.0)) else: normvals.append(1.0)
def fit_features_ext(starpair, path): """ Fit the extinction features separately with different profiles Parameters ---------- starpair : string Name of the star pair for which to fit the extinction features, in the format "reddenedstarname_comparisonstarname" (no spaces) path : string Path to the data files Returns ------- waves : np.ndarray Numpy array with wavelengths exts_sub : np.ndarray Numpy array with continuum subtracted extinctions results : list List with the fitted models for different profiles """ # first, fit the continuum, excluding the region of the features fit_spex_ext(starpair, path, exclude=[(2.8, 3.6)]) # retrieve the SpeX data to be fitted, and sort the curve from short to long wavelengths extdata = ExtData("%s%s_ext.fits" % (path, starpair.lower())) (waves, exts, exts_unc) = extdata.get_fitdata(["SpeX_SXD", "SpeX_LXD"]) indx = np.argsort(waves) waves = waves[indx].value exts = exts[indx] exts_unc = exts_unc[indx] # subtract the fitted (powerlaw) continuum from the data, and select the relevant region params = extdata.model["params"] exts_sub = exts - (params[0] * params[3] * waves ** (-params[2]) - params[3]) mask = (waves >= 2.8) & (waves <= 3.6) waves = waves[mask] exts_sub = exts_sub[mask] exts_unc = exts_unc[mask] # define different profiles # 2 Gaussians (stddev=FWHM/(2sqrt(2ln2))) gauss = Gaussian1D(mean=3, stddev=0.13) + Gaussian1D(mean=3.4, stddev=0.06) # 2 Drudes drude = Drude1D(x_0=3, fwhm=0.3) + Drude1D(x_0=3.4, fwhm=0.15) # 2 Lorentzians lorentz = Lorentz1D(x_0=3, fwhm=0.3) + Lorentz1D(x_0=3.4, fwhm=0.15) # 2 asymmetric Gaussians Gaussian_asym = custom_model(gauss_asymmetric) gauss_asym = Gaussian_asym(x_o=3, gamma_o=0.3) + Gaussian_asym( x_o=3.4, gamma_o=0.15 ) # 2 "asymmetric" Drudes Drude_asym = custom_model(drude_asymmetric) drude_asym = Drude_asym(x_o=3, gamma_o=0.3) + Drude_asym(x_o=3.4, gamma_o=0.15) # 2 asymmetric Lorentzians Lorentzian_asym = custom_model(lorentz_asymmetric) lorentz_asym = Lorentzian_asym(x_o=3, gamma_o=0.3) + Lorentzian_asym( x_o=3.4, gamma_o=0.15 ) profiles = [gauss, drude, lorentz, gauss_asym, drude_asym, lorentz_asym] # fit the different profiles fit = LevMarLSQFitter() results = [] for profile in profiles: fit_result = fit(profile, waves, exts_sub, weights=1 / exts_unc, maxiter=10000) results.append(fit_result) print(fit_result) print("Chi2", np.sum(((exts_sub - fit_result(waves)) / exts_unc) ** 2)) return waves, exts_sub, results
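# Hypothetical usage sketch: fit the 3 and 3.4 micron features of a dense
# (placeholder) sightline with the six profile options; path and star-pair name
# are placeholders and the *_ext.fits file must already exist.
def example_fit_features_ext():
    waves, exts_sub, results = fit_features_ext("HD283809_HD064802", "/path/to/exts/")
    # results contains the fitted models in the order:
    # [gauss, drude, lorentz, gauss_asym, drude_asym, lorentz_asym]
    return waves, exts_sub, results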
avefilenames = [ "data/all_ext_14oct20_diffuse_ave_POWLAW2DRUDE.fits", "fits/hd283809_hd064802_ext_POWLAW2DRUDE.fits", "fits/hd029647_hd195986_ext_POWLAW2DRUDE.fits", ] if not args.dense: avefilenames = avefilenames[0:1] pcol = ["b", "g", "m"] psym = ["o", "s", "^"] pline = ["-", "-.", ":"] plabel = ["diffuse", "HD283809", "HD029647"] for i, avefilename in enumerate(avefilenames): # IR Fit obsext = ExtData() obsext.read(avefilename) # UV fit obsext2 = ExtData() if plabel[i] == "diffuse": obsext2.read(avefilename.replace(".fits", "_FM90.fits")) obsext_wave = obsext.waves["BAND"].value obsext_ext = obsext.exts["BAND"] obsext_ext_uncs = obsext.uncs["BAND"] gindxs_IRS = np.where(obsext.npts["IRS"] > 0) obsext_IRS_wave = obsext.waves["IRS"][gindxs_IRS].value obsext_IRS_ext = obsext.exts["IRS"][gindxs_IRS] obsext_IRS_uncs = obsext.uncs["IRS"][gindxs_IRS] else:
fig, ax = pyplot.subplots(figsize=figsize) # New measurements avefilenames = [ "data/all_ext_14oct20_diffuse_ave_POWLAW2DRUDE.fits", # "fits_good_18aug20/hd283809_hd064802_ext_POWLAW2DRUDE.fits", # "fits_good_18aug20/hd029647_hd195986_ext_POWLAW2DRUDE.fits", ] pcol = ["b", "g", "c"] psym = ["o", "s", "^"] pline = ["-", ":", "-."] plabel = ["diffuse", "HD283809", "HD029647"] for i, avefilename in enumerate(avefilenames): # IR Fit obsext = ExtData() obsext.read(avefilename) if obsext.type == "elx": obsext.trans_elv_alav() obsext_wave = obsext.waves["BAND"].value obsext_ext = obsext.exts["BAND"] obsext_ext_uncs = obsext.uncs["BAND"] gindxs_IRS = np.where(obsext.npts["IRS"] > 0) obsext_IRS_wave = obsext.waves["IRS"][gindxs_IRS].value obsext_IRS_ext = obsext.exts["IRS"][gindxs_IRS] obsext_IRS_uncs = obsext.uncs["IRS"][gindxs_IRS]
mod_lam = np.logspace(np.log10(0.13), np.log10(2.4), num=500) * u.micron lstyles = [":", "--", "-."] for k, cRv in enumerate(Rvs): mwmod = F19(cRv) ax[0, 0].plot( mod_lam, mwmod(mod_lam), label=f"F19 MW Rv={cRv}", color="k", alpha=0.5, linestyle=lstyles[k], ) mw1 = GCC09_MWAvg() ax[0, 0].plot(1.0 / mw1.obsdata_x, mw1.obsdata_axav, "b-", label="GCC09 MWAvg") obsext = ExtData() obsext.read( "/home/kgordon/Python_git/spitzer_mir_ext/data/all_ext_14oct20_diffuse_ave_POWLAW2DRUDE.fits" ) plot_obsext(ax[0, 1], obsext) ax[0, 1].legend(fontsize=0.8 * fontsize) # lmc lmc1 = G03_LMCAvg() lmc2 = G03_LMC2() ax[1, 0].plot(1.0 / lmc1.obsdata_x, lmc1.obsdata_axav, "g-", label="G03 LMCAvg") ax[1, 0].plot(1.0 / lmc2.obsdata_x, lmc2.obsdata_axav, label="G03 LMC2 (30 Dor)") ax[1, 1].plot(1.0 / lmc1.obsdata_x, lmc1.obsdata_axav, "c-", label="G03 LMCAvg") ax[1, 1].plot(1.0 / lmc2.obsdata_x, lmc2.obsdata_axav, label="G03 LMC2 (30 Dor)") ax[1, 1].text(
ekvs = np.full((n_ext), 0.0) ekvs_unc = np.full((n_ext), 0.0) eivs = np.full((n_ext), 0.0) eivs_unc = np.full((n_ext), 0.0) rvs = np.full((n_ext), 0.0) rvs_unc = np.full((2, n_ext), 0.0) rks = np.full((n_ext), 0.0) rks_unc = np.full((2, n_ext), 0.0) ris = np.full((n_ext), 0.0) ris_unc = np.full((2, n_ext), 0.0) for k, cname in enumerate(extfnames): # get P92 fits bfile = f"fits/{cname}" cext = ExtData(filename=bfile) mcmcfile = bfile.replace(".fits", ".h5") reader = emcee.backends.HDFBackend(mcmcfile) nsteps, nwalkers = reader.get_log_prob().shape samples = reader.get_chain(discard=int(mcmc_burnfrac * nsteps), flat=True) avs_dist = unc.Distribution(samples[:, -1]) av_per = avs_dist.pdf_percentiles([16.0, 50.0, 84.0]) avs[k] = av_per[1] avs_unc[1, k] = av_per[2] - av_per[1] avs_unc[0, k] = av_per[1] - av_per[0] # print(avs_dist.pdf_percentiles([33., 50., 87.])) (indxs, ) = np.where((cext.waves["BAND"] > 0.4 * u.micron)
type=float, default=0.1, help="fraction of MCMC chain to burn") parser.add_argument("--png", help="save figure as a png file", action="store_true") parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true") args = parser.parse_args() # get a saved extnction curve file = args.extfile # file = '/home/kgordon/Python_git/spitzer_mir_ext/fits/hd147889_hd064802_ext.fits' ofile = file.replace(".fits", "_FM90.fits") ext = ExtData(filename=file) if ext.type == "elx": ext.trans_elv_alav(av=float(ext.columns["AV"][0])) wave, y, y_unc = ext.get_fitdata( ["IUE"], remove_uvwind_region=True, remove_lya_region=True, ) x = 1.0 / wave.value # remove points above x = 8.0 gvals = x < 8.0 x = x[gvals] y = y[gvals]