def read_simple_templates(velscale, lamrange):
    ''' read the bulge and disk BC03 template spectra for the simple mocks,
    log-rebin them to the requested velocity scale, and return the median-normalized
    templates as a column-stacked array along with the log-rebinned wavelength grid.
    '''
    hdul = fits.open(UT.lgal_dir() + "/simple_mocks/template_fluxbc03.fits")
    wave_s = hdul[1].data['wave']
    flux_bulge = hdul[1].data['L_bulge']
    flux_disk = hdul[1].data['L_disk']
    hdul.close()

    # interpolate onto a common wavelength grid and clip to lamrange
    wave, flux_bulge = to_common_grid(wave_s, flux_bulge, lamrange[0], lamrange[1])
    wave, flux_disk = to_common_grid(wave_s, flux_disk, lamrange[0], lamrange[1])

    mask = ((wave >= lamrange[0]) & (wave <= lamrange[1]))
    wave = wave[mask]

    # log-rebin and median-normalize the bulge template
    flux_bulge = flux_bulge[mask]
    model1, logLam1, velscale_out = util.log_rebin([wave[0], wave[-1]], flux_bulge, velscale=velscale)
    model1 /= np.median(model1)
    print(velscale, velscale_out)

    # log-rebin and median-normalize the disk template
    flux_disk = flux_disk[mask]
    model2, logLam2, velscale_out = util.log_rebin([wave[0], wave[-1]], flux_disk, velscale=velscale)
    model2 /= np.median(model2)

    templates = np.column_stack([model1, model2])
    #print([wave[0], wave[-1]])
    plt.plot(np.exp(logLam1), model1)
    plt.plot(np.exp(logLam2), model2)
    return (logLam1, templates)
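
# `to_common_grid` is not defined in this module. The sketch below is a minimal,
# hypothetical stand-in assuming it only resamples a spectrum onto a constant-step
# wavelength grid clipped to [lammin, lammax] (a constant step is what
# `util.log_rebin` expects); the 1 A step and linear interpolation are illustrative
# assumptions, not the project's actual implementation.
def _to_common_grid_sketch(wave, flux, lammin, lammax, dlam=1.0):
    ''' hypothetical helper: resample (wave, flux) onto a constant-step grid '''
    wave_out = np.arange(lammin, lammax + dlam, dlam)   # constant-step wavelength grid
    flux_out = np.interp(wave_out, wave, flux)          # linear interpolation of the flux
    return wave_out, flux_out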
def _mini_mocha_galid(lib='bc03'):
    ''' pick 100 unique Lgal galids that roughly fall under the BGS target
    selection for the mini mock challenge: r < 20.
    '''
    # gather all galids
    galids = []
    dir_inputs = os.path.join(UT.lgal_dir(), 'gal_inputs')
    for finput in glob.glob(dir_inputs + '/*'):
        galids.append(int(os.path.basename(finput).split('_')[2]))
    galids = np.array(galids)
    n_id = len(galids)

    # get noiseless source spectra
    _, spectra_s = _lgal_noiseless_spectra(galids, lib=lib)
    # get DECAM photometry
    photo, _ = FM.Photo_DESI(spectra_s['wave'], spectra_s['flux_dust'])

    target_selection = (photo[:, 1] <= 20.)
    print('%i Lgal galaxies within target_selection' % np.sum(target_selection))

    # now randomly choose 100 galids
    mini_galids = np.random.choice(galids[target_selection], size=100, replace=False)
    fids = os.path.join(UT.dat_dir(), 'mini_mocha', 'lgal.galids.%s.txt' % lib)
    np.savetxt(fids, mini_galids, fmt='%i',
               header='%i Lgal galids for mini mock challenge' % len(mini_galids))
    return None
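
# Hypothetical usage sketch for `_mini_mocha_galid`: regenerate the galid list and
# read it back. The `np.random.seed` call is an illustrative addition so the random
# draw of 100 galids is repeatable; it is not part of the original function. The
# file path mirrors the one written above.
def _example_read_mini_mocha_galids(lib='bc03'):
    np.random.seed(0)                       # assumed seed for reproducibility
    _mini_mocha_galid(lib=lib)              # writes lgal.galids.<lib>.txt
    fids = os.path.join(UT.dat_dir(), 'mini_mocha', 'lgal.galids.%s.txt' % lib)
    return np.loadtxt(fids, dtype=int)      # header line is skipped as a '#' comment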
def Fbestfit_photo(igal, noise='bgs0_legacy', sample='mini_mocha', method='ifsps'):
    ''' file name of the best-fit to photometry of mini_mocha galaxy #igal

    :param igal:
        index of the mini_mocha galaxy
    :param noise:
        noise of the spectra and photometry, e.g. 'bgs0_legacy'. If noise == 'none',
        no noise. (default: 'bgs0_legacy')
    :param sample:
        galaxy sample (default: 'mini_mocha')
    :param method:
        fitting method (default: 'ifsps')
    '''
    if noise != 'none':
        noise_spec = noise.split('_')[0]
        noise_photo = noise.split('_')[1]
    else:
        noise_spec = 'none'
        noise_photo = 'none'

    model = 'vanilla'
    f_bf = os.path.join(
        UT.lgal_dir(), sample, method,
        'lgal.photo.noise_%s.%s.%i.hdf5' % (noise_photo, model, igal))
    return f_bf
def Fbestfit_photo(igal, noise='none', dust=False, method='ifsps'):
    ''' file name of the best-fit to photometry of spectral_challenge galaxy #igal

    :param igal:
        index of the spectral_challenge galaxy
    :param noise:
        noise of the spectra. If noise == 'none', no noise; if noise == 'legacy',
        legacy-like noise. (default: 'none')
    :param dust:
        whether the spectra include dust.
    :param method:
        fitting method (default: 'ifsps')
    '''
    if dust:
        model = 'vanilla'
    else:
        model = 'dustless_vanilla'

    f_bf = os.path.join(
        UT.lgal_dir(), 'spectral_challenge', method,
        'photo.noise_%s.dust_%s.%s.%i.hdf5' % (noise, ['no', 'yes'][dust], model, igal))
    return f_bf
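
# Hypothetical usage sketch for the spectral_challenge `Fbestfit_photo` variant
# directly above; igal, noise, and dust are illustrative choices. For these inputs
# the basename works out to 'photo.noise_legacy.dust_yes.vanilla.0.hdf5' under
# UT.lgal_dir()/spectral_challenge/ifsps/.
def _example_bestfit_photo_path(igal=0):
    return Fbestfit_photo(igal, noise='legacy', dust=True, method='ifsps')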
def read_simple_templates(velscale, lamrange):
    ''' read the bulge and disk BC03 template spectra for the simple mocks,
    log-rebin them to the requested velocity scale, and return the normalized
    templates, the log-rebinned wavelength grid, and the normalization factors.
    '''
    hdul = fits.open(UT.lgal_dir() + "/simple_mocks/template_fluxbc03.fits")
    wave = hdul[1].data['wave']
    flux_bulge = hdul[1].data['L_bulge']
    flux_disk = hdul[1].data['L_disk']
    hdul.close()

    # put on a constant wavelength grid, as this is assumed by log_rebin
    wave_s, flux_bulge = to_constant_grid(wave, flux_bulge, lamrange[0], lamrange[1])
    wave_s, flux_disk = to_constant_grid(wave, flux_disk, lamrange[0], lamrange[1])
    print('full model wavelength range: ', wave_s[0], wave_s[-1])
    print('requested model wavelength range:', lamrange)

    # log-rebin and median-normalize the bulge template
    model1, logLam1, velscale_out = util.log_rebin([lamrange[0], lamrange[1]], flux_bulge, velscale=velscale)
    norm1 = np.median(model1)
    model1 /= norm1
    print(velscale, velscale_out)

    # log-rebin and median-normalize the disk template
    model2, logLam2, velscale_out = util.log_rebin([lamrange[0], lamrange[1]], flux_disk, velscale=velscale)
    norm2 = np.median(model2)
    model2 /= norm2
    print(velscale, velscale_out)

    # protect against the data going outside of the models' wavelength range:
    # the Stelib library is defined below 3400 A, but at lower resolution
    if lamrange[0] < 3400.0:
        lamrange_new = [3400.0, lamrange[1]]
        print(lamrange_new)
        mask = ((np.exp(logLam1) >= lamrange_new[0]) & (np.exp(logLam1) <= lamrange_new[1]))
        logLam1 = logLam1[mask]
        logLam2 = logLam2[mask]
        model1 = model1[mask]
        model2 = model2[mask]

    templates = np.column_stack([model1, model2])
    plt.plot(np.exp(logLam1), model1)
    plt.plot(np.exp(logLam2), model2)
    return (logLam1, templates, [norm1, norm2])
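
# Hypothetical downstream usage: the log-rebinned, median-normalized templates
# returned by `read_simple_templates` are shaped for a pPXF kinematic fit. Apart
# from `read_simple_templates` itself, everything here (the observed `galaxy`
# spectrum and `noise` vector on a log-rebinned grid `logLam_gal`, the starting
# guess, and the fit keywords) is an illustrative assumption about how the
# templates might be used, not the project's actual fitting code.
def _example_ppxf_fit(galaxy, noise, logLam_gal, velscale, lamrange):
    from ppxf.ppxf import ppxf
    logLam_temp, templates, norms = read_simple_templates(velscale, lamrange)
    c = 299792.458                               # speed of light [km/s]
    dv = c * (logLam_temp[0] - logLam_gal[0])    # velocity offset between template and galaxy grids
    start = [0.0, 100.0]                         # assumed starting (V, sigma) [km/s]
    pp = ppxf(templates, galaxy, noise, velscale, start,
              moments=2, degree=4, vsyst=dv)
    return pp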
def _lgal_metadata(galids):
    ''' return galaxy properties (meta data) of Lgal galaxies given the galids
    '''
    tlookback, dt = [], []
    sfh_disk, sfh_bulge, Z_disk, Z_bulge, logM_disk, logM_bulge, logM_total = [], [], [], [], [], [], []
    t_age_MW, Z_MW = [], []
    for i, galid in enumerate(galids):
        f_input = os.path.join(
            UT.lgal_dir(), 'gal_inputs',
            'gal_input_%i_BGS_template_FSPS_uvmiles.csv' % galid)
        gal_input = Table.read(f_input, delimiter=' ')

        tlookback.append(gal_input['sfh_t'])  # lookback time (age)
        dt.append(gal_input['dt'])
        # SF history
        sfh_disk.append(gal_input['sfh_disk'])
        sfh_bulge.append(gal_input['sfh_bulge'])
        # metallicity history
        Z_disk.append(gal_input['Z_disk'])
        Z_bulge.append(gal_input['Z_bulge'])
        # formed mass
        logM_disk.append(np.log10(np.sum(gal_input['sfh_disk'])))
        logM_bulge.append(np.log10(np.sum(gal_input['sfh_bulge'])))
        logM_total.append(np.log10(
            np.sum(gal_input['sfh_disk']) + np.sum(gal_input['sfh_bulge'])))
        # mass-weighted age and metallicity
        t_age_MW.append(
            np.sum(gal_input['sfh_t'] * (gal_input['sfh_disk'] + gal_input['sfh_bulge'])) /
            np.sum(gal_input['sfh_disk'] + gal_input['sfh_bulge']))
        Z_MW.append(
            np.sum(gal_input['Z_disk'] * gal_input['sfh_disk'] +
                   gal_input['Z_bulge'] * gal_input['sfh_bulge']) /
            np.sum(gal_input['sfh_disk'] + gal_input['sfh_bulge']))

    meta = {}
    meta['galid'] = galids
    meta['t_lookback'] = tlookback
    meta['dt'] = dt
    meta['sfh_disk'] = sfh_disk
    meta['sfh_bulge'] = sfh_bulge
    meta['Z_disk'] = Z_disk
    meta['Z_bulge'] = Z_bulge
    meta['logM_disk'] = logM_disk
    meta['logM_bulge'] = logM_bulge
    meta['logM_total'] = logM_total
    meta['t_age_MW'] = t_age_MW
    meta['Z_MW'] = Z_MW
    return meta
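
# Small worked example of the mass-weighted age computed in `_lgal_metadata`, with
# made-up toy numbers purely for illustration: SFH mass bins of [1, 3] at lookback
# times [1, 5] Gyr give a mass-weighted age of (1*1 + 5*3) / (1 + 3) = 4 Gyr.
def _example_mass_weighted_age():
    sfh_t = np.array([1.0, 5.0])    # lookback times [Gyr]
    sfh = np.array([1.0, 3.0])      # mass formed in each bin
    return np.sum(sfh_t * sfh) / np.sum(sfh)    # = 4.0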
def _lgal_noiseless_spectra(galids, lib='bc03'):
    ''' return noiseless source spectra of Lgal galaxies given the galids and
    the library. The spectra are interpolated onto a standard wavelength grid.
    '''
    n_id = len(galids)

    if lib == 'bc03':
        str_lib = 'BC03_Stelib'

    # noiseless source spectra
    _Fsource = lambda galid: os.path.join(
        UT.lgal_dir(), 'templates',
        'gal_spectrum_%i_BGS_template_%s.fits' % (galid, str_lib))

    wavemin, wavemax = 3000.0, 3e5
    wave = np.arange(wavemin, wavemax, 0.2)
    flux_dust = np.zeros((n_id, len(wave)))
    flux_nodust = np.zeros((n_id, len(wave)))

    redshift, cosi, tau_ism, tau_bc, vd_disk, vd_bulge = [], [], [], [], [], []
    for i, galid in enumerate(galids):
        f_source = fits.open(_Fsource(galid))
        # grab extra meta data from the header
        hdr = f_source[0].header
        redshift.append(hdr['REDSHIFT'])
        cosi.append(hdr['COSI'])
        tau_ism.append(hdr['TAUISM'])
        tau_bc.append(hdr['TAUBC'])
        vd_disk.append(hdr['VD_DISK'])
        vd_bulge.append(hdr['VD_BULGE'])

        specin = f_source[1].data
        # convert from W/A/m^2 to 10^-17 erg/s/cm^2/A
        _flux_dust = specin['flux_dust_nonoise'] * 1e-4 * 1e7 * 1e17
        _flux_nodust = specin['flux_nodust_nonoise'] * 1e-4 * 1e7 * 1e17

        interp_flux_dust = sp.interpolate.interp1d(specin['wave'], _flux_dust,
                                                   fill_value='extrapolate')
        interp_flux_nodust = sp.interpolate.interp1d(specin['wave'], _flux_nodust,
                                                     fill_value='extrapolate')
        flux_dust[i, :] = interp_flux_dust(wave)
        flux_nodust[i, :] = interp_flux_nodust(wave)

    meta = {
        'redshift': np.array(redshift),
        'cosi': np.array(cosi),
        'tau_ism': np.array(tau_ism),
        'tau_bc': np.array(tau_bc),
        'vd_disk': np.array(vd_disk),
        'vd_bulge': np.array(vd_bulge)
    }
    spectra = {
        'wave': wave,
        'flux_dust': flux_dust,
        'flux_nodust': flux_nodust
    }
    return meta, spectra
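
# The flux unit conversion used in `_lgal_noiseless_spectra` follows from
# 1 W = 1e7 erg/s and 1 m^-2 = 1e-4 cm^-2, so W/A/m^2 -> erg/s/cm^2/A multiplies
# by 1e3; the extra factor of 1e17 expresses the result in units of
# 10^-17 erg/s/cm^2/A. A minimal standalone check of that factor:
def _example_flux_unit_conversion(flux_W_per_A_per_m2):
    ''' convert flux density from W/A/m^2 to 10^-17 erg/s/cm^2/A '''
    return flux_W_per_A_per_m2 * 1e7 * 1e-4 * 1e17    # same factor as above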
# --- plotting ---
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False

dir_fig = os.path.join(UT.lgal_dir(), 'spectral_challenge')


def mock_challenge_spec(noise='none', dust=False, method='ifsps'):
    ''' Compare properties inferred from forward-modeled spectra to input properties
    '''
    # read Lgal spectra of the spectral_challenge mocks and get input properties
    specs, meta = Data.Spectra(sim='lgal', noise=noise, lib='bc03', sample='spectral_challenge')
    Mstar_input = meta['logM_total']   # total mass
    Z_MW_input = meta['Z_MW']          # mass-weighted metallicity
    tage_input = meta['t_age_MW']      # mass-weighted age

    theta_inf = []