Example #1
def sample_posterior(outname=None, shortname=None, mass_folder=None):
    # I/O
    # paramfile = model_setup.import_module_from_file(param_name)
    # outname = paramfile.run_params['outfile']
    # sample_results, powell_results, model, eout = load_prospector_data(outname, hdf5=True, load_extra_output=True)
    sample_results, powell_results, model = bread.results_from(outname)

    # create useful quantities
    sample_results['flatchain'] = chop_chain(sample_results['chain'],
                                             **sample_results['run_params'])
    sample_results['flatprob'] = chop_chain(sample_results['lnprobability'],
                                            **sample_results['run_params'])

    sps = model_setup.load_sps(**sample_results['run_params'])
    # sps = paramfile.load_sps(**sample_results['run_params'])
    # obs = paramfile.load_obs(**sample_results['run_params'])

    # sample from posterior
    nsamp = 3000
    good = np.isfinite(sample_results['flatprob'])
    sample_idx = np.random.choice(np.where(good)[0], nsamp)

    # define outputs
    mfrac = np.linspace(0, 0.95, 20)
    mfrac_out = np.zeros(shape=(nsamp, mfrac.shape[0]))
    for jj, idx in enumerate(sample_idx):
        print(jj)
        ##### model call, to set parameters
        thetas = copy.copy(sample_results['flatchain'][idx])
        spec, mags, sm = sample_results['model'].mean_model(
            thetas, sample_results['obs'], sps=sps)

        ##### extract sfh parameters
        sfh_params = find_sfh_params(sample_results['model'],
                                     thetas,
                                     sample_results['obs'],
                                     sps,
                                     sm=sm)

        for ii, m in enumerate(mfrac):
            mfrac_out[jj, ii] = halfmass_assembly_time(sfh_params, c=m)

    # fixing negatives
    mfrac_out = np.clip(mfrac_out, 0.0, np.inf)
    # write out
    out = np.percentile(mfrac_out, [50, 84, 16], axis=0)
    with open('out/' + mass_folder + shortname + 'mass.txt', 'w') as f:
        f.write('# mass_fraction t50 t84 t16\n')
        for ii in range(out.shape[1]):
            f.write("{:.2f}".format(mfrac[ii]) + ' ' +
                    "{:.3f}".format(out[0, ii]) + ' ' +
                    "{:.3f}".format(out[1, ii]) + ' ' +
                    "{:.3f}".format(out[2, ii]) + ' ')
            f.write('\n')
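The text file written above stores, for each mass fraction, the 50th, 84th, and 16th percentile of the assembly time over the posterior draws. A minimal read-back sketch (the path below is hypothetical, not from the source):

import numpy as np

# columns follow the header written by sample_posterior: mass_fraction, t50, t84, t16
frac, t50, t84, t16 = np.loadtxt('out/some_folder/some_object_mass.txt', unpack=True)
t_err_up, t_err_down = t84 - t50, t50 - t16  # asymmetric 1-sigma intervals, if needed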
Example #2
def calc_sfr(nsample=40000):
	
	### setup SPS
	run_params = model_setup.get_run_params(param_file=param_file)
	sps = model_setup.load_sps(**run_params)
	model = model_setup.load_model(**run_params)
	obs = return_fake_obs({})
	nbins = model.params['sfr_fraction'].shape[0]

	#### create chain to sample from
	flatchain = np.random.dirichlet(np.ones(nbins), nsample)  # flat Dirichlet prior over all nbins SFR fractions
	flatchain = flatchain[:, :-1]  # drop the last bin; it is fixed by the unit-sum constraint

	### define time array for SFHs: agebins are log10(yr), so convert to Gyr and take bin midpoints
	bins_gyr = 10**model.params['agebins']/1e9
	t = bins_gyr.sum(axis=1)/2.

	### output bins
	sfr = np.zeros(shape=(t.shape[0],nsample))

	#### sample the posterior
	for jj in xrange(nsample):
		
		if jj % 100 == 0:
			print float(jj)/nsample

		##### model call, to set parameters
		thetas = flatchain[jj,:]
		_,_,sm = model.mean_model(thetas, obs, sps=sps)

		##### extract sfh parameters
		# pass stellar mass to avoid extra model call
		sfh_params = prosp_dutils.find_sfh_params(model,thetas,
			                                       obs,sps,sm=sm)

		#### SFR
		sfr[:,jj] = prosp_dutils.return_full_sfh(t, sfh_params,minsfr=-np.inf)

	out = {}
	out['sfr'] = sfr
	out['flatchain'] = flatchain[:nsample,:]
	out['model'] = model

	return out
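The Dirichlet draw above is a flat prior over the sfr_fraction bins; the last column is dropped because the fractions sum to one, so the final bin is implied. A minimal standalone sketch of that bookkeeping (pure numpy, six bins as in the example):

import numpy as np

nbins, nsample = 6, 5
draws = np.random.dirichlet(np.ones(nbins), nsample)  # shape (nsample, nbins), each row sums to 1
free_fracs = draws[:, :-1]                            # only nbins-1 fractions are free parameters
implied_last = 1.0 - free_fracs.sum(axis=1)           # the dropped bin is recoverable
assert np.allclose(implied_last, draws[:, -1])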
Example #3
def cloudy_spectrum(ax):

	from prospect.models import model_setup

	param_file = '/Users/joel/code/python/prospector_alpha/parameter_files/brownseds_np/brownseds_np_params.py'
	
	run_params = model_setup.get_run_params(param_file=param_file)
	sps = model_setup.load_sps(**run_params)
	model = model_setup.load_model(**run_params)
	model.params['dust2'] = np.array(0.0)
	obs = model_setup.load_obs(**run_params)
	spec,_,_ = model.mean_model(model.initial_theta, obs, sps=sps)
	model.params['add_neb_emission'] = np.array(False)
	model.params['add_neb_continuum'] = np.array(False)
	spec_neboff,_,_ = model.mean_model(model.initial_theta, obs, sps=sps)

	spec_neb = (spec-spec_neboff)*3e18/sps.wavelengths**2
	in_plot = (sps.wavelengths/1e4 > 6) & (sps.wavelengths/1e4 < 30)
	spec_neb_smooth = smooth_spectrum(sps.wavelengths[in_plot], spec_neb[in_plot], 3500)
	spec_neb_smooth *= 1e5 / spec_neb_smooth.max()

	'''
	neb_color = '0.7'
	alpha = 0.7
	ax.fill_between(sps.wavelengths[in_plot]/1e4, np.zeros_like(spec_neb_smooth), spec_neb_smooth, 
                    color=neb_color,
                    alpha=alpha)

	### label H2 + [ArII] (second is in cloudy but very small)
	ax.fill_between([9.55,9.85],[0,0],[1,1],color=neb_color,alpha=alpha)
	ax.fill_between([6.8,7.1],[0,0],[1,1],color=neb_color,alpha=alpha)
	'''
	lines = ['[ArII]',r'H$_2$','[SIV]','[NeIII]','[SIII]']
	lam = [6.95,9.7,10.45,15.5,18.7]
	# removed because they're too weak, just distracting
	#lines = ['[ArIII]','[NeII]']
	#lam = [9.0,12.8]

	for ii in xrange(len(lines)):
		ax.text(lam[ii]*1.008,0.14,lines[ii],
			    ha='left',fontsize=9.5)
	for ii in xrange(len(lam)):
		ax.plot([lam[ii], lam[ii]], [0, 1e5], linestyle='--', lw=1.5, color='k')
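The 3e18/wavelength**2 factor above is the usual f_nu to f_lambda conversion (c in Angstrom/s over wavelength squared, with wavelengths in Angstroms). A minimal sketch of that conversion on its own, with placeholder values:

import numpy as np

lam = np.array([6.0, 9.7, 15.5]) * 1e4   # microns -> Angstroms, roughly the lines labelled above
f_nu = np.ones_like(lam)                 # placeholder f_nu values
f_lam = f_nu * 3e18 / lam**2             # f_lambda = f_nu * c / lambda**2, same factor as spec_neb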
Example #4
import numpy as np
import matplotlib.pyplot as plt
import spot_utils as pread
from prospect.io.read_results import results_from
from prospect.models import model_setup

paramfile = 'ad_params.py'
clargs = {'param_file': paramfile}
run_params = model_setup.get_run_params(argv=paramfile, **clargs)
print(run_params)

obs = model_setup.load_obs(**run_params)
sps = model_setup.load_sps(**run_params)
model = model_setup.load_model(**run_params)

wspec = sps.csp.wavelengths  # *restframe* spectral wavelengths
a = 1.0 + model.params.get('zred', 0.6)  # cosmological redshifting
wphot = np.array([f.wave_effective for f in obs['filters']])

# In [21]
# grab results, powell results, and our corresponding models
#res, pr, mod = results_from("{}_mcmc.h5".format(outroot))
res, pr, mod = results_from("demo_galphot_1508433060_mcmc.h5")

# In [22]
# To see how our MCMC samples look, we can examine a few traces
choice = np.random.choice
tracefig = pread.param_evol(res,
                            figsize=(20, 10),
                            chains=choice(128, size=10, replace=False))
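A possible next step (a sketch, not from the source): evaluate the model at its initial parameters, as Example #3 does, and shift the restframe SPS wavelengths into the observed frame with the factor a defined above.

initial_spec, initial_phot, _ = model.mean_model(model.initial_theta, obs, sps=sps)
wspec_obs = wspec * a  # lambda_obs = (1 + zred) * lambda_rest

plt.loglog(wspec_obs, initial_spec, label='model spectrum (initial theta)')
plt.loglog(wphot, initial_phot, 'o', label='model photometry (initial theta)')
plt.xlabel('observed-frame wavelength [Angstroms]')
plt.ylabel('flux [maggies]')
plt.legend()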
Example #5
def calc_extra_quantities(sample_results, ncalc=3000, **kwargs):
    '''
    CALCULATED QUANTITIES
    model nebular emission line strength
    model star formation history parameters (ssfr,sfr,half-mass time)
    '''

    # different options for what to calculate
    # speedup is measured in runtime, where runtime = ncalc * model_call
    opts = {
        'restframe_optical_photometry': False,  # currently deprecated! but framework exists in
                                                # restframe_optical_properties
        'ir_priors': False,  # no cost
        'measure_spectral_features': False,  # cost = 2 runtimes
        'mags_nodust': False  # cost = 1 runtime
    }
    if kwargs:
        for key in kwargs.keys():
            opts[key] = kwargs[key]

    parnames = np.array(sample_results['model'].theta_labels())

    ##### describe number of components in Prospector model [legacy]
    sample_results['ncomp'] = np.sum(['mass' in x for x in sample_results['model'].theta_labels()])

    ##### array indexes over which to sample the flatchain
    sample_idx = sample_flatchain(sample_results['flatchain'], sample_results['flatprob'], parnames,
                                  ir_priors=opts['ir_priors'], include_maxlnprob=True, nsamp=ncalc)

    ##### initialize output arrays for SFH + emission line posterior draws
    half_time, sfr_10, sfr_100, ssfr_100, stellar_mass, ssfr_10 = [np.zeros(shape=(ncalc)) for i in range(6)]
    # ssfr_full (the full sSFR(t) per posterior draw) is initialized below, once the SFH time grid is known

    ##### set up time vector for full SFHs
    t = set_sfh_time_vector(sample_results, ncalc)  # returns array of len=18
    intsfr = np.zeros(shape=(t.shape[0], ncalc))
    ssfr_full = np.zeros(shape=(t.shape[0], ncalc))  # full sSFR(t) posterior draws, same shape as intsfr

    ##### initialize sps, calculate maxprob
    # also confirm probability calculations are consistent with fit
    sps = model_setup.load_sps(**sample_results['run_params'])
    maxthetas, maxprob = maxprob_model(sample_results, sps)

    ##### set up model flux vectors
    mags = np.zeros(shape=(len(sample_results['obs']['filters']), ncalc))
    try:
        wavelengths = sps.wavelengths
    except AttributeError:
        wavelengths = sps.csp.wavelengths
    spec = np.zeros(shape=(wavelengths.shape[0], ncalc))

    ##### modify nebular status to ensure emission line production
    # don't cache, and turn on
    if sample_results['model'].params['add_neb_emission'] == 2:
        sample_results['model'].params['add_neb_emission'] = np.array([True])
    sample_results['model'].params['nebemlineinspec'] = np.array([True])

    loop = 0
    ######## posterior sampling #########
    for jj, idx in enumerate(sample_idx):

        ##### model call, to set parameters
        thetas = copy(sample_results['flatchain'][idx])
        spec[:, jj], mags[:, jj], sm = sample_results['model'].mean_model(thetas, sample_results['obs'], sps=sps)

        ##### extract sfh parameters
        # pass stellar mass to avoid extra model call
        sfh_params = prosp_dutils.find_sfh_params(sample_results['model'], thetas, sample_results['obs'], sps, sm=sm)

        ##### calculate SFH
        intsfr[:, jj] = prosp_dutils.return_full_sfh(t, sfh_params)

        ##### solve for half-mass assembly time
        half_time[jj] = prosp_dutils.halfmass_assembly_time(sfh_params)

        ##### calculate time-averaged SFR
        sfr_10[jj] = prosp_dutils.calculate_sfr(sfh_params, 0.01, minsfr=-np.inf, maxsfr=np.inf)  # avg over 10 Myr
        sfr_100[jj] = prosp_dutils.calculate_sfr(sfh_params, 0.1, minsfr=-np.inf, maxsfr=np.inf)  # avg over 100 Myr

        ##### calculate mass, sSFR
        stellar_mass[jj] = sfh_params['mass']
        ssfr_10[jj] = sfr_10[jj] / stellar_mass[jj]
        ssfr_100[jj] = sfr_100[jj] / stellar_mass[jj]

        ssfr_full[:, jj] = intsfr[:, jj] / stellar_mass[jj]  # full sSFR(t) for this posterior draw

        loop += 1
        print('loop', loop)

    #### QUANTILE OUTPUTS #
    extra_output = {}

    ##### CALCULATE Q16,Q50,Q84 FOR EXTRA PARAMETERS
    extra_flatchain = np.dstack(
        (half_time, sfr_10, sfr_100, ssfr_10, ssfr_100, stellar_mass))[0]
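    # A sketch of the quantile step the heading above names (Example #6 below does this in full):
    # per-parameter 16th/50th/84th percentiles over the posterior draws in extra_flatchain.
    q16, q50, q84 = np.percentile(extra_flatchain, [16.0, 50.0, 84.0], axis=0)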

    #### EXTRA PARAMETER OUTPUTS
    extras = {'flatchain': extra_flatchain,
              'parnames': np.array(
                  ['half_time', 'sfr_10', 'sfr_100', 'ssfr_10', 'ssfr_100', 'stellar_mass']),
              'sfh': intsfr,
              't_sfh': t,
              'ssfr': ssfr_full}  # TESTING 'ssfr'
    extra_output['extras'] = extras

    #### BEST-FITS
    bfit = {'maxprob_params': maxthetas,
            'maxprob': maxprob,
            'sfh': intsfr[:, 0],
            'half_time': half_time[0],
            'sfr_10': sfr_10[0],
            'sfr_100': sfr_100[0],
            'spec': spec[:, 0],
            'mags': mags[:, 0]}
    extra_output['bfit'] = bfit

    ##### spectral features
    return extra_output
Example #6
def calc_extra_quantities(sample_results, ncalc=3000, **kwargs):
    '''
    CALCULATED QUANTITIES
    model nebular emission line strength
    model star formation history parameters (ssfr,sfr,half-mass time)
    '''

    # different options for what to calculate
    # speedup is measured in runtime, where runtime = ncalc * model_call
    opts = {
        'restframe_optical_photometry': False,
        # currently deprecated! but framework exists in restframe_optical_properties
        'ir_priors': False,  # no cost
        'measure_spectral_features': True,  # cost = 2 runtimes
        'mags_nodust': False  # cost = 1 runtime
    }
    if kwargs:
        for key in kwargs.keys():
            opts[key] = kwargs[key]

    parnames = np.array(sample_results['model'].theta_labels())

    ##### describe number of components in Prospector model [legacy]
    sample_results['ncomp'] = np.sum(
        ['mass' in x for x in sample_results['model'].theta_labels()])

    ##### array indexes over which to sample the flatchain
    sample_idx = sample_flatchain(sample_results['flatchain'],
                                  sample_results['flatprob'],
                                  parnames,
                                  ir_priors=opts['ir_priors'],
                                  include_maxlnprob=True,
                                  nsamp=ncalc)

    ##### initialize output arrays for SFH + emission line posterior draws
    half_time, sfr_10, sfr_100, ssfr_100, stellar_mass, emp_ha, lir, luv, lmir, lbol, \
    bdec_calc, ext_5500, dn4000, ssfr_10, xray_lum = [np.zeros(shape=(ncalc)) for i in range(15)]
    if 'fagn' in parnames:
        l_agn, fmir = [np.zeros(shape=(ncalc)) for i in range(2)]

    ##### information for empirical emission line calculation ######
    d1_idx = parnames == 'dust1'
    d2_idx = parnames == 'dust2'
    didx = parnames == 'dust_index'

    ##### set up time vector for full SFHs
    t = set_sfh_time_vector(sample_results, ncalc)
    intsfr = np.zeros(shape=(t.shape[0], ncalc))

    ##### initialize sps, calculate maxprob
    # also confirm probability calculations are consistent with fit
    sps = model_setup.load_sps(**sample_results['run_params'])
    maxthetas, maxprob = maxprob_model(sample_results, sps)

    ##### set up model flux vectors
    mags = np.zeros(shape=(len(sample_results['obs']['filters']), ncalc))
    try:
        wavelengths = sps.wavelengths
    except AttributeError:
        wavelengths = sps.csp.wavelengths
    spec = np.zeros(shape=(wavelengths.shape[0], ncalc))

    ##### modify nebular status to ensure emission line production
    # don't cache, and turn on
    if sample_results['model'].params['add_neb_emission'] == 2:
        sample_results['model'].params['add_neb_emission'] = np.array([True])
    sample_results['model'].params['nebemlineinspec'] = np.array([True])

    ######## posterior sampling #########
    for jj, idx in enumerate(sample_idx):
        t1 = time.time()

        ##### model call, to set parameters
        thetas = copy(sample_results['flatchain'][idx])
        spec[:, jj], mags[:, jj], sm = sample_results['model'].mean_model(
            thetas, sample_results['obs'], sps=sps)
        '''  # MACHETE
        ##### if we don't use these parameters, find them in SPS model
        dust1 = thetas[d1_idx]
        if dust1.shape[0] == 0:
            try:
                dust1 = sps.params['dust1']
            except:
                dust1 = sps.csp.params['dust1']
        dust_idx = thetas[didx]
        if dust_idx.shape[0] == 0:
            try:
                dust_idx = sps.params['dust_index']
            except:
                dust_idx = sps.csp.params['dust_index']
        '''

        ##### extract sfh parameters
        # pass stellar mass to avoid extra model call
        sfh_params = prosp_dutils.find_sfh_params(sample_results['model'],
                                                  thetas,
                                                  sample_results['obs'],
                                                  sps,
                                                  sm=sm)

        ##### calculate SFH
        intsfr[:, jj] = prosp_dutils.return_full_sfh(t, sfh_params)

        ##### solve for half-mass assembly time
        half_time[jj] = prosp_dutils.halfmass_assembly_time(sfh_params)

        ##### calculate time-averaged SFR
        sfr_10[jj] = prosp_dutils.calculate_sfr(sfh_params,
                                                0.01,
                                                minsfr=-np.inf,
                                                maxsfr=np.inf)
        sfr_100[jj] = prosp_dutils.calculate_sfr(sfh_params,
                                                 0.1,
                                                 minsfr=-np.inf,
                                                 maxsfr=np.inf)

        ##### calculate mass, sSFR
        stellar_mass[jj] = sfh_params['mass']
        ssfr_10[jj] = sfr_10[jj] / stellar_mass[jj]
        ssfr_100[jj] = sfr_100[jj] / stellar_mass[jj]

        ##### calculate L_AGN if necessary
        if 'fagn' in parnames:
            l_agn[jj] = prosp_dutils.measure_agn_luminosity(
                thetas[parnames == 'fagn'], sps, sfh_params['mformed'])
        xray_lum[jj] = prosp_dutils.estimate_xray_lum(sfr_100[jj])
        '''  # MACHETE
        ##### empirical halpha HERE
        emp_ha[jj] = prosp_dutils.synthetic_halpha(sfr_10[jj], dust1,
                                                   thetas[d2_idx], -1.0,
                                                   dust_idx,
                                                   kriek=(sample_results['model'].params['dust_type'] == 4)[0])

        ##### dust extinction at 5500 angstroms
        ext_5500[jj] = dust1 + thetas[d2_idx]

        ##### empirical Balmer decrement
        bdec_calc[jj] = prosp_dutils.calc_balmer_dec(dust1, thetas[d2_idx], -1.0,
                                                     dust_idx,
                                                     kriek=(sample_results['model'].params['dust_type'] == 4)[0])
        '''

        ##### lbol
        lbol[jj] = prosp_dutils.measure_lbol(sps, sfh_params['mformed'])

        t2 = time.time()
        ##### spectral quantities (emission line flux, Balmer decrement, Hdelta absorption, Dn4000)
        ##### and magnitudes (LIR, LUV)
        if opts['measure_spectral_features']:
            modelout = prosp_dutils.measure_restframe_properties(
                sps,
                thetas=thetas,
                model=sample_results['model'],
                obs=sample_results['obs'],
                measure_ir=True,
                measure_luv=True,
                measure_mir=True,
                emlines=True,
                abslines=True,
                restframe_optical_photometry=False)
            #### initialize arrays
            if jj == 0:
                emnames = np.array(modelout['emlines'].keys())
                nline = len(emnames)
                emflux, emeqw = [
                    np.empty(shape=(ncalc, nline)) for i in xrange(2)
                ]

                absnames = np.array(modelout['abslines'].keys())
                nabs = len(absnames)
                absflux, abseqw = [
                    np.empty(shape=(ncalc, nabs)) for i in xrange(2)
                ]

            absflux[jj, :] = np.array(
                [modelout['abslines'][line]['flux'] for line in absnames])
            abseqw[jj, :] = np.array(
                [modelout['abslines'][line]['eqw'] for line in absnames])
            emflux[jj, :] = np.array(
                [modelout['emlines'][line]['flux'] for line in emnames])
            emeqw[jj, :] = np.array(
                [modelout['emlines'][line]['eqw'] for line in emnames])

            lir[jj] = modelout['lir']
            luv[jj] = modelout['luv']
            lmir[jj] = modelout['lmir']
            dn4000[jj] = modelout['dn4000']

            if 'fagn' in parnames:
                nagn_thetas = copy(thetas)
                nagn_thetas[parnames == 'fagn'] = 0.0
                modelout = prosp_dutils.measure_restframe_properties(
                    sps,
                    thetas=nagn_thetas,
                    model=sample_results['model'],
                    obs=sample_results['obs'],
                    measure_mir=True)
                fmir[jj] = (lmir[jj] - modelout['lmir']) / lmir[jj]

        #### no dust
        if opts['mags_nodust']:
            if jj == 0:
                mags_nodust = np.zeros(
                    shape=(len(sample_results['obs']['filters']), ncalc))

            nd_thetas = copy(thetas)
            nd_thetas[d1_idx] = np.array([0.0])
            nd_thetas[d2_idx] = np.array([0.0])
            _, mags_nodust[:, jj], sm = sample_results['model'].mean_model(
                nd_thetas, sample_results['obs'], sps=sps)

        t3 = time.time()
        print('loop {0} took {1}s ({2}s for absorption+emission)'.format(
            jj, t3 - t1, t3 - t2))

    ##### CALCULATE Q16,Q50,Q84 FOR MODEL PARAMETERS
    ntheta = len(sample_results['initial_theta'])
    q_16, q_50, q_84 = (np.zeros(ntheta) + np.nan for i in range(3))
    for kk in xrange(ntheta):
        q_16[kk], q_50[kk], q_84[kk] = np.percentile(
            sample_results['flatchain'][sample_idx][:, kk], [16.0, 50.0, 84.0])

    #### QUANTILE OUTPUTS #
    extra_output = {}
    quantiles = {
        'sample_chain': sample_results['flatchain'][sample_idx],
        'parnames': parnames,
        'q16': q_16,
        'q50': q_50,
        'q84': q_84
    }
    extra_output['quantiles'] = quantiles

    ##### CALCULATE Q16,Q50,Q84 FOR EXTRA PARAMETERS
    extra_flatchain = np.dstack(
        (half_time, sfr_10, sfr_100, ssfr_10, ssfr_100, stellar_mass, emp_ha,
         bdec_calc, ext_5500, xray_lum, lbol))[0]
    if 'fagn' in parnames:
        extra_flatchain = np.append(extra_flatchain,
                                    np.hstack((l_agn[:, None], fmir[:, None])),
                                    axis=1)
    nextra = extra_flatchain.shape[1]
    q_16e, q_50e, q_84e = (np.zeros(nextra) + np.nan for i in range(3))
    for kk in xrange(nextra):
        q_16e[kk], q_50e[kk], q_84e[kk] = np.percentile(
            extra_flatchain[:, kk], [16.0, 50.0, 84.0])

    #### EXTRA PARAMETER OUTPUTS
    extras = {'flatchain': extra_flatchain,
              'parnames': np.array(['half_time', 'sfr_10', 'sfr_100', 'ssfr_10', 'ssfr_100',
                                    'stellar_mass', 'emp_ha', 'bdec_calc', 'total_ext5500',
                                    'xray_lum', 'lbol']),
              'q16': q_16e,
              'q50': q_50e,
              'q84': q_84e,
              'sfh': intsfr,
              't_sfh': t}
    if 'fagn' in parnames:
        extras['parnames'] = np.append(extras['parnames'],
                                       np.array(['l_agn', 'fmir']))
    extra_output['extras'] = extras

    #### OBSERVABLES
    observables = {'spec': spec, 'mags': mags, 'lam_obs': wavelengths}
    extra_output['observables'] = observables

    #### BEST-FITS
    bfit = {
        'maxprob_params': maxthetas,
        'maxprob': maxprob,
        'emp_ha': emp_ha[0],
        'sfh': intsfr[:, 0],
        'half_time': half_time[0],
        'sfr_10': sfr_10[0],
        'sfr_100': sfr_100[0],
        'bdec_calc': bdec_calc[0],
        'lbol': lbol[0],
        'spec': spec[:, 0],
        'mags': mags[:, 0]
    }
    extra_output['bfit'] = bfit

    ##### filters with no dust
    if opts['mags_nodust']:
        extra_output['bfit']['mags_nodust'] = mags_nodust[:, 0]
        extra_output['observables']['mags_nodust'] = mags_nodust

    ##### spectral features
    if opts['measure_spectral_features']:

        ##### FORMAT EMLINE OUTPUT
        q_16flux, q_50flux, q_84flux, q_16eqw, q_50eqw, q_84eqw = (
            np.zeros(nline) + np.nan for i in range(6))
        for kk in xrange(nline):
            q_16flux[kk], q_50flux[kk], q_84flux[kk] = np.percentile(
                emflux[:, kk], [16.0, 50.0, 84.0])
        for kk in xrange(nline):
            q_16eqw[kk], q_50eqw[kk], q_84eqw[kk] = np.percentile(
                emeqw[:, kk], [16.0, 50.0, 84.0])
        emline_info = {}
        emline_info['eqw'] = {
            'chain': emeqw,
            'q16': q_16eqw,
            'q50': q_50eqw,
            'q84': q_84eqw
        }
        emline_info['flux'] = {
            'chain': emflux,
            'q16': q_16flux,
            'q50': q_50flux,
            'q84': q_84flux
        }
        emline_info['emnames'] = emnames
        extra_output['model_emline'] = emline_info

        ##### SPECTRAL QUANTITIES
        q_16flux, q_50flux, q_84flux, q_16eqw, q_50eqw, q_84eqw = (
            np.zeros(nabs) for i in range(6))
        for kk in xrange(nabs):
            q_16flux[kk], q_50flux[kk], q_84flux[kk] = np.percentile(
                absflux[:, kk], [16.0, 50.0, 84.0])
        for kk in xrange(nabs):
            q_16eqw[kk], q_50eqw[kk], q_84eqw[kk] = np.percentile(
                abseqw[:, kk], [16.0, 50.0, 84.0])
        q_16dn, q_50dn, q_84dn = np.percentile(dn4000, [16.0, 50.0, 84.0])

        spec_info = {}
        spec_info['dn4000'] = {
            'chain': dn4000,
            'q16': q_16dn,
            'q50': q_50dn,
            'q84': q_84dn
        }
        spec_info['eqw'] = {
            'chain': abseqw,
            'q16': q_16eqw,
            'q50': q_50eqw,
            'q84': q_84eqw
        }
        spec_info['flux'] = {
            'chain': absflux,
            'q16': q_16flux,
            'q50': q_50flux,
            'q84': q_84flux
        }
        spec_info['absnames'] = absnames
        extra_output['spec_info'] = spec_info

        ### LUV + LIR
        extra_output['observables']['L_IR'] = lir
        extra_output['observables']['L_UV'] = luv
        extra_output['observables']['L_MIR'] = lmir

        ### bfits
        extra_output['bfit']['lir'] = lir[0]
        extra_output['bfit']['luv'] = luv[0]
        extra_output['bfit']['lmir'] = lmir[0]
        extra_output['bfit']['halpha_flux'] = emflux[0, emnames == 'Halpha']
        extra_output['bfit']['hbeta_flux'] = emflux[0, emnames == 'Hbeta']
        extra_output['bfit']['hdelta_flux'] = emflux[0, emnames == 'Hdelta']
        extra_output['bfit']['halpha_abs'] = absflux[0,
                                                     absnames == 'halpha_wide']
        extra_output['bfit']['hbeta_abs'] = absflux[0, absnames == 'hbeta']
        extra_output['bfit']['hdelta_abs'] = absflux[0,
                                                     absnames == 'hdelta_wide']
        extra_output['bfit']['dn4000'] = dn4000[0]

    return extra_output
Example #7
outfolder = os.getenv('APPS')+'/prospector_alpha/plots/'+outname.split('/')[-2]+'/'

sample_results, powell_results, model = prosp_dutils.load_prospector_data(outname)



#### CALC_EXTRA_QUANTITIES
parnames = sample_results['model'].theta_labels()

##### modify nebon status
# we want to be able to turn it on and off at will
if sample_results['model'].params['add_neb_emission'] == 2:
	sample_results['model'].params['add_neb_emission'] = np.array(True)

##### initialize sps
sps = model_setup.load_sps(**sample_results['run_params'])


###### MAXPROB_MODEL
# grab maximum probability, plus the thetas that gave it
maxprob = np.max(sample_results['lnprobability'])
probind = sample_results['lnprobability'] == maxprob
thetas = sample_results['chain'][probind,:]
if thetas.ndim > 1:  # boolean indexing can return a 2D array; keep the first matching row
	thetas = thetas[0]
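# An equivalent way to locate the maximum-probability sample (a sketch, not part of the original
# snippet): np.unravel_index maps the flat argmax straight to (walker, iteration) indices.
max_walker, max_iter = np.unravel_index(sample_results['lnprobability'].argmax(),
                                        sample_results['lnprobability'].shape)
# sample_results['chain'][max_walker, max_iter, :] then matches the thetas selected above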


###### TEST LIKELIHOOD
run_params = model_setup.get_run_params(param_file=param_name)
gp_spec, gp_phot = model_setup.load_gp(**run_params)
likefn = LikelihoodFunction()
Example #8
def post_processing(out_file, param_file, full_h5file=True, out_incl=False, **kwargs):
    """
    Driver. Loads output, runs post-processing routine.
    """

    # Dense, complex, terrible bookkeeping on my part here based on my naming system for prospector output files
    obj = ''
    field = ''
    base = ''
    count = 0
    slash = 0
    for i in kwargs['outname']:
        if i == '/':
            slash += 1
        elif i == '_':
            count += 1

        elif out_incl:
            if slash == 2 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif full_h5file:
            if slash == 13 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif count == 0:
            obj += i
        elif count == 1:
            field += i
        elif count == 2:
            base += i
        elif count == 3:  # the original checked an undefined name ('outy'); stop after the base segment
            break

    print(obj, field)
    full_base = obj + '_' + field + '_' + base
    img_base = 'img/' + full_base  # alternative prefix: '/home/jonathan/img/'
    print(full_base, img_base)
    pkl = 'out.pkl'

    res, pr, mod = bread.results_from(out_file)
    print('bread')

    # create flatchain, run post-processing!
    res['flatchain'] = prosp_dutils.chop_chain(res['chain'], **res['run_params'])
    res['flatprob'] = prosp_dutils.chop_chain(res['lnprobability'], **res['run_params'])
    extra_output = calc_extra_quantities(res, **kwargs)
    print('extra calculated')
    print(base)

    # choose correct folder where .h5 file is stored based on param file name
    if base == 'corr':
        folder = 'pkl_ecorr/'  # 'pkl_ncorr/'
    elif base == 'fico':
        folder = 'pkl_efico/'  # 'pkl_nfico/'
    elif base == 'masstest':
        folder = 'pkl_masstest/'
    elif base == 'tt':
        folder = 'pkl_tt/'
    else:
        folder = 'pkl_simsfh/'

    '''
    if base == 'thirty':
        folder = 'etpkls/'
    elif base == 'nth':
        folder = 'ntpkls/'
    elif base == 'fixedmet':
        folder = 'pkls/'
    elif base == 'otherbins':
        folder = 'opkls/'
    elif base == 'noelg':
        folder = 'nmpkls/'
    elif base == 'nother':
        folder = 'nopkls/'
    elif base == 'vary':
        folder = 'ecorr_pkl/'
        # folder = 'evar_pkl/'
    elif base == 'noneb' or 'evarnoneb':
        folder = 'nonebpkls/'
    elif base == 'efifty2':
        folder = 'efifty2_pkls/'
    elif base == 'evar2':
        folder = 'evar2_pkls/'
    elif base == 'masstest':
        folder = 'pkl_masstest'
    '''

    # folder = 'evar2_pkls/'  # 'efifty2_pkls/'
    # pkl extra output!
    extra = folder + full_base + '_extra_' + pkl  # full_base + '_extra_' + pkl
    print(extra)
    with open(extra, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(extra_output, newfile, pickle.HIGHEST_PROTOCOL)
    print('extra pickled')
    
    # PRINT TRACE SHOWING HOW ITERATIONS PROGRESS FOR EACH PARAMETER
    # I edited param_evol to also store lnprob, but this is a silly and long-obsolete way of doing this
    tracefig = bread.param_evol(res)  # prints tracefig
    plt.title(full_base)  # BUCKET just added
    # plt.savefig(img_base + '_tracefig.png', bbox_inches='tight')
    # plt.show()

    # FIND WALKER, ITERATION THAT GIVE MAX PROBABILITY
    # a result of the silly, long-obsolete way I'm grabbing lnprob above
    prob = res['lnprobability'][..., 0:]
    print('max', prob.max())
    row = prob.argmax() // len(prob[0])
    col = prob.argmax() - row * len(prob[0])
    walker, iteration = row, col
    print(walker, iteration)

    # PRINT CORNERFIG CONTOURS/HISTOGRAMS FOR EACH PARAMETER
    bread.subtriangle(res, start=-1000, thin=5, show_titles=True)
    plt.title(full_base)  # BUCKET just added
    # plt.savefig(img_base + '_cornerfig.png', bbox_inches='tight')
    # plt.show()
    # For FAST: truths=[mass, age, tau, dust2] (for 1824: [9.78, 0.25, -1., 0.00])

    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')

    # GET MODELED SPECTRA AND PHOTOMETRY
    # These have the same shape as the obs['spectrum'] and obs['maggies'] arrays.
    spec, phot, mfrac = mod.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)
    print('spec')

    # PLOT SPECTRUM
    wave = [f.wave_effective for f in res['obs']['filters']]
    wave = np.asarray(wave)

    # CHANGING OBSERVED TO REST FRAME WAVELENGTH
    # grabbing data from catalogs
    if field == 'cdfs':
        datname = '/home/jonathan/cdfs/cdfs.v1.6.11.cat'
        zname = '/home/jonathan/cdfs/cdfs.v1.6.9.awk.zout'
    elif field == 'cosmos':
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog
    elif field == 'uds':
        datname = '/home/jonathan/uds/uds.v1.5.10.cat'
        zname = '/home/jonathan/uds/uds.v1.5.8.awk.zout'
    elif field == 'sim':  # hacking for now
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog

    # photometry catalog
    with open(datname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, np.float) for n in hdr[2:]])
    dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype)

    # redshift catalog
    with open(zname, 'r') as fz:
        hdr_z = fz.readline().split()
    dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, np.float) for n in hdr_z[2:]])
    zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z)

    # if z_spec exists, use it; else use best-fit z_phot
    idx = dat['id'] == obj  # array filled with: False for all dat['id'] != obj, True for dat['id'] == obj
    zred = zout['z_spec'][idx][0]
    if zred == -99:
        zred = zout['z_peak'][idx][0]
    print('redshift', zred)

    # convert stored wavelengths to rest frame wavelengths
    wave_rest = []
    for j in range(len(wave)):
        wave_rest.append(wave[j]/(1 + zred))  # 1 + z = l_obs / l_emit --> l_emit = l_obs / (1 + z)
    wave_rest = np.asarray(wave_rest)
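    # A vectorized equivalent of the loop above (sketch): wave_rest = np.asarray(wave) / (1.0 + zred)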

    # OUTPUT SED results to pkls
    full_base = folder + full_base
    write_res = full_base + '_res_' + pkl  # results
    with open(write_res, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(res, newfile, pickle.HIGHEST_PROTOCOL)  # res includes res['obs']['maggies'], ['maggies_unc'], etc.
    write_sed = full_base + '_sed_' + pkl  # model sed
    with open(write_sed, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(phot, newfile, pickle.HIGHEST_PROTOCOL)
    write_restwave = full_base + '_restwave_' + pkl  # rest frame wavelengths
    with open(write_restwave, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(wave_rest, newfile, pickle.HIGHEST_PROTOCOL)
    write_spec = full_base + '_spec_' + pkl  # spectrum
    with open(write_spec, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(spec, newfile, pickle.HIGHEST_PROTOCOL)
    write_sps = full_base + '_spswave_' + pkl  # wavelengths that go with spectrum
    with open(write_sps, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(sps.wavelengths, newfile, pickle.HIGHEST_PROTOCOL)

    # OUTPUT chi_sq results to pkls
    chi_sq = ((res['obs']['maggies'] - phot) / res['obs']['maggies_unc']) ** 2
    write_chisq = full_base + '_chisq_' + pkl
    with open(write_chisq, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(chi_sq, newfile, pickle.HIGHEST_PROTOCOL)
    write_justchi = full_base + '_justchi_' + pkl
    with open(write_justchi, 'wb') as newfile:
        pickle.dump((res['obs']['maggies'] - phot) / res['obs']['maggies_unc'], newfile, pickle.HIGHEST_PROTOCOL)

    # PLOT chi_sq
    plt.plot(wave_rest, chi_sq, 'o', color='b')
    plt.title(str(obj) + r' $\chi^2$')
    plt.xlabel('Rest frame wavelength [angstroms]')
    plt.ylabel(r'$\chi^2$')
    # plt.savefig(img_base + '_chisq.png', bbox_inches='tight')
    # plt.show()

    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT! --> kl test
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for l in xrange(parnames.shape[0]):
        kl_ax.plot(res['kl_iteration'], np.log10(res['kl_divergence'][:, l]), 'o', label=parnames[l], lw=1.5,
                   linestyle='-', alpha=0.6)

    # OUTPUT kl test to pkls
    write_klit = full_base + '_klit_' + pkl
    with open(write_klit, 'wb') as newfile:
        pickle.dump(res['kl_iteration'], newfile, pickle.HIGHEST_PROTOCOL)
    write_kldvg = full_base + '_kldvg_' + pkl
    with open(write_kldvg, 'wb') as newfile:
        pickle.dump(res['kl_divergence'], newfile, pickle.HIGHEST_PROTOCOL)

    kl_ax.set_ylabel('log(KL divergence)')
    kl_ax.set_xlabel('iteration')
    # kl_ax.set_xlim(0, nsteps*1.1)
    kl_div_lim = res['run_params'].get('convergence_kl_threshold', 0.018)
    kl_ax.axhline(np.log10(kl_div_lim), linestyle='--', color='red', lw=2, zorder=1)
    kl_ax.legend(prop={'size': 5}, ncol=2, numpoints=1, markerscale=0.7)
    plt.title(str(obj) + ' kl')
Example #9
def calc_extra_quantities(sample_results, ncalc=3000, **kwargs):  # GOTEM??
    '''
    CALCULATED QUANTITIES
    model nebular emission line strength
    model star formation history parameters (ssfr,sfr,half-mass time)
    '''

    # different options for what to calculate
    # speedup is measured in runtime, where runtime = ncalc * model_call
    opts = {
        'restframe_optical_photometry': False,  # currently deprecated! but framework exists in
                                                # restframe_optical_properties
        'ir_priors': False,  # no cost
        'measure_spectral_features': True,  # cost = 2 runtimes
        'mags_nodust': False  # cost = 1 runtime
    }
    if kwargs:
        for key in kwargs.keys():
            opts[key] = kwargs[key]

    parnames = np.array(sample_results['model'].theta_labels())

    ##### describe number of components in Prospector model [legacy]
    sample_results['ncomp'] = np.sum(['mass' in x for x in sample_results['model'].theta_labels()])

    ##### array indexes over which to sample the flatchain
    sample_idx = sample_flatchain(sample_results['flatchain'], sample_results['flatprob'], parnames,
                                  ir_priors=opts['ir_priors'], include_maxlnprob=True, nsamp=ncalc)

    ##### initialize output arrays for SFH + emission line posterior draws
    half_time, sfr_10, sfr_100, ssfr_100, stellar_mass, ssfr_10 = [np.zeros(shape=(ncalc)) for i in range(6)]

    ##### set up time vector for full SFHs
    t = set_sfh_time_vector(sample_results, ncalc)  # returns array of len=18
    intsfr = np.zeros(shape=(t.shape[0], ncalc))

    ##### initialize sps, calculate maxprob
    # also confirm probability calculations are consistent with fit
    '''
    ### DOES THIS DO ANYTHING? # LOL NOPE
    sargv = sys.argv
    argdict = {'param_file': 'eelg_emission_params.py'}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    ### DOES THIS DO ANYTHING?
    '''

    sps = model_setup.load_sps(**sample_results['run_params'])
    maxthetas, maxprob = maxprob_model(sample_results, sps)

    ##### set up model flux vectors
    mags = np.zeros(shape=(len(sample_results['obs']['filters']), ncalc))
    try:
        wavelengths = sps.wavelengths
    except AttributeError:
        wavelengths = sps.csp.wavelengths
    spec = np.zeros(shape=(wavelengths.shape[0], ncalc))

    ##### modify nebular status to ensure emission line production
    # don't cache, and turn on
    if sample_results['model'].params['add_neb_emission'] == 2:
        sample_results['model'].params['add_neb_emission'] = np.array([True])
    sample_results['model'].params['nebemlineinspec'] = np.array([True])

    loop = 0
    ######## posterior sampling #########
    for jj, idx in enumerate(sample_idx):
        # t1 = time.time()

        ##### model call, to set parameters
        thetas = copy(sample_results['flatchain'][idx])
        spec[:, jj], mags[:, jj], sm = sample_results['model'].mean_model(thetas, sample_results['obs'], sps=sps)

        ##### extract sfh parameters
        # pass stellar mass to avoid extra model call
        sfh_params = prosp_dutils.find_sfh_params(sample_results['model'], thetas, sample_results['obs'], sps, sm=sm)
        # find sfh params: returns out: a dictionary containing 'sfr_fraction_full', 'mass_fraction', 'mformed', and
        #   'mass'; where 'mass_fraction' = 'sfr_fraction_full' * time_per_bin / 'sfr_fraction_full'.sum(); and where
        #   'mass_formed' = 'mass' / stellar_mass

        # print(t, 't')  # PRINTER (what t always is)
        # print(prosp_dutils.return_full_sfh(t, sfh_params), 'intsfr[:, jj]')  # PRINTER (array of all 1 value)

        ##### calculate SFH
        print(0.5)  # PRINTER
        print(prosp_dutils.return_full_sfh(t, sfh_params), 'what this do')
        intsfr[:, jj] = prosp_dutils.return_full_sfh(t, sfh_params)
        print(1)  # PRINTER
        # return_full_sfh: returns for i in len(tcalc): {where tcalc = t - np.max(10 ** sfh_params['agebins']) / 1e9
        #   tcalc = tcalc[tcalc < 0] * -1} intsfr[i] = calculate_sfr(sfh_params, deltat, tcalc=tcalc[i], **kwargs)
        # calculate_sfr: returns sfr = integrate_sfh(tcalc - timescale, tcalc, sfh_params) * sfh_params['mformed'].sum
        #   / (timescale * 1e9); and clips sfr to account for minsfr and maxsfr; sfr = units (?) * (mass OR none) / yr
        # integrate_sfh (for npSFH): returns tot_mformed = sum(weights / time_per_bin) * sfh_params['mass_fraction'];
        #   linearizes bins: to_linear_bins = 10**sfh_params['agebins'] / 1e9
        #   time_per_bin = to_linear_bins[:, 1] - to_linear_bins[:, 0]; time_bins = max(to_linear_bins) - to_linear_bins
        #   clips times outside SFH bins
        #   weights: initialized to same shape as sfh_params['mass_fraction']
        #   for bins inside t1,t2 boundaries: weights[i] = time_per_bin[i]
        #   for edge cases: weights[i] = t2 - time_bins[i, 1] or weights[i] = time_bins[i, 0] - t1
        #   calculates tot_mformed --> tot_mformed units: (time) / (time) * sfh_params['mass_fraction']
        #   --> calculate_sfr units: sfh_params['mass_fraction'] * sfh_params['mformed'] / time = none * mass / time?
        # print(len(intsfr[:, jj]), 'len')  # len 18 (column len is 18)
        # print(len(intsfr[jj, :]), 'lenny')  # len 2000 (row len is 2000)
        # --> there are 2000 columns
        '''
        print(intsfr[0, 0], '0, 0')
        print(intsfr[1, 0], '1, 0')
        print(intsfr[9, 0], '9, 0')
        # PRINTER the three above are ALWAYS the same 0.2365572
        print(intsfr[15, 0], '15, 0')
        print(intsfr[17, 0], '18, 0')
        # print(intsfr[:, 0], 'hello')  # same as 'hi-there'
        # print(intsfr[0, 1], '0, 1')
        # print(intsfr[9, 1], '9, 1')
        # PRINTER the two above are always the same
        '''

        ##### solve for half-mass assembly time
        '''  # PRINTER TEMPORARY COMMENT
        half_time[jj] = prosp_dutils.halfmass_assembly_time(sfh_params)
        '''

        ##### calculate time-averaged SFR
        sfr_10[jj] = prosp_dutils.calculate_sfr(sfh_params, 0.01, minsfr=-np.inf, maxsfr=np.inf)  # avg over 10 Myr
        print(2)
        sfr_100[jj] = prosp_dutils.calculate_sfr(sfh_params, 0.1, minsfr=-np.inf, maxsfr=np.inf)  # avg over 100 Myr
        print(3)

        '''
        # print(t, 't')  # PRINTER
        print(intsfr[:, jj], 'sfh')  # PRINTER
        print(sfr_100[jj], '100')  # PRINTER
        '''

        ##### calculate mass, sSFR
        stellar_mass[jj] = sfh_params['mass']
        ssfr_10[jj] = sfr_10[jj] / stellar_mass[jj]
        ssfr_100[jj] = sfr_100[jj] / stellar_mass[jj]

        loop += 1
        print('loop', loop)

    print(intsfr[:, 0], 'hi-there')
    print(t, 'tea')
    '''
    (array([ 0.23655724,  0.23655724,  0.23655724,  0.23655724,  0.23655724,
        0.23655724,  0.23655724,  0.23655724,  0.23655724,  0.23655724,
        0.23655724,  0.23655724,  0.23655724,  0.23655724,  0.23655724,
        0.23655724,  0.23655724,  0.        ]), 'hi-there')
    (array([  1.00000000e-03,   9.99900000e-02,   9.99900000e-02,
         1.00100000e-01,   1.00100000e-01,   2.14493583e-01,
         2.14493583e-01,   2.14729550e-01,   2.14729550e-01,
         4.60120984e-01,   4.60120984e-01,   4.60627168e-01,
         4.60627168e-01,   9.87028689e-01,   9.87028689e-01,
         9.88114529e-01,   9.88114529e-01,   2.11732493e+00]), 'tea')
    '''


    #### QUANTILE OUTPUTS #
    extra_output = {}

    ##### CALCULATE Q16,Q50,Q84 FOR EXTRA PARAMETERS
    extra_flatchain = np.dstack(
        (half_time, sfr_10, sfr_100, ssfr_10, ssfr_100, stellar_mass))[0]

    #### EXTRA PARAMETER OUTPUTS
    extras = {'flatchain': extra_flatchain,
              'parnames': np.array(
                  ['half_time', 'sfr_10', 'sfr_100', 'ssfr_10', 'ssfr_100', 'stellar_mass']),
              'sfh': intsfr,
              't_sfh': t}
    extra_output['extras'] = extras

    #### BEST-FITS
    bfit = {'maxprob_params': maxthetas,
            'maxprob': maxprob,
            'sfh': intsfr[:, 0],
            'half_time': half_time[0],
            'sfr_10': sfr_10[0],
            'sfr_100': sfr_100[0],
            'spec': spec[:, 0],
            'mags': mags[:, 0]}
    extra_output['bfit'] = bfit

    ##### spectral features
    return extra_output
Example #10
def ems(param_file, out_file, objname='21442', field='cdfs', enames=None):
    res, pr, model = bread.results_from(out_file)

    # get lnprob, based on bread.param_evol()
    chain = res['chain'][..., 0:, :]
    lnprob = res['lnprobability'][..., 0:]
    # deal with single chain (i.e. nested sampling) results
    if len(chain.shape) == 2:
        lnprob = lnprob[None, ...]
    # tracefig, prob = bread.param_evol(res)  # store probability
    # plt.show()
    print('max', lnprob.max())
    row = lnprob.argmax() // len(lnprob[0])
    col = lnprob.argmax() - row * len(lnprob[0])
    walker, iteration = row, col
    print(walker, iteration)

    # Get emission lines!
    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')
    # spec, mags, sm = model.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)  # spec [maggies/Hz]
    print('mean model')
    w = sps.wavelengths

    ### save redshift, lumdist
    z = model.params.get('zred', np.array(0.0))
    lumdist = model.params.get('lumdist', np.array(0.0))
    nebinspec = model.params.get('nebemlineinspec', True)
    model.params['zred'] = np.array(0.0)
    if lumdist:
        model.params['lumdist'] = np.array(1e-5)
    if nebinspec == False:
        model.params['nebemlineinspec'] = True

    ### if we want restframe optical photometry, generate fake obs file
    ### else generate NO obs file (don't do extra filter convolutions if not necessary)
    obs = {'filters': [], 'wavelength': None}
    ### calculate SED. comes out as maggies per Hz, @ 10pc
    spec, mags, sm = model.mean_model(res['chain'][walker, iteration, :],
                                      obs=obs,
                                      sps=sps)  # maggies/Hz at 10pc
    w = sps.wavelengths

    ### reset model
    model.params['zred'] = z
    if lumdist:
        model.params['lumdist'] = lumdist
    if nebinspec == False:
        model.params['nebemlineinspec'] = False

    spec *= dfactor_10pc / constants.L_sun.cgs.value * to_ergs  # spec * cm^2 * (s/erg) * erg/maggies = s*cm^2 / Hz
    # note erg = [g cm^2 / s^2]
    # according to measure_restframe_properties(), above line converts to Lsun / Hz
    to_flam = 3e18 / w**2  # for f_nu in erg s^-1 Hz^-1 cm^-2: to_flam [(Ang / s^-1) * (1 / Ang^2)]
    spec_flam = spec * to_flam  # erg cm^-2 s^-1 ang^-1

    smooth_spec = smooth_spectrum(w, spec_flam, 250.0, minlam=3e3, maxlam=7e3)

    ### load fsps emission line list
    loc = os.getenv('SPS_HOME') + '/data/emlines_info.dat'
    dat = np.loadtxt(loc,
                     delimiter=',',
                     dtype={
                         'names': ('lam', 'name'),
                         'formats': ('f16', 'S40')
                     })
    print('env')

    print(type(enames))
    ### define emission lines
    # legacy code compatible
    if type(enames) == bool:
        lines = np.array(
            ['Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha', '[NII]'])
        fsps_name = np.array([
            'H delta 4102', 'H beta 4861', '[OIII]4960', '[OIII]5007',
            'H alpha 6563', '[NII]6585'
        ])
    else:
        lines = enames
        fsps_name = enames

    print(lines, enames)  # None, None
    # lines = np.array(['Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha', '[NII]'])
    # fsps_name = np.array(['H delta 4102', 'H beta 4861', '[OIII]4960', '[OIII]5007', 'H alpha 6563', '[NII]6585'])
    lines = np.array([
        '[OII]1', '[OII]2', 'Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha',
        '[NII]'
    ])
    fsps_name = np.array([
        '[OII]3726', '[OII]3729', 'H delta 4102', 'H beta 4861', '[OIII]4960',
        '[OIII]5007', 'H alpha 6563', '[NII]6585'
    ])
    ##### measure emission line flux + EQW
    out = {}
    # print(1, sps.emline_wavelengths)  # big long array, 900 to 6*1e6
    for jj in xrange(len(lines)):

        # if we don't do nebular emission, zero this out
        if not hasattr(sps, 'get_nebline_luminosity'):
            print('hi')
            out[lines[jj]] = {'flux': 0.0, 'eqw': 0.0}
            continue

        ### calculate luminosity (in Lsun)
        idx = fsps_name[jj] == dat['name']
        # L_so = 3.84*10**33 erg/s
        print(sps.params['mass'].sum())
        eflux = float(sps.get_nebline_luminosity[idx] *
                      sps.params['mass'].sum())  # L_sun
        elam = float(sps.emline_wavelengths[idx])  # Angstroms!
        print(sps.get_nebline_luminosity[idx])
        print(eflux, 'e luminosity')  # typically 10**40 to 10**42
        print(elam, 'elam')  # Angstroms

        # simple continuum estimation
        tidx = np.abs(
            sps.wavelengths - elam
        ) < 100  # True for sps.wavelengths within 100 Angstroms of elam wavelengths
        eqw = eflux / np.median(
            smooth_spec[tidx]
        )  # (erg/s) / (erg cm^-2 s^-1 Ang^-1) = Ang * cm**2

        eflux *= 3.84 * 10**33  # erg/s (Lum, not flux)

        out[lines[jj]] = {'flux': eflux, 'eqw': eqw}

    return out, fsps_name
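A minimal usage sketch for ems (the output-file path here is hypothetical; the parameter file name appears elsewhere in these examples):

out, fsps_names = ems('eelg_emission_params.py', 'output/21442_cdfs_mcmc.h5',
                      objname='21442', field='cdfs')
print(out['Halpha']['flux'], out['Halpha']['eqw'])  # line luminosity [erg/s] and its EQW estimate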
Example #11
def make_all_plots(filebase=None,
                   extra_output=None,
                   outfolder=os.getenv('APPS')+'/prospector_alpha/plots/',
                   sample_results=None,
                   param_name=None,
                   plt_chain=True,
                   plt_corner=True,
                   plt_sed=True):

    '''
    Driver. Loads output, makes all plots for a given galaxy.
    '''

    # make sure the output folder exists
    if not os.path.isdir(outfolder):
        os.makedirs(outfolder)

    if sample_results is None:
        try:
            sample_results, powell_results, model, extra_output = load_prospector_data(filebase, hdf5=True, load_extra_output=True)
        except TypeError:
            return
    else: # if we already have sample results, but want powell results
        try:
            _, powell_results, model, extra_output = load_prospector_data(filebase,no_sample_results=True, hdf5=True)
        except TypeError:
            return  

    run_params = model_setup.get_run_params(param_file=param_name)
    sps = model_setup.load_sps(**run_params)

    # BEGIN PLOT ROUTINE
    print 'MAKING PLOTS FOR ' + filebase.split('/')[-1] + ' in ' + outfolder
    objname = sample_results['run_params'].get('objname','galaxy')

    # chain plot
    flatchain = prosp_dutils.chop_chain(sample_results['chain'],**sample_results['run_params'])
    if plt_chain: 
        print 'MAKING CHAIN PLOT'

        show_chain(sample_results,outname=outfolder+objname)

    # corner plot
    if plt_corner: 
        print 'MAKING CORNER PLOT'
        subcorner(sample_results, sps, copy.deepcopy(sample_results['model']),
                  extra_output,flatchain,outname=outfolder+objname)

    # sed plot
    if plt_sed:
        print 'MAKING SED PLOT'
        
        # FAST fit?
        fast = 'fastname' in sample_results['run_params']

        # plot
        pfig = sed_figure(sresults = [sample_results], extra_output=[extra_output],
                          outname=outfolder+objname+'.sed.png')
Example #12
def post_processing(out_file, param_file, out_incl=False, full_h5file=False):  # , **kwargs):
    """
    Driver. Loads output, runs post-processing routine.
    """

    obj = ''
    field = ''
    base = ''
    count = 0
    slash = 0
    for i in out_file:
        if i == '/':
            slash += 1
        elif i == '_':
            count += 1

        elif out_incl:
            if slash == 1 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif full_h5file:
            if slash == 13 and count == 1:  # slash=12
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif count == 0:
            obj += i
        elif count == 1:
            field += i
        elif count == 2:
            base += i
        elif count == 3:  # the original checked an undefined name ('outy'); stop after the base segment
            break
    print(field)
    git = '/home/jonathan/.conda/envs/snowflakes/lib/python2.7/site-packages/prospector/git/'
    full_base = 'pkl_tfn/' + obj + '_' + field + '_' + base  # 'pkl_efastnoem/'  # 'pkl_efico/'
    pkl = 'out.pkl'

    res, pr, mod = bread.results_from(out_file)
    print('bread')

    # create flatchain, run post-processing
    res['flatchain'] = prosp_dutils.chop_chain(res['chain'], **res['run_params'])
    res['flatprob'] = prosp_dutils.chop_chain(res['lnprobability'], **res['run_params'])
    '''
    extra_output = calc_extra_quantities(res)  # , **kwargs)
    print('extra calculated')
    extra = full_base + '_extra_' + pkl
    with open(extra, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(extra_output, newfile, pickle.HIGHEST_PROTOCOL)
    print('extra pickled')
    '''
    prob = res['lnprobability'][:, 0:]
    # PRINT TRACE SHOWING HOW ITERATIONS CONVERGE FOR EACH PARAMETER
    # tracefig, prob = bread.param_evol(res)  # print tracefig, store probability
    # plt.title(full_base)  # BUCKET just added
    # plt.savefig('img/' + full_base + '_tracefig.png', bbox_inches='tight')
    # plt.show()

    # FIND WALKER, ITERATION THAT GIVE MAX PROBABILITY
    print('max', prob.max())
    row = prob.argmax() // len(prob[0])
    col = prob.argmax() - row * len(prob[0])
    walker, iteration = row, col
    print(walker, iteration)

    # PRINT CORNERFIG CONTOURS/HISTOGRAMS FOR EACH PARAMETER
    '''
    bread.subtriangle(res, start=0, thin=5, show_titles=True)
    plt.title(full_base)  # BUCKET just added
    plt.savefig('img/' + full_base + '_cornerfig.png', bbox_inches='tight')
    # plt.show()
    '''
    # For FAST: truths=[mass, age, tau, dust2] (for 1824: [9.78, 0.25, -1., 0.00])

    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')

    # GET MODELED SPECTRA AND PHOTOMETRY
    # These have the same shape as the obs['spectrum'] and obs['maggies'] arrays.
    spec, phot, mfrac = mod.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)
    print('spec')

    # PLOT SPECTRUM
    wave = [f.wave_effective for f in res['obs']['filters']]
    wave = np.asarray(wave)

    # CHANGING OBSERVED TO REST FRAME WAVELENGTH
    if field == 'cdfs':
        datname = '/home/jonathan/cdfs/cdfs.v1.6.11.cat'
        zname = '/home/jonathan/cdfs/cdfs.v1.6.9.awk.zout'
    elif field == 'cosmos':
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog
    elif field == 'uds':
        datname = '/home/jonathan/uds/uds.v1.5.10.cat'
        zname = '/home/jonathan/uds/uds.v1.5.8.awk.zout'

    with open(datname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, np.float) for n in hdr[2:]])
    dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype)

    with open(zname, 'r') as fz:
        hdr_z = fz.readline().split()
    dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, np.float) for n in hdr_z[2:]])
    zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z)

    idx = dat['id'] == obj  # array filled: False when dat['id'] != obj, True when dat['id'] == obj
    print(obj)
    zred = zout['z_spec'][idx][0]  # z = z_spec
    if zred == -99:
        zred = zout['z_peak'][idx][0]  # if z_spec does not exist, z = z_phot
    print('redshift', zred)

    wave_rest = []  # REST FRAME WAVELENGTH
    for j in range(len(wave)):
        wave_rest.append(wave[j] / (1 + zred))  # 1 + z = l_obs / l_emit --> l_emit = l_obs / (1 + z)
    wave_rest = np.asarray(wave_rest)
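    # Vectorized equivalent (a sketch; gives the same array as the loop above):
    # wave_rest = wave / (1.0 + zred)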

    # OUTPUT SED results to files
    write_res = full_base + '_res_' + pkl  # results
    with open(write_res, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(res, newfile, pickle.HIGHEST_PROTOCOL)  # res includes res['obs']['maggies'] and ...['maggies_unc']
    write_sed = full_base + '_sed_' + pkl  # model sed
    with open(write_sed, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(phot, newfile, pickle.HIGHEST_PROTOCOL)
    write_restwave = full_base + '_restwave_' + pkl  # rest frame wavelengths
    with open(write_restwave, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(wave_rest, newfile, pickle.HIGHEST_PROTOCOL)
    write_spec = full_base + '_spec_' + pkl  # spectrum
    with open(write_spec, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(spec, newfile, pickle.HIGHEST_PROTOCOL)
    write_sps = full_base + '_spswave_' + pkl  # wavelengths that go with spectrum
    with open(write_sps, 'wb') as newfile:  # 'wb' because binary format
        try:
            wlengths = sps.wavelengths
        except AttributeError:
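            # some sps objects keep the wavelength grid on a nested csp attribute instead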
            wlengths = sps.csp.wavelengths
        pickle.dump(wlengths, newfile, pickle.HIGHEST_PROTOCOL)
        # pickle.dump(sps.wavelengths, newfile, pickle.HIGHEST_PROTOCOL)

    # OUTPUT CHI_SQ results to files
    chi_sq = ((res['obs']['maggies'] - phot) / res['obs']['maggies_unc']) ** 2
    write_chisq = full_base + '_chisq_' + pkl
    with open(write_chisq, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(chi_sq, newfile, pickle.HIGHEST_PROTOCOL)
    write_justchi = full_base + '_justchi_' + pkl
    with open(write_justchi, 'wb') as newfile:
        pickle.dump((res['obs']['maggies'] - phot) / res['obs']['maggies_unc'], newfile, pickle.HIGHEST_PROTOCOL)
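    # A reduced chi^2 summary could also be dumped as a quick goodness-of-fit check
    # (a sketch; counting the free parameters via the model's theta labels is an assumption):
    # n_free = len(mod.theta_labels())
    # chi_sq_red = chi_sq.sum() / (chi_sq.size - n_free)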

    # PLOT CHISQ
    '''
    plt.plot(wave_rest, chi_sq, 'o', color='b')
    plt.title(str(obj) + r' $\chi^2$')
    plt.xlabel('Rest frame wavelength [angstroms]')
    plt.ylabel(r'$\chi^2$')
    plt.savefig('img/' + full_base + '_chisq.png', bbox_inches='tight')
    # plt.show()
    '''

    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT!
    '''
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for l in xrange(parnames.shape[0]):
        kl_ax.plot(res['kl_iteration'], np.log10(res['kl_divergence'][:, l]), 'o', label=parnames[l], lw=1.5,
                   linestyle='-', alpha=0.6)
    '''
    write_klit = full_base + '_klit_' + pkl
    with open(write_klit, 'wb') as newfile:
        pickle.dump(res['kl_iteration'], newfile, pickle.HIGHEST_PROTOCOL)
    write_kldvg = full_base + '_kldvg_' + pkl
    with open(write_kldvg, 'wb') as newfile:
        pickle.dump(res['kl_divergence'], newfile, pickle.HIGHEST_PROTOCOL)
Example #13
0
def sed(objname, field, res, mod, walker, iteration, param_file, **kwargs):
    # PRINT MODEL SED FOR GALAXY
    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)

    # GET MODELED SPECTRA AND PHOTOMETRY
    # These have the same shape as the obs['spectrum'] and obs['maggies'] arrays.
    spec, phot, mfrac = mod.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)

    mean = ((res['obs']['maggies']-phot)/res['obs']['maggies']).mean()
    print(mean, 'mean')  # print normalized mean difference between model and observations

    # PLOT SPECTRUM
    wave = [f.wave_effective for f in res['obs']['filters']]
    wave = np.asarray(wave)
    print('len', len(sps.wavelengths), len(spec))

    ''' #
    plt.plot(sps.wavelengths, spec)
    plt.xlabel('Wavelength [angstroms]')
    plt.title(str(objname) + ' spec')
    plt.show()

    # ''' #
    ''' #
    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT!
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for i in xrange(parnames.shape[0]):
        kl_ax.plot(res['kl_iteration'], np.log10(res['kl_divergence'][:, i]),
                   'o', label=parnames[i], lw=1.5, linestyle='-', alpha=0.6)

    kl_ax.set_ylabel('log(KL divergence)')
    kl_ax.set_xlabel('iteration')
    # kl_ax.set_xlim(0, nsteps*1.1)

    kl_div_lim = res['run_params'].get('convergence_kl_threshold', 0.018)
    kl_ax.axhline(np.log10(kl_div_lim), linestyle='--', color='red', lw=2, zorder=1)

    kl_ax.legend(prop={'size': 5}, ncol=2, numpoints=1, markerscale=0.7)
    plt.title(str(objname) + ' kl')
    plt.show()
    # ''' #

    # field = kwargs['field']
    # CHANGING OBSERVED TO REST FRAME WAVELENGTH
    if field == 'cdfs':
        datname = '/home/jonathan/cdfs/cdfs.v1.6.11.cat'
        zname = '/home/jonathan/cdfs/cdfs.v1.6.9.awk.zout'
    elif field == 'cosmos':
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog
    elif field == 'uds':
        datname = '/home/jonathan/uds/uds.v1.5.10.cat'
        zname = '/home/jonathan/uds/uds.v1.5.8.zout'

    with open(datname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype)

    with open(zname, 'r') as fz:
        hdr_z = fz.readline().split()
    dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, float) for n in hdr_z[2:]])
    zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z)

    idx = dat['id'] == objname  # array filled: False when dat['id'] != objname, True when dat['id'] == objname
    zred = zout['z_spec'][idx][0]  # z = z_spec
    if zred == -99:
        zred = zout['z_peak'][idx][0]  # if z_spec does not exist, z = z_phot
    print(zred)

    wave_rest = []  # REST FRAME WAVELENGTH
    for i in range(len(wave)):
        wave_rest.append(wave[i]/(1 + zred))  # 1 + z = l_obs / l_emit --> l_emit = l_obs / (1 + z)

    # PLOT MODEL SED BEST FIT, INPUT PHOT
    yerr = res['obs']['maggies_unc']
    plt.subplot(111, xscale="log", yscale="log")
    plt.errorbar(wave_rest, res['obs']['maggies'], yerr=yerr, marker='o', linestyle='', color='purple',
                 label='Observed photometry')
    plt.plot(wave_rest, phot, 'D', label='Model', color='b', markerfacecolor='None', markersize=10,
             markeredgewidth=1.25, markeredgecolor='k')  # label='Model at {},{}'.format(walker, iteration)
    plt.legend(loc="best", fontsize=20)
    plt.title(str(field) + '-' + str(objname) + ' SED')
    plt.plot(sps.wavelengths, spec, color='k', alpha=0.5)
    plt.xlabel(r'Wavelength (Rest) [$\AA$]')
    plt.ylabel('Maggies')
    plt.xlim(10**3, 2.5*10**4)
    plt.ylim(10**-5, 4*10**3)
    plt.show()

    # ''' #
    # PLOT CHI_SQ BESTFIT
    chi_sq = ((res['obs']['maggies'] - phot) / res['obs']['maggies_unc']) ** 2
    plt.plot(wave_rest, chi_sq, 'o', color='b')
    plt.title(str(objname) + r' $\chi^2$')
    plt.xlabel('Rest frame wavelength [angstroms]')
    plt.ylabel(r'$\chi^2$')
    plt.show()
Example #14
0
def compare_sed():

	### custom parameter file
	### where mass is a six-parameter thing
	param_file = 'brownseds_np_params.py'

	run_params = model_setup.get_run_params(param_file=param_file)
	sps = model_setup.load_sps(**run_params)
	model = model_setup.load_model(**run_params)
	obs = model_setup.load_obs(**run_params)
	thetas = model.initial_theta

	### create star formation history and metallicity history
	mformed = 1e10
	m_constant = return_declining_sfh(model,mformed, tau=1e12)
	met = return_zt(model)
	#met = np.ones_like(met)
	thetas[model.theta_labels().index('logzsol')] = 0.0

	### simple
	i1, i2 = model.theta_index['mass']
	thetas[i1:i2] = m_constant
	nsamp = 21
	met_comp = np.linspace(-1,0,nsamp)
	spec, phot = [], []
	for m in met_comp:
		thetas[model.theta_labels().index('logzsol')] = m
		specx, photx, x = model.mean_model(thetas, obs, sps=sps)
		spec.append(specx)
		phot.append(photx)

	### complex
	for i in xrange(met.shape[0]):

		# zero out all masses
		thetas[i1:i2] = np.zeros_like(m_constant)

		# fill in masses and metallicities
		thetas[model.theta_labels().index('logzsol')] = np.log10(met[i])
		thetas[i1+i] = m_constant[i]

		# generate spectrum, add to existing spectrum
		sps.ssp.params.dirtiness = 1
		specx, photx, x = model.mean_model(thetas, obs, sps=sps)

		if i == 0:
			spec_agez, phot_agez = specx, photx
		else:
			spec_agez += specx
			phot_agez += photx

	### plot
	fig, ax = plt.subplots(1,1, figsize=(8, 7))
	cmap = get_cmap(nsamp)
	for i in xrange(0,nsamp,4): ax.plot(sps.wavelengths/1e4, np.log10(spec[i] / spec_agez),color=cmap(i), label=met_comp[i])
	ax.axhline(0, linestyle='--', color='0.1')
	ax.legend(loc=4,prop={'size':20},title=r'log(Z/Z$_{\odot}$) [fixed]')

	ax.set_xlim(0.1,10)
	ax.set_xscale('log',nonposx='clip',subsx=(2,5))
	ax.xaxis.set_minor_formatter(minorFormatter)
	ax.xaxis.set_major_formatter(majorFormatter)

	ax.set_ylabel(r'log(f$_{\mathrm{Z}_{\mathrm{fixed}}}$ / f$_{\mathrm{Z(t)}}$)')
	ax.set_xlabel('wavelength [microns]')
	ax.set_ylim(-0.4,0.4)
	plt.tight_layout()
	plt.show()
	print 1/0  # deliberate ZeroDivisionError: crude stop to halt execution after plotting
Example #15
0
# NOTE: the opening of get_cmap() was truncated in this extract; only its closing
# lines survive. The helper returns a function that maps an integer index to an
# RGBA color via a matplotlib ScalarMappable (scalar_map) built in the missing lines.
        return scalar_map.to_rgba(index)
    return map_index_to_rgb_color


#### format those log plots! 
minorFormatter = jLogFormatter(base=10, labelOnlyBase=False)
majorFormatter = jLogFormatter(base=10, labelOnlyBase=True)

#### nsamp
nsamp = 15
cmap = get_cmap(nsamp)

### load parameter file, run params, and sps
param_file = '/Users/joel/code/python/prospector_alpha/parameter_files/np_mocks_smooth/np_mocks_smooth_params.py'
run_params = model_setup.get_run_params(param_file=param_file)
sps = model_setup.load_sps(**run_params)

#### CALCULATE SPECTRA
print sps.ssp.params['dust1'],sps.ssp.params['dust2']
sps.ssp.params['dust1'] = 0.0
sps.ssp.params['dust2'] = 0.0
wave, ssp_spectra = sps.ssp.get_spectrum(tage=0, peraa=False)
wave /= 1e4
xlim = (0.1,3)
plt_lam = (wave > xlim[0]) & (wave < xlim[1])
factor = 3e18 / sps.wavelengths[plt_lam]
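# 3e18 is roughly c in Angstrom/s, so factor = c / lambda gives nu in Hz; multiplying
# an f_nu spectrum by it yields nu*f_nu (a sketch of the assumed unit convention here).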

#### PLOT SPECTRA
fig, ax = plt.subplots(1,1, figsize=(10,10))

nskip = 4