Code example #1
import numpy as np
import matplotlib.pyplot as plt
import prospect.io.read_results as bread


def print_kl(out_file, true_obj):
    res, pr, mod = bread.results_from(out_file)

    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT!
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for i in range(parnames.shape[0]):
        kl_ax.plot(res['kl_iteration'],
                   np.log10(res['kl_divergence'][:, i]),
                   'o',
                   label=parnames[i],
                   lw=1.5,
                   linestyle='-',
                   alpha=0.6)

    kl_ax.set_ylabel('log(KL divergence)')
    kl_ax.set_xlabel('iteration')
    # kl_ax.set_xlim(0, nsteps*1.1)

    kl_div_lim = res['run_params'].get('convergence_kl_threshold', 0.018)
    kl_ax.axhline(np.log10(kl_div_lim),
                  linestyle='--',
                  color='red',
                  lw=2,
                  zorder=1)

    kl_ax.legend(prop={'size': 5}, ncol=2, numpoints=1, markerscale=0.7)
    plt.title(true_obj + ' kl')
    plt.show()
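In these examples, `results_from` is `prospect.io.read_results.results_from`; it returns the results dictionary, a second object (the observations dictionary in recent versions, the Powell optimization results in older forks, hence the `pr` name above), and the model. A minimal call of the helper above might look like the following, where the file and object names are placeholders:

print_kl('galaxy_mcmc.h5', 'obj1824')  # hypothetical output file and object label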
Code example #2
def read_results(filename):
    res, obs, mod = reader.results_from(path_res + filename)
    # update data table
    res['run_params']['data_table'] = path_wdir + 'data/halo7d_with_phot.fits'
    mod = reader.get_model(res)
    # update filters
    filternames = [str(ii) for ii in obs['filters']]
    obs['filters'] = load_filters(filternames, directory=filter_folder)
    # load sps
    sps = reader.get_sps(res)
    return (res, obs, mod, sps)
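This function leans on module-level names (`reader`, `path_res`, `path_wdir`, `filter_folder`) that the snippet does not show. A plausible preamble, assuming the standard prospector and sedpy imports and placeholder paths, would be:

import prospect.io.read_results as reader
from sedpy.observate import load_filters

# placeholder paths; the real values live elsewhere in the project
path_res = 'results/'
path_wdir = 'work/'
filter_folder = 'filters/'

res, obs, mod, sps = read_results('halo7d_fit.h5')  # hypothetical file name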
Code example #3
def sample_posterior(outname=None, shortname=None, mass_folder=None):
    # I/O
    # paramfile = model_setup.import_module_from_file(param_name)
    # outname = paramfile.run_params['outfile']
    # sample_results, powell_results, model, eout = load_prospector_data(outname, hdf5=True, load_extra_output=True)
    sample_results, powell_results, model = bread.results_from(outname)

    # create useful quantities
    sample_results['flatchain'] = chop_chain(sample_results['chain'],
                                             **sample_results['run_params'])
    sample_results['flatprob'] = chop_chain(sample_results['lnprobability'],
                                            **sample_results['run_params'])

    sps = model_setup.load_sps(**sample_results['run_params'])
    # sps = paramfile.load_sps(**sample_results['run_params'])
    # obs = paramfile.load_obs(**sample_results['run_params'])

    # sample from posterior
    nsamp = 3000
    good = np.isfinite(sample_results['flatprob'])
    sample_idx = np.random.choice(np.where(good)[0], nsamp)

    # define outputs
    mfrac = np.linspace(0, 0.95, 20)
    mfrac_out = np.zeros(shape=(nsamp, mfrac.shape[0]))
    for jj, idx in enumerate(sample_idx):
        print(jj)
        ##### model call, to set parameters
        thetas = copy.copy(sample_results['flatchain'][idx])
        spec, mags, sm = sample_results['model'].mean_model(
            thetas, sample_results['obs'], sps=sps)

        ##### extract sfh parameters
        sfh_params = find_sfh_params(sample_results['model'],
                                     thetas,
                                     sample_results['obs'],
                                     sps,
                                     sm=sm)

        for ii, m in enumerate(mfrac):
            mfrac_out[jj, ii] = halfmass_assembly_time(sfh_params, c=m)

    # fixing negatives
    mfrac_out = np.clip(mfrac_out, 0.0, np.inf)
    # write out
    out = np.percentile(mfrac_out, [50, 84, 16], axis=0)
    with open('out/' + mass_folder + shortname + 'mass.txt', 'w') as f:
        f.write('# mass_fraction median_time err_up err_down\n')
        for ii in range(out.shape[1]):
            f.write('{:.2f} {:.3f} {:.3f} {:.3f}\n'.format(
                mfrac[ii], out[0, ii], out[1, ii], out[2, ii]))
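The output is a plain whitespace-separated table with a commented header, so it can be read back with numpy. A sketch, with a placeholder path standing in for the 'out/' file written above:

import numpy as np

# columns, as written above: mass_fraction median_time err_up err_down
data = np.genfromtxt('out/some_folder/some_galaxy_mass.txt', names=True)
print(data['mass_fraction'], data['median_time'])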
Code example #4
def printer(out_file, percs=True, sfhtest=False, masstest=False, fast=False, five=False, quiet=False, draw1=False,
            fixmet=False):
    if not quiet:
        print(out_file)
        print(masstest)
    res, pr, mod = bread.results_from(out_file)

    # PRINT CORNERFIG CONTOURS/HISTOGRAMS FOR EACH PARAMETER
    return md(res, start=-1000, thin=5, percs=percs, sfhtest=sfhtest, masstest=masstest, fast=fast, five=five,
              quiet=quiet, draw1=draw1, fixmet=fixmet)  # -650
Code example #5
File: posteriors.py Project: camlawlorforsyth/HFF
def extract_posteriors():

    logMs, logZs, dusts, t0s, logTaus = [], [], [], [], []
    clusters = ['a370', 'a1063', 'a2744', 'm416', 'm717', 'm1149']
    for cluster in clusters:

        h5_files = glob.glob('{}/h5/*.h5'.format(cluster))

        # normalize path separators for Windows compatibility
        h5s = [file.replace(os.sep, '/') for file in h5_files]

        for file in h5s:
            result, obs, _ = reader.results_from(file, dangerous=True)

            samples = sample_posterior(result['chain'],
                                       weights=result['weights'],
                                       nsample=len(result['chain']))

            samples[:, 1] = np.log10(samples[:, 1])
            samples[:, 5] = np.log10(samples[:, 5])

            logMs.append(list(samples[:, 1]))
            logZs.append(list(samples[:, 2]))
            dusts.append(list(samples[:, 3]))
            t0s.append(list(samples[:, 4]))
            logTaus.append(list(samples[:, 5]))

    logM = flatten_posteriors(logMs)
    logZ = flatten_posteriors(logZs)
    dust = flatten_posteriors(dusts)
    t0 = flatten_posteriors(t0s)
    logTau = flatten_posteriors(logTaus)

    # np.savez('subsample_posteriors.npz', logM=logM, logZ=logZ, dust=dust,
    #          t0=t0, logTau=logTau)

    solMetal = u.def_unit(['solMetal', 'Z_sun', 'Zsun'],
                          prefixes=False,
                          format={
                              'latex': r'Z_{\odot}',
                              'unicode': 'Z\N{SUN}'
                          })

    table = Table(
        [logM / u.solMass, logZ / solMetal, dust, t0 * u.Gyr, logTau / u.Gyr],
        names=('logM', 'logZ', 'dust', 't0', 'logTau'))
    table.write('boneyard/subsample_posteriors.fits')

    return
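`flatten_posteriors` is project-specific and not shown here. Since each input is a list of per-galaxy sample lists, a minimal stand-in (an assumption, not the HFF implementation) would simply concatenate them:

import numpy as np

def flatten_posteriors(list_of_sample_lists):
    # concatenate the per-galaxy posterior samples into one flat array
    return np.concatenate([np.asarray(s) for s in list_of_sample_lists])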
Code example #6
File: results.py Project: camlawlorforsyth/HFF
def get_results(cluster, ID, binNum, results_type='dynesty', version=''):
    # convenience function

    infile = '{}/h5/{}_ID_{}_bin_{}{}.h5'.format(cluster, cluster, ID, binNum,
                                                 version)

    result, obs, _ = reader.results_from(infile, dangerous=True)
    model = reader.get_model(result)
    sps = reader.get_sps(result)

    # t_hr = result['sampling_duration']/3600
    # print('The sampling took {:.2f} hours'.format(t_hr))

    return result, obs, model, sps
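A usage sketch for this convenience function, with placeholder cluster/ID/bin values:

result, obs, model, sps = get_results('a2744', 42, 3)  # hypothetical inputs
print(result['theta_labels'])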
Code example #7
File: sfh_output.py Project: rpan04/prospector-eelgs
def post_processing(out_file, filename, **kwargs):
    '''
    Driver. Loads output, runs post-processing routine.
    '''

    sample_results, powell_results, model = read_results.results_from(out_file)

    ### create flatchain, run post-processing
    sample_results['flatchain'] = prosp_dutils.chop_chain(sample_results['chain'], **sample_results['run_params'])
    sample_results['flatprob'] = prosp_dutils.chop_chain(sample_results['lnprobability'],
                                                         **sample_results['run_params'])
    extra_output = calc_extra_quantities(sample_results, **kwargs)

    with open(filename, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(extra_output, newfile, pickle.HIGHEST_PROTOCOL)
Code example #8
File: prospector.py Project: MartinBriday/pyprosp
def read_h5(filename, **kwargs):
    """
    Read and return the prospector fit results saved in a given .h5 file.

    Parameters
    ----------
    filename : [string]
        Name of the file in which the results to be read are saved.

    Returns
    -------
    dict, dict, SedModel
    """
    import prospect.io.read_results as reader
    return reader.results_from(filename, **kwargs)
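Usage is a thin pass-through to `prospect.io.read_results.results_from`, e.g.:

res, obs, model = read_h5('fit_results.h5')  # hypothetical file name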
Code example #9
File: degeneracies.py Project: camlawlorforsyth/HFF
def determine_age_gradients(cluster, ID, bins, radius_array, version=''):
    
    mwas = []
    radii = []
    for binNum, radius in zip(bins, radius_array):
        infile = '{}/h5/{}_ID_{}_bin_{}{}.h5'.format(
            cluster, cluster, ID, binNum, version)
        result, obs, _ = reader.results_from(infile, dangerous=True)
        
        samples = sample_posterior(result['chain'], weights=result['weights'])
        
        mwas.append(mwa_calc(samples[:, 5], samples[:, 4], power=1))
        
        radii.append(np.full(len(samples), radius))
    
    return np.concatenate(radii).ravel(), np.concatenate(mwas).ravel()
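The two returned arrays line up element by element, so a quick age-gradient plot is a single scatter call. A sketch, with placeholder inputs (the mass-weighted-age units depend on `mwa_calc`):

import matplotlib.pyplot as plt

radii, mwas = determine_age_gradients('a2744', 42, [1, 2, 3], [0.5, 1.0, 1.5])
plt.scatter(radii, mwas, s=1, alpha=0.3)
plt.xlabel('radius')
plt.ylabel('mass-weighted age')
plt.show()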
Code example #10
File: only_sfh.py Project: rpan04/prospector-eelgs
def post_processing(outname, **kwargs):
    '''
    Driver. Loads output, runs post-processing routine.
    '''
    print(outname)

    sample_results, powell_results, model = read_results.results_from(outname)

    ### create flatchain, run post-processing
    sample_results['flatchain'] = prosp_dutils.chop_chain(sample_results['chain'], **sample_results['run_params'])
    sample_results['flatprob'] = prosp_dutils.chop_chain(sample_results['lnprobability'],
                                                         **sample_results['run_params'])
    extra_output = calc_extra_quantities(sample_results, **kwargs)

    print(len(extra_output['extras']['sfh'][0]), '2000?')  # YAY

    '''
    print('len flatchain', len(extra_output['extras']['flatchain']))  # 2000 :D
    print('index 0', extra_output['extras']['flatchain'][0])  # should return set of params (half time, sfr10, etc)?
        # Returns array with len 6: (half_time [units?], sfr10, sfr100, ssfr10, ssfr100, stellar_mass)?
    print('thing', extra_output['extras']['flatchain'])  # Returns 2000 arrays each with len(6)
    '''

    print(extra_output['bfit']['maxprob_params'])  # [10.34, 0.33, 0.59, 0.0023, 0.03, 0.0095, 0.69, -2, -0.2, -1]
    # emission_decoupled_recent: [10.32, 0.32, 0.61, 0.0246, 0.00155, 0.015, 0.7, -2, -0.2, -1]
    # 10 thetas are: logmass, sfr_frac1, sfr_frac2, sfr_frac3, sfr_frac4, sfr_frac5, dust2, logzsol, gas_logz, gas_logu
    # print('max', len(extra_output['bfit']['maxprob_params']))  # 10
    print(extra_output['bfit']['half_time'])  # -1.0216794961; emission_decoupled_recent: -1.03132084372
    print(extra_output['bfit']['sfr_10'])  # 66.4716596583; emission_decoupled_recent: 66.2740632613
    print(extra_output['bfit']['sfr_100'])  # 114.443681589; emission_decoupled_recent: 121.448368493

    # plt.plot(extra_output['extras']['t_sfh'], extra_output['bfit']['sfh'])
    # plt.plot(extra_output['extras']['t_sfh'], extra_output['extras']['sfh'])

    plt.plot(extra_output['extras']['t_sfh'], extra_output['bfit']['sfh'])
    plt.ylabel(r'M$_\odot$ yr$^{-1}$')
    plt.xlabel('Gyr')
    plt.show()
Code example #11
def post_processing(param_name, outname=None, **kwargs):
    '''
    Driver. Loads output, runs post-processing routine.
    '''

    sample_results, powell_results, model = read_results.results_from(outname)

    ### create flatchain, run post-processing
    sample_results['flatchain'] = prosp_dutils.chop_chain(
        sample_results['chain'], **sample_results['run_params'])
    sample_results['flatprob'] = prosp_dutils.chop_chain(
        sample_results['lnprobability'], **sample_results['run_params'])
    extra_output = calc_extra_quantities(sample_results, **kwargs)

    print('max', extra_output['bfit']['maxprob_params'])
    print('half', extra_output['bfit']['half_time'])
    print('sfr_10', extra_output['bfit']['sfr_10'])
    print('sfr_100', extra_output['bfit']['sfr_100'])

    plt.plot(extra_output['extras']['t_sfh'], extra_output['bfit']['sfh'])
    plt.ylabel(r'M$_\odot$ yr$^{-1}$')
    plt.xlabel('Gyr')
    plt.show()
Code example #12
def save_mass_metallicity_images():

    HFF = Table.read('output/tables/nbCGs.fits')

    for cluster, ID in zip(HFF['cluster'], HFF['ID']):
        # ensure the output directories for the images are available
        os.makedirs('{}/logM_images'.format(cluster), exist_ok=True)
        os.makedirs('{}/logZ_images'.format(cluster), exist_ok=True)

        bins_image = core.open_image(cluster, ID, 'bins')
        bins = np.sort(np.unique(
            bins_image[~np.isnan(bins_image)])).astype(int)

        mass = bins_image.copy()
        metal = bins_image.copy()

        if not (cluster == 'm717' and ID == 3692):  # fitting issue for bin_4
            for binNum in bins:  # loop over all the bins
                result, obs, _ = reader.results_from(
                    '{}/h5/{}_ID_{}_bin_{}.h5'.format(cluster, cluster, ID,
                                                      binNum),
                    dangerous=True)
                samples = sample_posterior(result['chain'],
                                           weights=result['weights'])

                mass[mass == binNum] = np.percentile(samples[:, 1], 50)
                metal[metal == binNum] = np.percentile(samples[:, 2], 50)

            hdu = fits.PrimaryHDU(mass)
            hdu.writeto('{}/logM_images/{}_ID_{}_logM.fits'.format(
                cluster, cluster, ID))

            hdu = fits.PrimaryHDU(metal)
            hdu.writeto('{}/logZ_images/{}_ID_{}_logZ.fits'.format(
                cluster, cluster, ID))

    return
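The saved maps can be read back with astropy for inspection; a sketch with placeholder cluster/ID values mirroring the writeto() paths above:

from astropy.io import fits

logM_map = fits.getdata('a2744/logM_images/a2744_ID_42_logM.fits')
logZ_map = fits.getdata('a2744/logZ_images/a2744_ID_42_logZ.fits')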
Code example #13
import os
import sys

from prospect.io import write_results
from prospect.io import read_results as pr
from prospect import fitting
from prospect.models import model_setup
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot

# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'restart_from': '', 'niter': 1024}
clargs = model_setup.parse_args(sargv, argdict=argdict)

# ----------
# Result object and Globals
# ----------
result, global_obs, global_model = pr.results_from(clargs["restart_from"])
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
assert is_emcee, "Result file does not have a chain of the proper shape."

# SPS Model instance (with libraries check)
sps = pr.get_sps(result)
run_params = result["run_params"]
run_params.update(clargs)

# Noise model (this should be doable via read_results)
from prospect.models.model_setup import import_module_from_string
param_file = (result['run_params'].get('param_file',
                                       ''), result.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
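The snippet stops before the actual restart. Since an emcee chain has shape (nwalkers, niter, ndim), one hedged way to continue (a sketch, not the script's verbatim continuation) is to reuse the final walker positions as the new initial positions:

# last recorded position of every walker -> shape (nwalkers, ndim)
initial_positions = result['chain'][:, -1, :]
niter = run_params.get('niter', 1024)
print('restarting {} walkers for {} more iterations'.format(
    initial_positions.shape[0], niter))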
Code example #14
def main(field, galaxy_seq):

    #vers = (np.__version__, scipy.__version__, h5py.__version__, fsps.__version__, prospect.__version__)
    #print("Numpy: {}\nScipy: {}\nH5PY: {}\nFSPS: {}\nProspect: {}".format(*vers))

    # -------------- Decide field and filters
    # Read in catalog from Lou
    if 'North' in field:
        df = pandas.read_pickle(adap_dir + 'GOODS_North_SNeIa_host_phot.pkl')

        all_filters = [
            'LBC_U_FLUX', 'ACS_F435W_FLUX', 'ACS_F606W_FLUX', 'ACS_F775W_FLUX',
            'ACS_F814W_FLUX', 'ACS_F850LP_FLUX', 'WFC3_F105W_FLUX',
            'WFC3_F125W_FLUX', 'WFC3_F140W_FLUX', 'WFC3_F160W_FLUX',
            'MOIRCS_K_FLUX', 'CFHT_Ks_FLUX', 'IRAC_CH1_SCANDELS_FLUX',
            'IRAC_CH2_SCANDELS_FLUX', 'IRAC_CH3_FLUX', 'IRAC_CH4_FLUX'
        ]

        #all_filters = ['LBC_U_FLUX', 'ACS_F435W_FLUX', 'ACS_F606W_FLUX',
        #'ACS_F775W_FLUX', 'ACS_F850LP_FLUX']

        seq = np.array(df['ID'])
        i = int(np.where(seq == galaxy_seq)[0])

    elif 'South' in field:
        df = pandas.read_pickle(adap_dir + 'GOODS_South_SNeIa_host_phot.pkl')

        all_filters = [
            'CTIO_U_FLUX', 'ACS_F435W_FLUX', 'ACS_F606W_FLUX',
            'ACS_F775W_FLUX', 'ACS_F814W_FLUX', 'ACS_F850LP_FLUX',
            'WFC3_F098M_FLUX', 'WFC3_F105W_FLUX', 'WFC3_F125W_FLUX',
            'WFC3_F160W_FLUX', 'HAWKI_KS_FLUX', 'IRAC_CH1_FLUX',
            'IRAC_CH2_FLUX', 'IRAC_CH3_FLUX', 'IRAC_CH4_FLUX'
        ]

        #all_filters = ['CTIO_U_FLUX', 'ACS_F435W_FLUX', 'ACS_F606W_FLUX',
        #'ACS_F775W_FLUX', 'ACS_F850LP_FLUX']

        seq = np.array(df['Seq'])
        i = int(np.where(seq == galaxy_seq)[0])

    #print('Read in pickle with the following columns:')
    #print(df.columns)
    #print('Rows in DataFrame:', len(df))

    print("Match index:", i, "for Seq:", galaxy_seq)

    # -------------- Preliminary stuff
    # Set up for emcee
    nwalkers = 1000
    niter = 500

    ndim = 12

    # Other set up
    obj_ra = df['RA'][i]
    obj_dec = df['DEC'][i]

    obj_z = df['zbest'][i]

    print("Object redshift:", obj_z)
    age_at_z = astropy_cosmo.age(obj_z).value
    print("Age of Universe at object redshift [Gyr]:", age_at_z)

    # ------------- Get obs data
    fluxes = []
    fluxes_unc = []
    useable_filters = []

    for ft in range(len(all_filters)):
        filter_name = all_filters[ft]

        flux = df[filter_name][i]
        fluxerr = df[filter_name + 'ERR'][i]

        if np.isnan(flux):
            continue

        if flux <= 0.0:
            continue

        if (fluxerr < 0) or np.isnan(fluxerr):
            fluxerr = 0.1 * flux

        fluxes.append(flux)
        fluxes_unc.append(fluxerr)
        useable_filters.append(filter_name)

    #print("\n")
    #print(df.loc[i])
    #print(fluxes, len(fluxes))
    #print(useable_filters, len(useable_filters))

    fluxes = np.array(fluxes)
    fluxes_unc = np.array(fluxes_unc)

    # Now build the prospector observation
    obs = build_obs(fluxes, fluxes_unc, useable_filters)

    # Set params for run
    run_params = {}
    run_params["object_redshift"] = obj_z
    run_params["fixed_metallicity"] = None
    run_params["add_duste"] = True
    #run_params["dust_type"] = 4

    run_params["zcontinuous"] = 1

    # Generate the model SED at the initial value of theta
    #theta = model.theta.copy()
    #initial_spec, initial_phot, initial_mfrac = model.sed(theta, obs=obs, sps=sps)

    verbose = True
    run_params["verbose"] = verbose

    model = build_model(**run_params)
    print("\nInitial free parameter vector theta:\n  {}\n".format(model.theta))
    #print("Initial parameter dictionary:\n{}".format(model.params))
    print("\n----------------------- Model details: -----------------------")
    print(model)
    print(
        "----------------------- End model details. -----------------------\n")

    # Here we will run all our building functions
    obs = build_obs(fluxes, fluxes_unc, useable_filters)
    sps = build_sps(**run_params)

    #plot_data(obs)
    #sys.exit(0)

    # --- start fitting ----
    # Set this to False if you don't want to do another optimization
    # before emcee sampling (but note that the "optimization" entry
    # in the output dictionary will be (None, 0.) in this case)
    # If set to true then another round of optimization will be performed
    # before sampling begins and the "optimization" entry of the output
    # will be populated.
    """
    run_params["optimize"] = False
    run_params["min_method"] = 'lm'
    # We'll start minimization from "nmin" separate places, 
    # the first based on the current values of each parameter and the 
    # rest drawn from the prior.  Starting from these extra draws 
    # can guard against local minima, or problems caused by 
    # starting at the edge of a prior (e.g. dust2=0.0)
    run_params["nmin"] = 5

    run_params["emcee"] = True
    run_params["dynesty"] = False
    # Number of emcee walkers
    run_params["nwalkers"] = nwalkers
    # Number of iterations of the MCMC sampling
    run_params["niter"] = niter
    # Number of iterations in each round of burn-in
    # After each round, the walkers are reinitialized based on the 
    # locations of the highest probability half of the walkers.
    run_params["nburn"] = [8, 16, 32, 64]
    run_params["progress"] = True

    hfile = adap_dir + "emcee_" + field + "_" + str(galaxy_seq) + ".h5"

    if not os.path.isfile(hfile):

        print("Now running with Emcee.")
        output = fit_model(obs, model, sps, lnprobfn=lnprobfn, **run_params)

        print('done emcee in {0}s'.format(output["sampling"][1]))
    
        writer.write_hdf5(hfile, run_params, model, obs,
                          output["sampling"][0], output["optimization"][0],
                          tsample=output["sampling"][1],
                          toptimize=output["optimization"][1])
    
        print('Finished with Seq: ' + str(galaxy_seq))

    """

    hfile = adap_dir + "dynesty_" + field + "_" + str(galaxy_seq) + ".h5"

    if not os.path.isfile(hfile):

        print("Now running with Dynesty.")

        run_params["emcee"] = False
        run_params["dynesty"] = True
        run_params["nested_method"] = "rwalk"
        run_params["nlive_init"] = 400
        run_params["nlive_batch"] = 200
        run_params["nested_dlogz_init"] = 0.05
        run_params["nested_posterior_thresh"] = 0.05
        run_params["nested_maxcall"] = int(1e6)

        #from multiprocessing import Pool
        #with Pool(6) as pool:
        #    run_params["pool"] = pool
        output = fit_model(obs, model, sps, lnprobfn=lnprobfn, **run_params)
        print('done dynesty in {0}s'.format(output["sampling"][1]))

        writer.write_hdf5(hfile,
                          run_params,
                          model,
                          obs,
                          output["sampling"][0],
                          output["optimization"][0],
                          tsample=output["sampling"][1],
                          toptimize=output["optimization"][1])

        print('Finished with Seq: ' + str(galaxy_seq))

    # -------------------------
    # Visualizing results

    results_type = "dynesty"  # "emcee" | "dynesty"
    # grab results (dictionary), the obs dictionary, and our corresponding models
    # When using parameter files set `dangerous=True`
    #result, obs, _ = reader.results_from("{}_" + str(galaxy_seq) + \
    #                 ".h5".format(results_type), dangerous=False)

    result, obs, _ = reader.results_from(adap_dir + results_type + "_" + \
                     field + "_" + str(galaxy_seq) + ".h5", dangerous=False)

    #The following commented lines reconstruct the model and sps object,
    # if a parameter file containing the `build_*` methods was saved along with the results
    #model = reader.get_model(result)
    #sps = reader.get_sps(result)

    # let's look at what's stored in the `result` dictionary
    print(result.keys())

    parnames = np.array(result['theta_labels'])
    print('Parameters in this model:', parnames)
    """
    if results_type == "emcee":

        chosen = np.random.choice(result["run_params"]["nwalkers"], size=150, replace=False)
        tracefig = reader.traceplot(result, figsize=(10,6), chains=chosen)

        tracefig.savefig(adap_dir + 'trace_' + field + '_' + str(galaxy_seq) + '.pdf', 
            dpi=200, bbox_inches='tight')

    else:
        tracefig = reader.traceplot(result, figsize=(10,6))
        tracefig.savefig(adap_dir + 'trace_' + field + '_' + str(galaxy_seq) + '.pdf', 
            dpi=200, bbox_inches='tight')
    """

    # Get chain for corner plot
    if results_type == 'emcee':
        trace = result['chain']
        thin = 5
        trace = trace[:, ::thin, :]
        samples = trace.reshape(trace.shape[0] * trace.shape[1],
                                trace.shape[2])
    else:
        samples = result['chain']

    # Plot SFH
    #print(model) # to copy paste agebins from print out

    nagebins = 6
    agebins = np.array([[0., 8.], [8., 8.47712125], [8.47712125, 9.],
                        [9., 9.47712125], [9.47712125, 9.77815125],
                        [9.77815125, 10.13353891]])

    # Get the zfractions from corner quantiles
    zf1 = corner.quantile(samples[:, 2], q=[0.16, 0.5, 0.84])
    zf2 = corner.quantile(samples[:, 3], q=[0.16, 0.5, 0.84])
    zf3 = corner.quantile(samples[:, 4], q=[0.16, 0.5, 0.84])
    zf4 = corner.quantile(samples[:, 5], q=[0.16, 0.5, 0.84])
    zf5 = corner.quantile(samples[:, 6], q=[0.16, 0.5, 0.84])

    zf_arr = np.array([zf1[1], zf2[1], zf3[1], zf4[1], zf5[1]])
    zf_arr_l = np.array([zf1[0], zf2[0], zf3[0], zf4[0], zf5[0]])
    zf_arr_u = np.array([zf1[2], zf2[2], zf3[2], zf4[2], zf5[2]])

    cq_mass = corner.quantile(samples[:, 7], q=[0.16, 0.5, 0.84])

    print("Total mass:", "{:.3e}".format(cq_mass[1]), "+",
          "{:.3e}".format(cq_mass[2] - cq_mass[1]), "-",
          "{:.3e}".format(cq_mass[1] - cq_mass[0]))
    # -----------

    new_agebins = pt.zred_to_agebins(zred=obj_z, agebins=agebins)
    print("New agebins:", new_agebins)

    # -----------------------
    # now convert to sfh and its errors
    sfr = pt.zfrac_to_sfr(total_mass=cq_mass[1],
                          z_fraction=zf_arr,
                          agebins=new_agebins)
    sfr_l = pt.zfrac_to_sfr(total_mass=cq_mass[1],
                            z_fraction=zf_arr_l,
                            agebins=new_agebins)
    sfr_u = pt.zfrac_to_sfr(total_mass=cq_mass[1],
                            z_fraction=zf_arr_u,
                            agebins=new_agebins)

    print("----------")
    print("z fractions:      ", zf_arr)
    print("Lower z fractions:", zf_arr_l)
    print("Upper z fractions:", zf_arr_u)

    print("----------")
    print("Inferred SFR:  ", sfr)
    print("Lower sfr vals:", sfr_l)
    print("Upper sfr vals:", sfr_u)

    #############
    x_agebins = 10**new_agebins / 1e9

    fig = plt.figure(figsize=(8, 4))
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Time\, [Gyr];\, since\ galaxy\ formation}$',
                  fontsize=20)
    ax.set_ylabel(r'$\mathrm{SFR\, [M_\odot/yr]}$', fontsize=20)

    for a in range(len(agebins)):
        ax.plot(x_agebins[a],
                np.ones(len(x_agebins[a])) * sfr[a],
                color='mediumblue',
                lw=3.0)
        # put in some poisson errors
        sfr_err = np.ones(len(x_agebins[a])) * np.sqrt(sfr[a])
        sfr_plt = np.ones(len(x_agebins[a])) * sfr[a]
        sfr_low_fill = sfr_plt - sfr_err
        sfr_up_fill = sfr_plt + sfr_err
        ax.fill_between(x_agebins[a],
                        sfr_low_fill,
                        sfr_up_fill,
                        color='gray',
                        alpha=0.6)

    #ax.set_ylim(np.min(sfr_low_fill) * 0.3, np.max(sfr_up_fill) * 1.1)

    fig.savefig(adap_dir + 'sfh_' + field + '_' + str(galaxy_seq) + '.pdf',
                dpi=200,
                bbox_inches='tight')

    sys.exit(0)

    # Keep code block for future use if needed
    """
    # combination of linear and log axis from
    # https://stackoverflow.com/questions/21746491/combining-a-log-and-linear-scale-in-matplotlib

    # linear part i.e., first age bin
    ax.plot(x_agebins[0], np.ones(len(x_agebins[0])) * sfr[0], color='mediumblue', lw=3.5)
    #ax.fill_between(x_agebins[0], np.ones(len(x_agebins[0])) * sfr_l[0], 
    #               np.ones(len(x_agebins[0])) * sfr_u[0], color='gray', alpha=0.5)

    ax.set_xlim(0.0, 8.0)
    ax.spines['right'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_label_coords(x=1.2,y=-0.06)

    # now log axis 
    divider = make_axes_locatable(ax)
    axlog = divider.append_axes("right", size=3.0, pad=0, sharey=ax)
    axlog.set_xscale('log')

    for a in range(1, nagebins):
        axlog.plot(x_agebins[a], np.ones(len(x_agebins[a])) * sfr[a], color='mediumblue', lw=3.5)
        #axlog.fill_between(x_agebins[a], np.ones(len(x_agebins[a])) * sfr_l[a], 
        #                np.ones(len(x_agebins[a])) * sfr_u[a], color='gray', alpha=0.5)

    axlog.set_xlim(8.0, x_agebins[-1, -1] + 0.1)
    axlog.spines['left'].set_visible(False)
    axlog.yaxis.set_ticks_position('right')
    axlog.tick_params(labelright=False)

    axlog.xaxis.set_ticks(ticks=[8.0, 9.0])
    axlog.xaxis.set_ticks(ticks=[8.2, 8.4, 8.6, 8.8, 9.2, 9.4, 9.6], minor=True)

    axlog.set_xticklabels(['8', '9'])
    axlog.set_xticklabels(labels=[], minor=True)

    fig.savefig(adap_dir + 'sfh_' + field + '_' + str(galaxy_seq) + '.pdf',
        dpi=200, bbox_inches='tight')
    """

    # ---------- corner plot
    # set up corner ranges and labels
    math_parnames = [
        r'$\mathrm{log(Z_\odot)}$', r'$\mathrm{dust2}$', r'$zf_1$', r'$zf_2$',
        r'$zf_3$', r'$zf_4$', r'$zf_5$', r'$\mathrm{M_s}$',
        r'$\mathrm{f_{agn}}$', r'$\mathrm{agn_\tau}$',
        r'$\mathrm{dust_{ratio}}$', r'$\mathrm{dust_{index}}$'
    ]

    #math_parnames = [r'$\mathrm{M_s}$', r'$\mathrm{log(Z_\odot)}$',
    #r'$\mathrm{dust2}$', r'$\mathrm{t_{age}}$', r'$\mathrm{log(\tau)}$']

    # Fix labels for corner plot and
    # Figure out ranges for corner plot
    corner_range = []
    for d in range(ndim):

        # Get corner estimate and errors
        cq = corner.quantile(x=samples[:, d], q=[0.16, 0.5, 0.84])

        low_err = cq[1] - cq[0]
        up_err = cq[2] - cq[1]

        # Decide the padding for the plot range
        # depending on how large the error is relative
        # to the central estimate.
        if low_err * 2.5 >= cq[1]:
            sigma_padding_low = 1.2
        else:
            sigma_padding_low = 3.0

        if up_err * 2.5 >= cq[1]:
            sigma_padding_up = 1.2
        else:
            sigma_padding_up = 3.0

        low_lim = cq[1] - sigma_padding_low * low_err
        up_lim = cq[1] + sigma_padding_up * up_err

        corner_range.append((low_lim, up_lim))

        # Print estimate to screen
        if 'mass' in parnames[d]:
            pn = '{:.3e}'.format(cq[1])
            pnu = '{:.3e}'.format(up_err)
            pnl = '{:.3e}'.format(low_err)
        else:
            pn = '{:.3f}'.format(cq[1])
            pnu = '{:.3f}'.format(up_err)
            pnl = '{:.3f}'.format(low_err)

        print(parnames[d], ":  ", pn, "+", pnu, "-", pnl)

    # Corner plot
    cornerfig = corner.corner(samples,
                              quantiles=[0.16, 0.5, 0.84],
                              labels=math_parnames,
                              label_kwargs={"fontsize": 14},
                              range=corner_range,
                              smooth=0.5,
                              smooth1d=0.5)

    # loop over all axes *again* and set title
    # because it won't let me set the
    # format for some titles separately
    # Looping has to be done twice because corner
    # plotting has to be done to get the figure.
    corner_axes = np.array(cornerfig.axes).reshape((ndim, ndim))

    for d in range(ndim):
        # Get corner estimate and errors
        cq = corner.quantile(x=samples[:, d], q=[0.16, 0.5, 0.84])

        low_err = cq[1] - cq[0]
        up_err = cq[2] - cq[1]

        ax_c = corner_axes[d, d]

        if 'mass' in parnames[d]:
            ax_c.set_title(math_parnames[d] + r"$ \, =\,$" + csn(cq[1], sigfigs=3) + \
            r"$\substack{+$" + csn(up_err, sigfigs=3) + r"$\\ -$" + \
            csn(low_err, sigfigs=3) + r"$}$", fontsize=11, pad=15)
        else:
            ax_c.set_title(math_parnames[d] + r"$ \, =\,$" + r"${:.3f}$".format(cq[1]) + \
            r"$\substack{+$" + r"${:.3f}$".format(up_err) + r"$\\ -$" + \
            r"${:.3f}$".format(low_err) + r"$}$", fontsize=11, pad=10)

    cornerfig.savefig(adap_dir + 'corner_' + field + '_' + \
        str(galaxy_seq) + '.pdf', dpi=200, bbox_inches='tight')

    sys.exit(0)

    # maximum a posteriori (of the locations visited by the MCMC sampler)
    pmax = np.argmax(result['lnprobability'])
    if results_type == "emcee":
        p, q = np.unravel_index(pmax, result['lnprobability'].shape)
        theta_max = result['chain'][p, q, :].copy()
    else:
        theta_max = result["chain"][pmax, :]

    #print('Optimization value: {}'.format(theta_best))
    #print('MAP value: {}'.format(theta_max))

    # make SED plot for MAP model and some random model
    # randomly chosen parameters from chain
    randint = np.random.randint
    if results_type == "emcee":
        theta = result['chain'][randint(nwalkers), randint(niter)]
    else:
        theta = result["chain"][randint(len(result["chain"]))]

    # generate models
    a = 1.0 + model.params.get('zred', 0.0)  # cosmological redshifting
    # photometric effective wavelengths
    wphot = obs["phot_wave"]
    # spectroscopic wavelengths
    if obs["wavelength"] is None:
        # *restframe* spectral wavelengths, since obs["wavelength"] is None
        wspec = sps.wavelengths
        wspec *= a  #redshift them
    else:
        wspec = obs["wavelength"]

    # sps = reader.get_sps(result)  # this works if using parameter files
    mspec, mphot, mextra = model.mean_model(theta, obs, sps=sps)
    mspec_map, mphot_map, _ = model.mean_model(theta_max, obs, sps=sps)

    # establish bounds
    xmin, xmax = np.min(wphot) * 0.8, np.max(wphot) / 0.8
    ymin, ymax = obs["maggies"].min() * 0.8, obs["maggies"].max() / 0.4

    # Make plot of data and model
    fig3 = plt.figure(figsize=(9, 4))
    ax3 = fig3.add_subplot(111)

    ax3.set_xlabel(r'$\mathrm{\lambda\ [\AA]}$', fontsize=15)
    #ax3.set_ylabel(r'$\mathrm{f_\lambda\ [erg\, s^{-1}\, cm^{-2}\, \AA^{-1}]}$', fontsize=15)
    ax3.set_ylabel(r'$\mathrm{Flux\ Density\ [maggies]}$', fontsize=15)

    ax3.loglog(wspec,
               mspec,
               label='Model spectrum (random draw)',
               lw=0.7,
               color='navy',
               alpha=0.7)
    ax3.loglog(wspec,
               mspec_map,
               label='Model spectrum (MAP)',
               lw=0.7,
               color='green',
               alpha=0.7)
    ax3.errorbar(wphot,
                 mphot,
                 label='Model photometry (random draw)',
                 marker='s',
                 markersize=10,
                 alpha=0.8,
                 ls='',
                 lw=3,
                 markerfacecolor='none',
                 markeredgecolor='blue',
                 markeredgewidth=3)
    ax3.errorbar(wphot,
                 mphot_map,
                 label='Model photometry (MAP)',
                 marker='s',
                 markersize=10,
                 alpha=0.8,
                 ls='',
                 lw=3,
                 markerfacecolor='none',
                 markeredgecolor='green',
                 markeredgewidth=3)
    ax3.errorbar(wphot,
                 obs['maggies'],
                 yerr=obs['maggies_unc'],
                 label='Observed photometry',
                 ecolor='red',
                 marker='o',
                 markersize=10,
                 ls='',
                 lw=3,
                 alpha=0.8,
                 markerfacecolor='none',
                 markeredgecolor='red',
                 markeredgewidth=3)

    # plot transmission curves
    for f in obs['filters']:
        w, t = f.wavelength.copy(), f.transmission.copy()
        t = t / t.max()
        t = 10**(0.2 * (np.log10(ymax / ymin))) * t * ymin
        ax3.loglog(w, t, lw=3, color='gray', alpha=0.7)

    ax3.set_xlim([xmin, xmax])
    ax3.set_ylim([ymin, ymax])
    ax3.legend(loc='best', fontsize=11)

    fig3.savefig(adap_dir + 'sedplot_' + field + '_' + str(galaxy_seq) +
                 '.pdf',
                 dpi=200,
                 bbox_inches='tight')

    plt.clf()
    plt.cla()
    plt.close()

    return None
Code example #15
File: sfh_plot.py Project: rpan04/prospector-eelgs
def post_processing(param_name, outname=None, **kwargs):
    '''
    Driver. Loads output, runs post-processing routine.
    '''
    '''  # MACHETE
    from brown_io import load_prospector_data, create_prosp_filename
    '''
    # I/O
    if outname is None:
        parmfile = model_setup.import_module_from_file(param_name)
        outname = parmfile.run_params['outfile']

    model_filename = outname[:-6] + 'odel'  # ADDED
    sample_results, powell_results, model = read_results.results_from(
        outname, model_file=model_filename, inmod=None)  # ADDED
    cornerfig = read_results.subtriangle(sample_results,
                                         start=400,
                                         thin=5,
                                         show_titles=True)  # ADDED
    plt.show()  # ADDED to give an idea of what the SFH should look like based on sfr_fraction
    '''  # MACHETE
    outfolder = os.getenv('APPS') + '/threedhst_bsfh/plots/' + outname.split('/')[-2] + '/'

    # check for output folder, create if necessary
    if not os.path.isdir(outfolder):
        os.makedirs(outfolder)

    try:
        sample_results, powell_results, model, _ = load_prospector_data(outname, hdf5=True, load_extra_output=False)
    except AttributeError:
        print
        'Failed to load chain for ' + sample_results['run_params']['objname'] + '. Returning.'
        return
    '''
    print('Performing post-processing on ' + sample_results['run_params']['objname'])

    ### create flatchain, run post-processing
    sample_results['flatchain'] = prosp_dutils.chop_chain(
        sample_results['chain'], **sample_results['run_params'])
    sample_results['flatprob'] = prosp_dutils.chop_chain(
        sample_results['lnprobability'], **sample_results['run_params'])
    extra_output = calc_extra_quantities(sample_results, **kwargs)
    '''  # MACHETE
    ### create post-processing name, dump info
    mcmc_filename, model_filename, extra_filename = create_prosp_filename(outname)
    hickle.dump(extra_output, open(extra_filename, "w"))

    ### MAKE PLOTS HERE
    try:
        prosp_diagnostic_plots.make_all_plots(sample_results=sample_results, extra_output=extra_output,
                                              filebase=outname, outfolder=outfolder, param_name=param_name + '.py')
    except NameError:
        print
        "Unable to make plots for " + sample_results['run_params']['objname'] + " due to import error. Passing."
        pass
    '''

    ### ADDED to quickly print out SFH
    plt.plot(extra_output['extras']['t_sfh'], extra_output['bfit']['sfh'])
    plt.ylabel(r'M$_\odot$ yr$^{-1}$')
    plt.xlabel('Gyr')
    plt.show()
Code example #16
File: outputfast.py Project: rpan04/prospector-eelgs
def post_processing(out_file, param_file, out_incl=False, full_h5file=False):  # , **kwargs):
    """
    Driver. Loads output, runs post-processing routine.
    """

    obj = ''
    field = ''
    base = ''
    count = 0
    slash = 0
    for i in out_file:
        if i == '/':
            slash += 1
        elif i == '_':
            count += 1

        elif out_incl:
            if slash == 1 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif full_h5file:
            if slash == 13 and count == 1:  # slash=12
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif count == 0:
            obj += i
        elif count == 1:
            field += i
        elif count == 2:
            base += i
        elif count == 3:
            break
    print(field)
    git = '/home/jonathan/.conda/envs/snowflakes/lib/python2.7/site-packages/prospector/git/'
    full_base = 'pkl_tfn/' + obj + '_' + field + '_' + base  # 'pkl_efastnoem/'  # 'pkl_efico/'
    pkl = 'out.pkl'

    res, pr, mod = bread.results_from(out_file)
    print('bread')

    # create flatchain, run post-processing
    res['flatchain'] = prosp_dutils.chop_chain(res['chain'], **res['run_params'])
    res['flatprob'] = prosp_dutils.chop_chain(res['lnprobability'], **res['run_params'])
    '''
    extra_output = calc_extra_quantities(res)  # , **kwargs)
    print('extra calculated')
    extra = full_base + '_extra_' + pkl
    with open(extra, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(extra_output, newfile, pickle.HIGHEST_PROTOCOL)
    print('extra pickled')
    '''
    prob = res['lnprobability'][:, 0:]
    # PRINT TRACE SHOWING HOW ITERATIONS CONVERGE FOR EACH PARAMETER
    # tracefig, prob = bread.param_evol(res)  # print tracefig, store probability
    # plt.title(full_base)  # BUCKET just added
    # plt.savefig('img/' + full_base + '_tracefig.png', bbox_inches='tight')
    # plt.show()

    # FIND WALKER, ITERATION THAT GIVE MAX PROBABILITY
    print('max', prob.max())
    row = prob.argmax() // len(prob[0])
    col = prob.argmax() - row * len(prob[0])
    walker, iteration = row, col
    print(walker, iteration)

    # PRINT CORNERFIG CONTOURS/HISTOGRAMS FOR EACH PARAMETER
    '''
    bread.subtriangle(res, start=0, thin=5, show_titles=True)
    plt.title(full_base)  # BUCKET just added
    plt.savefig('img/' + full_base + '_cornerfig.png', bbox_inches='tight')
    # plt.show()
    '''
    # For FAST: truths=[mass, age, tau, dust2] (for 1824: [9.78, 0.25, -1., 0.00])

    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')

    # GET MODELED SPECTRA AND PHOTOMETRY
    # These have the same shape as the obs['spectrum'] and obs['maggies'] arrays.
    spec, phot, mfrac = mod.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)
    print('spec')

    # PLOT SPECTRUM
    wave = [f.wave_effective for f in res['obs']['filters']]
    wave = np.asarray(wave)

    # CHANGING OBSERVED TO REST FRAME WAVELENGTH
    if field == 'cdfs':
        datname = '/home/jonathan/cdfs/cdfs.v1.6.11.cat'
        zname = '/home/jonathan/cdfs/cdfs.v1.6.9.awk.zout'
    elif field == 'cosmos':
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog
    elif field == 'uds':
        datname = '/home/jonathan/uds/uds.v1.5.10.cat'
        zname = '/home/jonathan/uds/uds.v1.5.8.awk.zout'

    with open(datname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype)

    with open(zname, 'r') as fz:
        hdr_z = fz.readline().split()
    dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, float) for n in hdr_z[2:]])
    zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z)

    idx = dat['id'] == obj  # array filled: False when dat['id'] != obj, True when dat['id'] == obj
    print(obj)
    zred = zout['z_spec'][idx][0]  # z = z_spec
    if zred == -99:
        zred = zout['z_peak'][idx][0]  # if z_spec does not exist, z = z_phot
    print('redshift', zred)

    wave_rest = []  # REST FRAME WAVELENGTH
    for j in range(len(wave)):
        wave_rest.append(wave[j] / (1 + zred))  # 1 + z = l_obs / l_emit --> l_emit = l_obs / (1 + z)
    wave_rest = np.asarray(wave_rest)

    # OUTPUT SED results to files
    write_res = full_base + '_res_' + pkl  # results
    with open(write_res, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(res, newfile, pickle.HIGHEST_PROTOCOL)  # res includes res['obs']['maggies'] and ...['maggies_unc']
    write_sed = full_base + '_sed_' + pkl  # model sed
    with open(write_sed, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(phot, newfile, pickle.HIGHEST_PROTOCOL)
    write_restwave = full_base + '_restwave_' + pkl  # rest frame wavelengths
    with open(write_restwave, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(wave_rest, newfile, pickle.HIGHEST_PROTOCOL)
    write_spec = full_base + '_spec_' + pkl  # spectrum
    with open(write_spec, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(spec, newfile, pickle.HIGHEST_PROTOCOL)
    write_sps = full_base + '_spswave_' + pkl  # wavelengths that go with spectrum
    with open(write_sps, 'wb') as newfile:  # 'wb' because binary format
        try:
            wlengths = sps.wavelengths
        except AttributeError:
            wlengths = sps.csp.wavelengths
        pickle.dump(wlengths, newfile, pickle.HIGHEST_PROTOCOL)
        # pickle.dump(sps.wavelengths, newfile, pickle.HIGHEST_PROTOCOL)

    # OUTPUT CHI_SQ results to files
    chi_sq = ((res['obs']['maggies'] - phot) / res['obs']['maggies_unc']) ** 2
    write_chisq = full_base + '_chisq_' + pkl
    with open(write_chisq, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(chi_sq, newfile, pickle.HIGHEST_PROTOCOL)
    write_justchi = full_base + '_justchi_' + pkl
    with open(write_justchi, 'wb') as newfile:
        pickle.dump((res['obs']['maggies'] - phot) / res['obs']['maggies_unc'], newfile, pickle.HIGHEST_PROTOCOL)

    # PLOT CHISQ
    '''
    plt.plot(wave_rest, chi_sq, 'o', color='b')
    plt.title(str(obj) + r' $\chi^2$')
    plt.xlabel('Rest frame wavelength [angstroms]')
    plt.ylabel(r'$\chi^2$')
    plt.savefig('img/' + full_base + '_chisq.png', bbox_inches='tight')
    # plt.show()
    '''

    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT!
    '''
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for l in xrange(parnames.shape[0]):
        kl_ax.plot(res['kl_iteration'], np.log10(res['kl_divergence'][:, l]), 'o', label=parnames[l], lw=1.5,
                   linestyle='-', alpha=0.6)
    '''
    write_klit = full_base + '_klit_' + pkl
    with open(write_klit, 'wb') as newfile:
        pickle.dump(res['kl_iteration'], newfile, pickle.HIGHEST_PROTOCOL)
    write_kldvg = full_base + '_kldvg_' + pkl
    with open(write_kldvg, 'wb') as newfile:
        pickle.dump(res['kl_divergence'], newfile, pickle.HIGHEST_PROTOCOL)
Code example #17
sys.path.append(prosp_dir)
from run_prosp import build_model, build_sps

sfr_50 = []
sfr_16 = []
sfr_84 = []

print('now reading files')
galaxy_num = "{:03d}".format(galaxy)
infile = prosp_dir + '/galaxy' + str(galaxy) + '.h5'
globfiles = glob.glob(infile)

try:
    for prosp_output in glob.glob(infile):
        print(prosp_output)
        res, obs, _ = pread.results_from(prosp_output)
    pdfile = pd_dir + '/grid_physical_properties.305_galaxy' + galaxy_num + '.npz'
    pd_data = np.load(pdfile)
    int_mass = np.sum(pd_data['grid_star_mass'])
except Exception:
    print('file not found')

print('sps and model')
sps = build_sps()
mod = build_model()
thetas = mod.theta_labels()
#print(mod)
thetas_50 = []
thetas_16 = []
thetas_84 = []
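The empty `thetas_50/16/84` lists suggest per-parameter quantiles come next. A sketch of one way to fill them with `corner.quantile`, assuming a dynesty-style flat chain of shape (nsamples, ndim) and optional sample weights:

import corner

for i, name in enumerate(thetas):
    q16, q50, q84 = corner.quantile(res['chain'][:, i],
                                    [0.16, 0.5, 0.84],
                                    weights=res.get('weights', None))
    thetas_16.append(q16)
    thetas_50.append(q50)
    thetas_84.append(q84)
    print('{}: {:.3f} (+{:.3f} / -{:.3f})'.format(name, q50, q84 - q50, q50 - q16))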
Code example #18
                                    post_burnin_center=burn_p0,
                                    post_burnin_prob=burn_prob0)

        if hfile is None:
            hfile = hfilename
        write_results.write_hdf5(hfile, run_params, model, obs, esampler, 
                                 guesses,
                                 toptimize=pdur, tsample=edur,
                                 sampling_initial_center=initial_center,
                                 post_burnin_center=burn_p0,
                                 post_burnin_prob=burn_prob0)

        #print('Finished')

        # grab results, powell results, and our corresponding models
        res, pr, mod = results_from("{}_mcmc.h5".format(outroot))
        '''
        if os.path.isfile("{}_mcmc.h5".format(outroot)):
            print('the files are IN the computer!')
            res, pr, mod = results_from("{}_mcmc.h5".format(outroot))
        else:
            tstart = time.time()  # time it

            out = fitting.run_emcee_sampler(lnprobfn, initial_center, model, postkwargs=postkwargs, initial_prob=initial_prob, pool=None, hdf5=hfile, **run_params)

            esampler, burn_p0, burn_prob0 = out
            edur = time.time() - tstart

            #sys.stdout = fout

            #print('done emcee in {0}s'.format(edur))
Code example #19
dirich_timelist = np.unique(np.array([
    1.38000000e+01, 1.37000000e+01, 1.37000000e+01, 1.34688689e+01,
    1.34688689e+01, 1.32487758e+01, 1.32487758e+01, 1.28823933e+01,
    1.28823933e+01, 1.22724872e+01, 1.22724872e+01, 1.12571945e+01,
    1.12571945e+01, 9.56706658e+00, 9.56706658e+00, 6.75356055e+00,
    6.75356055e+00, 2.07, 0.]))

#sfr50 = output_file['data'][()]['SFR_50']
#print(sfr50)
#massfrac = output_file['data'][()]['Massfrac']
#mass50 = output_file['data'][()]['Mass_50']
true_mstar = output_file['data'][()]['True Mass'] / 1.989e33        



print('loading prosp results')
res, _, _ = pread.results_from(prosp_dir+'/galaxy'+str(galaxy)+'.h5')
thetas = mod.theta_labels()
print(thetas)
imax = np.argmax(res['lnprobability'])
theta_max = res["chain"][imax, :]

smass_idx = [i for i, s in enumerate(thetas) if 'massmet_1' in s][0]
smass = theta_max[smass_idx]
duste_gamma_idx = [i for i, s in enumerate(thetas) if 'duste_gamma' in s][0]
gamma = theta_max[duste_gamma_idx]#[item[duste_gamma_idx] for item in res['chain']]
duste_umin_idx = [i for i, s in enumerate(thetas) if 'duste_umin' in s][0]
umin = theta_max[duste_umin_idx]#[item[duste_umin_idx] for item in res['chain']]
#duste_qpah_idx = [i for i, s in enumerate(thetas) if 'duste_qpah' in s][0]
#pah = theta_max[duste_qpah_idx]#[item[duste_qpah_idx] for item in res['chain']]
dust2_idx = [i for i, s in enumerate(thetas) if 'dust2' in s][0]
d2 = theta_max[dust2_idx]#[item[dust2_idx] for item in res['chain']]
Code example #20
File: output.py Project: rpan04/prospector-eelgs
def post_processing(out_file, param_file, full_h5file=True, out_incl=False, **kwargs):
    """
    Driver. Loads output, runs post-processing routine.
    """

    # Dense, complex, terrible bookkeeping on my part here based on my naming system for prospector output files
    obj = ''
    field = ''
    base = ''
    count = 0
    slash = 0
    for i in kwargs['outname']:
        if i == '/':
            slash += 1
        elif i == '_':
            count += 1

        elif out_incl:
            if slash == 2 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif full_h5file:
            if slash == 13 and count == 1:
                obj += i
            elif count == 2:
                field += i
            elif count == 3:
                base += i
            elif count == 4:
                break

        elif count == 0:
            obj += i
        elif count == 1:
            field += i
        elif count == 2:
            base += i
        elif count == 3:
            break

    print(obj, field)
    full_base = obj + '_' + field + '_' + base
    img_base = 'img/' + full_base  # /home/jonathan/img' +
    print(full_base, img_base)
    pkl = 'out.pkl'

    res, pr, mod = bread.results_from(out_file)
    print('bread')

    # create flatchain, run post-processing!
    res['flatchain'] = prosp_dutils.chop_chain(res['chain'], **res['run_params'])
    res['flatprob'] = prosp_dutils.chop_chain(res['lnprobability'], **res['run_params'])
    extra_output = calc_extra_quantities(res, **kwargs)
    print('extra calculated')
    print(base)

    # choose correct folder where .h5 file is stored based on param file name
    if base == 'corr':
        folder = 'pkl_ecorr/'  # 'pkl_ncorr/'
    elif base == 'fico':
        folder = 'pkl_efico/'  # 'pkl_nfico/'
    elif base == 'masstest':
        folder = 'pkl_masstest/'
    elif base == 'tt':
        folder = 'pkl_tt/'
    else:
        folder = 'pkl_simsfh/'

    '''
    if base == 'thirty':
        folder = 'etpkls/'
    elif base == 'nth':
        folder = 'ntpkls/'
    elif base == 'fixedmet':
        folder = 'pkls/'
    elif base == 'otherbins':
        folder = 'opkls/'
    elif base == 'noelg':
        folder = 'nmpkls/'
    elif base == 'nother':
        folder = 'nopkls/'
    elif base == 'vary':
        folder = 'ecorr_pkl/'
        # folder = 'evar_pkl/'
    elif base == 'noneb' or 'evarnoneb':
        folder = 'nonebpkls/'
    elif base == 'efifty2':
        folder = 'efifty2_pkls/'
    elif base == 'evar2':
        folder = 'evar2_pkls/'
    elif base == 'masstest':
        folder = 'pkl_masstest'
    '''

    # folder = 'evar2_pkls/'  # 'efifty2_pkls/'
    # pkl extra output!
    extra = folder + full_base + '_extra_' + pkl  # full_base + '_extra_' + pkl
    print(extra)
    with open(extra, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(extra_output, newfile, pickle.HIGHEST_PROTOCOL)
    print('extra pickled')
    
    # PRINT TRACE SHOWING HOW ITERATIONS PROGRESS FOR EACH PARAMETER
    # I edited param_evol to also store lnprob, but this is a silly and long-obsolete way of doing this
    tracefig = bread.param_evol(res)  # prints tracefig
    plt.title(full_base)  # BUCKET just added
    # plt.savefig(img_base + '_tracefig.png', bbox_inches='tight')
    # plt.show()

    # FIND WALKER, ITERATION THAT GIVE MAX PROBABILITY
    # a result of the silly, long-obsolete way I'm grabbing lnprob above
    prob = res['lnprobability'][..., 0:]
    print('max', prob.max())
    row = prob.argmax() // len(prob[0])
    col = prob.argmax() - row * len(prob[0])
    walker, iteration = row, col
    print(walker, iteration)

    # PRINT CORNERFIG CONTOURS/HISTOGRAMS FOR EACH PARAMETER
    bread.subtriangle(res, start=-1000, thin=5, show_titles=True)
    plt.title(full_base)  # BUCKET just added
    # plt.savefig(img_base + '_cornerfig.png', bbox_inches='tight')
    # plt.show()
    # For FAST: truths=[mass, age, tau, dust2] (for 1824: [9.78, 0.25, -1., 0.00])

    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')

    # GET MODELED SPECTRA AND PHOTOMETRY
    # These have the same shape as the obs['spectrum'] and obs['maggies'] arrays.
    spec, phot, mfrac = mod.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)
    print('spec')

    # PLOT SPECTRUM
    wave = [f.wave_effective for f in res['obs']['filters']]
    wave = np.asarray(wave)

    # CHANGING OBSERVED TO REST FRAME WAVELENGTH
    # grabbing data from catalogs
    if field == 'cdfs':
        datname = '/home/jonathan/cdfs/cdfs.v1.6.11.cat'
        zname = '/home/jonathan/cdfs/cdfs.v1.6.9.awk.zout'
    elif field == 'cosmos':
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog
    elif field == 'uds':
        datname = '/home/jonathan/uds/uds.v1.5.10.cat'
        zname = '/home/jonathan/uds/uds.v1.5.8.awk.zout'
    elif field == 'sim':  # hacking for now
        datname = '/home/jonathan/cosmos/cosmos.v1.3.8.cat'  # main catalog
        zname = '/home/jonathan/cosmos/cosmos.v1.3.6.awk.zout'  # redshift catalog

    # photometry catalog
    with open(datname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(datname, comments='#', delimiter=' ', dtype=dtype)

    # redshift catalog
    with open(zname, 'r') as fz:
        hdr_z = fz.readline().split()
    dtype_z = np.dtype([(hdr_z[1], 'S20')] + [(n, float) for n in hdr_z[2:]])
    zout = np.loadtxt(zname, comments='#', delimiter=' ', dtype=dtype_z)

    # if z_spec exists, use it; else use best-fit z_phot
    idx = dat['id'] == obj  # array filled with: False for all dat['id'] != obj, True for dat['id'] == obj
    zred = zout['z_spec'][idx][0]
    if zred == -99:
        zred = zout['z_peak'][idx][0]
    print('redshift', zred)

    # convert stored wavelengths to rest frame wavelengths
    wave_rest = []
    for j in range(len(wave)):
        wave_rest.append(wave[j]/(1 + zred))  # 1 + z = l_obs / l_emit --> l_emit = l_obs / (1 + z)
    wave_rest = np.asarray(wave_rest)

    # OUTPUT SED results to pkls
    full_base = folder + full_base
    write_res = full_base + '_res_' + pkl  # results
    with open(write_res, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(res, newfile, pickle.HIGHEST_PROTOCOL)  # res includes res['obs']['maggies'], ['maggies_unc'], etc.
    write_sed = full_base + '_sed_' + pkl  # model sed
    with open(write_sed, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(phot, newfile, pickle.HIGHEST_PROTOCOL)
    write_restwave = full_base + '_restwave_' + pkl  # rest frame wavelengths
    with open(write_restwave, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(wave_rest, newfile, pickle.HIGHEST_PROTOCOL)
    write_spec = full_base + '_spec_' + pkl  # spectrum
    with open(write_spec, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(spec, newfile, pickle.HIGHEST_PROTOCOL)
    write_sps = full_base + '_spswave_' + pkl  # wavelengths that go with spectrum
    with open(write_sps, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(sps.wavelengths, newfile, pickle.HIGHEST_PROTOCOL)

    # OUTPUT chi_sq results to pkls
    chi_sq = ((res['obs']['maggies'] - phot) / res['obs']['maggies_unc']) ** 2
    write_chisq = full_base + '_chisq_' + pkl
    with open(write_chisq, 'wb') as newfile:  # 'wb' because binary format
        pickle.dump(chi_sq, newfile, pickle.HIGHEST_PROTOCOL)
    write_justchi = full_base + '_justchi_' + pkl
    with open(write_justchi, 'wb') as newfile:
        pickle.dump((res['obs']['maggies'] - phot) / res['obs']['maggies_unc'], newfile, pickle.HIGHEST_PROTOCOL)
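
    # Round-trip check (a minimal sketch, not part of the original pipeline):
    # anything written above can be recovered with pickle.load
    with open(write_sed, 'rb') as readfile:
        sed_check = pickle.load(readfile)
    assert np.allclose(sed_check, phot)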

    # PLOT chi_sq
    plt.plot(wave_rest, chi_sq, 'o', color='b')
    plt.title(str(obj) + r' $\chi^2$')
    plt.xlabel('Rest frame wavelength [angstroms]')
    plt.ylabel(r'$\chi^2$')
    # plt.savefig(img_base + '_chisq.png', bbox_inches='tight')
    # plt.show()

    # HOW CONVERGED IS THE CODE?? LET'S FIND OUT! --> kl test
    parnames = np.array(res['model'].theta_labels())
    fig, kl_ax = plt.subplots(1, 1, figsize=(7, 7))
    for l in range(parnames.shape[0]):  # xrange is Python 2 only
        kl_ax.plot(res['kl_iteration'], np.log10(res['kl_divergence'][:, l]), 'o', label=parnames[l], lw=1.5,
                   linestyle='-', alpha=0.6)

    # OUTPUT kl test to pkls
    write_klit = full_base + '_klit_' + pkl
    with open(write_klit, 'wb') as newfile:
        pickle.dump(res['kl_iteration'], newfile, pickle.HIGHEST_PROTOCOL)
    write_kldvg = full_base + '_kldvg_' + pkl
    with open(write_kldvg, 'wb') as newfile:
        pickle.dump(res['kl_divergence'], newfile, pickle.HIGHEST_PROTOCOL)

    kl_ax.set_ylabel('log(KL divergence)')
    kl_ax.set_xlabel('iteration')
    # kl_ax.set_xlim(0, nsteps*1.1)
    kl_div_lim = res['run_params'].get('convergence_kl_threshold', 0.018)
    kl_ax.axhline(np.log10(kl_div_lim), linestyle='--', color='red', lw=2, zorder=1)
    kl_ax.legend(prop={'size': 5}, ncol=2, numpoints=1, markerscale=0.7)
    plt.title(str(obj) + ' kl')
Code example #21
File: ngc5322-sedfit.py Project: geordie666/SGA
def main():
    """
    Main wrapper script.

    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--priors',
                        default='delayed-tau',
                        type=str,
                        choices=['delayed-tau', 'bursty'],
                        help='Choose the model priors.')
    parser.add_argument('--prefix',
                        default='ngc5322',
                        type=str,
                        help='Output file prefix.')
    parser.add_argument('--seed',
                        default=1,
                        type=int,
                        help='Seed for random number generation.')
    parser.add_argument('--nproc',
                        default=1,
                        type=int,
                        help='Number of cores to use.')
    parser.add_argument('--sedfit',
                        action='store_true',
                        help='Do the SED fit.')
    parser.add_argument('--qaplots',
                        action='store_true',
                        help='Make pretty plots.')
    parser.add_argument('--verbose', action='store_true', help='Be verbose.')
    args = parser.parse_args()
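
    # Example invocations (illustrative only; the flags come from the parser above):
    #   python ngc5322-sedfit.py --sedfit --priors delayed-tau --seed 1
    #   python ngc5322-sedfit.py --qaplots --priors delayed-tau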

    ngc5322dir = os.path.join(os.getenv('LSLGA_DIR'), 'science', 'proposals',
                              'nasa-adap-2019')
    hfile = os.path.join(ngc5322dir, '{}-{}.h5'.format(args.prefix,
                                                       args.priors))

    if args.sedfit:
        import prospect.io
        import prospect.fitting

        # Initialize the SPS library (takes a bit), the photometry, the "run
        # parameters" dictionary, and the model priors.
        sps = load_sps(verbose=args.verbose)
        obs, rp = load_obs(seed=args.seed,
                           nproc=args.nproc,
                           verbose=args.verbose,
                           sps=sps)
        model = load_model(obs, args.priors, verbose=args.verbose)

        #with multiprocessing.Pool(args.nproc) as P:
        P = None
        output = prospect.fitting.fit_model(
            obs,
            model,
            sps,
            noise=(None, None),
            #optimize=True, dynesty=False, emcee=False,
            optimize=False,
            dynesty=True,
            emcee=False,
            #nested_posterior_thresh=0.05,
            pool=P,
            **rp)

        if os.path.isfile(hfile):
            os.remove(hfile)
        print('Writing {}'.format(hfile))
        prospect.io.write_results.write_hdf5(
            hfile,
            rp,
            model,
            obs,
            output['sampling'][0],
            output['optimization'][0],
            tsample=output['sampling'][1],
            toptimize=output['optimization'][1])

    if args.qaplots:
        from prospect.io import read_results as reader

        print('Reading {}...'.format(hfile), end='')
        t0 = time.time()
        result, obs, _ = reader.results_from(hfile, dangerous=False)
        print('...took {:.2f} sec'.format(time.time() - t0))

        sps = load_sps(verbose=args.verbose)
        model = load_model(obs, args.priors, verbose=args.verbose)

        ##################################################
        # P(M, SFR)
        print('Hack!')
        png = os.path.join(ngc5322dir,
                           '{}-{}-pofm.png'.format(args.prefix, args.priors))
        chain = result['chain']
        lnprobability = result['lnprobability']

        # infer the SFR: each mean_model call updates the sps object in place,
        # so sps.ssp.sfr reflects the parameters of the current sample
        sfr = np.zeros_like(lnprobability)
        for ii in np.arange(len(lnprobability)):
            _, _, _ = model.mean_model(chain[ii, :], obs=obs, sps=sps)
            sfr[ii] = sps.ssp.sfr * model.params['mass']
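
        # Posterior summary of the inferred SFR (a minimal sketch; this treats
        # the samples as equally weighted, which is approximate for dynesty)
        sfr_lo, sfr_med, sfr_hi = np.percentile(sfr, [16, 50, 84])
        print('SFR = {:.2f} (+{:.2f}/-{:.2f}) Msun/yr'.format(
            sfr_med, sfr_hi - sfr_med, sfr_med - sfr_lo))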

        pdb.set_trace()

        #ax.set_xlabel(r'$\log_{10}({\rm Stellar\ Mass})\ (\mathcal{M}_{\odot})$')
        #ax.set_ylabel(r'Marginalized Posterior Probability')

        fig, ax = plt.subplots(figsize=(8, 6))
        ax.hist(chain[:, 4],
                bins=50,
                histtype='step',
                linewidth=2,
                edgecolor='k',
                fill=True)
        ax.set_xlim(11, 11.8)
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\log_{10}(\mathcal{M}/\mathcal{M}_{\odot})$')
        ax.set_ylabel(r'$P(\mathcal{M})$')
        ax.xaxis.set_major_locator(MultipleLocator(0.2))
        #for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
        #         ax.get_xticklabels() + ax.get_yticklabels()):
        #    item.set_fontsize(22)
        print('Writing {}'.format(png))
        plt.subplots_adjust(left=0.1, right=0.95, bottom=0.18, top=0.95)
        fig.savefig(png)

        pdb.set_trace()

        ##################################################
        # SED.
        png = os.path.join(ngc5322dir,
                           '{}-{}-sed.png'.format(args.prefix, args.priors))
        bestfit_sed(obs,
                    chain=result['chain'],
                    lnprobability=result['lnprobability'],
                    sps=sps,
                    model=model,
                    seed=1,
                    nrand=100,
                    png=png)

        pdb.set_trace()

        # Corner plot.
        png = os.path.join(ngc5322dir,
                           '{}-{}-corner.png'.format(args.prefix, args.priors))
        subtriangle(result,
                    showpars=['logmass', 'sfr', 'tau', 'dust2', 'dust_ratio'],
                    logify=['tau'],
                    png=png)

        pdb.set_trace()

        #reader.subcorner(result, start=0, thin=1, fig=plt.subplots(5,5,figsize=(27,27))[0])

        pdb.set_trace()
Code example #22
File: fit_host.py Project: muryelgp/TDEpy
def run_prospector(tde_name, path, z, withmpi, n_cores, gal_ebv, show_figs=True, init_theta=None, n_walkers=None,
                   n_inter=None, n_burn=None, read_only=False):
    os.chdir(os.path.join(path, tde_name, 'host'))

    if init_theta is None:
        init_theta = [1e10, -1.0, 6, 0.5]

    if n_walkers is None:
        n_walkers = 100

    if n_inter is None:
        n_inter = 1000

    if n_burn is None:
        n_burn = [500]

    obs, sps, model, run_params = configure(tde_name, path, z, init_theta, n_walkers, n_inter, n_burn, gal_ebv)

    if not withmpi:
        n_cores = 1

    if not read_only:
        print("Initial guess: {}".format(model.initial_theta))
        print('Sampling the SPS grid...')
        if withmpi & ('logzsol' in model.free_params):
            dummy_obs = dict(filters=None, wavelength=None)

            logzsol_prior = model.config_dict["logzsol"]['prior']
            lo, hi = logzsol_prior.range
            logzsol_grid = np.around(np.arange(lo, hi, step=0.1), decimals=2)
            sps.update(**model.params)  # make sure we are caching the correct IMF / SFH / etc
            for logzsol in logzsol_grid:
                model.params["logzsol"] = np.array([logzsol])
                _ = model.predict(model.theta, obs=dummy_obs, sps=sps)
        print('Done')
        from functools import partial
        lnprobfn_fixed = partial(lnprobfn, sps=sps)
        print('Starting posterior emcee sampling..')
        if withmpi:
            from multiprocessing import Pool
            from multiprocessing import cpu_count

            with Pool(int(n_cores)) as pool:
                nprocs = n_cores
                output = fit_model(obs, model, sps, pool=pool, queue_size=nprocs, lnprobfn=lnprobfn_fixed,
                                   **run_params)
        else:
            output = fit_model(obs, model, sps, lnprobfn=lnprobfn_fixed, **run_params)

        # output = fit_model(obs, model, sps, lnprobfn=lnprobfn, **run_params)
        print('done emcee in {0}s'.format(output["sampling"][1]))

        if os.path.exists("prospector_result.h5"):
            os.system('rm prospector_result.h5')

        hfile = "prospector_result.h5"
        writer.write_hdf5(hfile, run_params, model, obs,
                          output["sampling"][0], output["optimization"][0],
                          tsample=output["sampling"][1],
                          toptimize=output["optimization"][1])

        print('Finished')

    # Loading results file
    result, _, _ = reader.results_from("prospector_result.h5", dangerous=False)

    # Finding the Maximum A Posteriori (MAP) model
    imax = np.argmax(result['lnprobability'])
    i, j = np.unravel_index(imax, result['lnprobability'].shape)
    theta_max = result['chain'][i, j, :].copy()

    # saving results
    save_results(result, model, obs, sps, theta_max, tde_name, path, n_walkers, n_inter, n_burn, n_cores)

    print('MAP value: {}'.format(theta_max))
    fit_plot = plot_resulting_fit(tde_name, path)

    os.makedirs(os.path.join(path, tde_name, 'plots'), exist_ok=True)

    fit_plot.savefig(os.path.join(path, tde_name, 'plots', tde_name + '_host_fit.png'), bbox_inches='tight', dpi=300)
    if show_figs:
        plt.show()
    corner_fig = corner_plot(result)
    corner_fig.savefig(os.path.join(path, tde_name, 'plots', tde_name + '_cornerplot.png'), bbox_inches='tight',
                       dpi=300)
    if show_figs:
        plt.show()
    os.chdir(path)
Code example #23
                estTaus[i, k] = float(
                    lines[j].split('tau: ')[1]) * 1e9  # convert from Gyr to yr
        textFile.close()

        # Calculate Prospector attenuation curves
        # Take bestfit parameters from dust fit, use only
        # non-dust related parameters with nodust model
        # to give dust-free spectrum
        if attBool:
            nodust_path = '/scratch/ntf229/RT_fit/projects/' + galaxies[
                i] + '/maxLevel13/wavelengths601/numPhotons1e9/niter4096/inc0/nodust/' + phot[
                    k] + '/walkers512/Prospector_files/fit.h5'
            dust_path = '/scratch/ntf229/RT_fit/projects/' + galaxies[
                i] + '/maxLevel13/wavelengths601/numPhotons1e9/niter4096/inc0/dust/dustFraction0.2/maxTemp8000/' + phot[
                    k] + '/walkers512/Prospector_files/fit.h5'
            nodust_res, nodust_obs, nodust_model = reader.results_from(
                nodust_path)
            nodust_sps = reader.get_sps(nodust_res)
            dust_res, dust_obs, dust_model = reader.results_from(dust_path)
            dust_sps = reader.get_sps(dust_res)

            # generate fake obs to get full resolution spectra
            nodust_fake_obs = nodust_obs.copy()
            nodust_fake_obs['spectrum'] = None
            nodust_fake_obs['wavelength'] = None
            nodust_theta_max = [
                estMasses[i, k], estLogzsol[i, k], estAges[i, k], estTaus[i, k]
            ]  # non-dust related bestfit parameters from dust fit
            nodust_full_spec = nodust_model.predict(nodust_theta_max,
                                                    obs=nodust_fake_obs,
                                                    sps=nodust_sps)[0]
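
            # Attenuation-curve step sketched above (an assumption:
            # dust_full_spec would be generated from dust_model/dust_sps
            # exactly as nodust_full_spec is, using the dusty bestfit theta):
            # A_lambda = -2.5 * np.log10(dust_full_spec / nodust_full_spec)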
Code example #24
File: prospector_restart.py Project: bd-j/prospector
import os
import sys

from prospect.io import read_results as pr
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot
from prospect.models import model_setup


# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'restart_from': '', 'niter': 1024}
clargs = model_setup.parse_args(sargv, argdict=argdict)

# ----------
# Result object and Globals
# ----------
result, global_obs, global_model = pr.results_from(clargs["restart_from"])
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
assert is_emcee, "Result file does not have a chain of the proper shape."

# SPS Model instance (with libraries check)
sps = pr.get_sps(result)
run_params = result["run_params"]
run_params.update(clargs)

# Noise model (this should be doable via read_results)
from prospect.models.model_setup import import_module_from_string
param_file = (result['run_params'].get('param_file', ''),
              result.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
Code example #25
import argparse
import os

import numpy as np
from scipy import stats
from scipy.interpolate import griddata
from scipy.stats import kde
import prospect.io.read_results as reader

#currentPath = os.path.dirname(__file__)

parser = argparse.ArgumentParser()
parser.add_argument("--filename")
parser.add_argument("--path")
args = parser.parse_args()

filePath = '{0}/Prospector_files/{1}'.format(args.path, args.filename)

res, obs, model = reader.results_from(filePath)

best = res["bestfit"]

# Maximum posterior probability sample
imax = np.argmax(res['lnprobability'])
csz = res["chain"].shape

#print('prob shape', res['lnprobability'].shape)

i, j = np.unravel_index(imax, res['lnprobability'].shape)
theta_max = res['chain'][i, j, :].copy()
flatchain = res["chain"].reshape(csz[0] * csz[1], csz[2])
flatprob = res['lnprobability'].reshape(csz[0] * csz[1])

# flatchain[:,0] is all mass parameters
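
# Posterior summary (a minimal sketch): with the chain flattened, credible
# intervals follow from percentiles of each column
mass_lo, mass_med, mass_hi = np.percentile(flatchain[:, 0], [16, 50, 84])
print('mass: {:.3g} (+{:.3g}/-{:.3g})'.format(
    mass_med, mass_hi - mass_med, mass_med - mass_lo))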
Code example #26
import os

import numpy as np
import corner

import prospect.models.transforms as pt
import prospect.io.read_results as reader

home = os.getenv('HOME')
adap_dir = home + '/Documents/adap2021/'

field = 'North'
galaxy_seq = 27438
obj_z = 1.557
galaxy_age = 10**4.06

results_type = "dynesty" 
result, obs, _ = reader.results_from(adap_dir + results_type + "_" + \
                 field + "_" + str(galaxy_seq) + ".h5", dangerous=False)

# ----------- non param
nagebins = 6
agebins = np.array([[ 0.        ,  8.        ],
                    [ 8.        ,  8.47712125],
                    [ 8.47712125,  9.        ],
                    [ 9.        ,  9.47712125],
                    [ 9.47712125,  9.77815125],
                    [ 9.77815125, 10.13353891]])

samples = result['chain']

# Get the zfractions from corner quantiles
zf1 = corner.quantile(samples[:, 2], q=[0.16, 0.5, 0.84])
zf2 = corner.quantile(samples[:, 3], q=[0.16, 0.5, 0.84])
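
# A minimal sketch (an assumption: the five z-fraction columns occupy chain
# indices 2-6, continuing the pattern of zf1 and zf2 above): convert median
# z-fractions to SFR fractions with the transforms module imported as pt
zfrac_med = np.array([corner.quantile(samples[:, 2 + i], q=[0.5])[0]
                      for i in range(nagebins - 1)])
sfrac = pt.zfrac_to_sfrac(z_fraction=zfrac_med)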
Code example #27
import sys
from glob import glob

import numpy as np
import tqdm
from corner import quantile
import prospect.io.read_results as pread

prosp_dir = '/ufrc/narayanan/s.lower/simSEDs/simbam25n512_newfof/prod_runs/dirichlet/old/'

sys.path.append(prosp_dir)
from run_prosp import build_model, build_sps

sps = build_sps()
mod = build_model()

files_ = glob(prosp_dir + '/galaxy*.h5')

mass_sigma = []
mass_chains = []
for galaxy in tqdm.tqdm(files_):
    res, obs, _ = pread.results_from(galaxy)
    thetas = mod.theta_labels()

    mass = [item[thetas.index('massmet_1')] for item in res['chain']]
    mass_chains.append(mass)
    mass_std = np.std(mass)
    mass_sigma.append(mass_std)

np.savez(prosp_dir + 'mass_posterior_widths.npz',
         sigma=mass_sigma,
         posterior=mass_chains)
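
# Reading the saved arrays back (a minimal sketch; allow_pickle is needed if
# the per-galaxy chains have unequal lengths and were stored as object arrays)
saved = np.load(prosp_dir + 'mass_posterior_widths.npz', allow_pickle=True)
print(saved['sigma'])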
Code example #28
import numpy as np
from prospect.models import model_setup
import prospect.io.read_results as pread
from prospect.io.read_results import results_from

clargs = {'param_file': paramfile}  # paramfile is assumed to be defined above (snippet is truncated)
run_params = model_setup.get_run_params(argv=paramfile, **clargs)
print(run_params)

obs = model_setup.load_obs(**run_params)
sps = model_setup.load_sps(**run_params)
model = model_setup.load_model(**run_params)

wspec = sps.csp.wavelengths  # *restframe* spectral wavelengths
a = 1.0 + model.params.get('zred', 0.6)  # cosmological redshifting
wphot = np.array([f.wave_effective for f in obs['filters']])

# In [21]
# grab results, powell results, and our corresponding models
#res, pr, mod = results_from("{}_mcmc.h5".format(outroot))
res, pr, mod = results_from("demo_galphot_1508433060_mcmc.h5")

# In [22]
# To see how our MCMC samples look, we can examine a few traces
choice = np.random.choice
tracefig = pread.param_evol(res,
                            figsize=(20, 10),
                            chains=choice(128, size=10, replace=False))

# In [23]
# Show samples in a triangle or corner plot
#theta_truth = np.array([pr[i]
#                        for i in ['mass','logzsol','tau','tage','dust2']])
theta_truth = pr[0]['x']
theta_truth[0] = np.log10(theta_truth[0])
#cornerfig = pread.subtriangle(res, start=0, thin=5, truths=theta_truth, showpars='mass')#, fig=plt.subplots(5,5,figsize=(27,27))[0])
Code example #29
File: emlines.py Project: rpan04/prospector-eelgs
def ems(param_file, out_file, objname='21442', field='cdfs', enames=None):
    res, pr, model = bread.results_from(out_file)

    # get lnprob, based on bread.param_evol()
    chain = res['chain'][..., 0:, :]
    lnprob = res['lnprobability'][..., 0:]
    # deal with single chain (i.e. nested sampling) results
    if len(chain.shape) == 2:
        lnprob = lnprob[None, ...]
    # tracefig, prob = bread.param_evol(res)  # store probability
    # plt.show()
    print('max', lnprob.max())
    # np.unravel_index avoids the Python 2 integer-division idiom
    walker, iteration = np.unravel_index(lnprob.argmax(), lnprob.shape)
    print(walker, iteration)

    # Get emission lines!
    # We need the correct sps object to generate models
    sargv = sys.argv
    argdict = {'param_file': param_file}
    clargs = model_setup.parse_args(sargv, argdict=argdict)
    run_params = model_setup.get_run_params(argv=sargv, **clargs)
    sps = model_setup.load_sps(**run_params)
    print('sps')
    # spec, mags, sm = model.mean_model(res['chain'][walker, iteration, :], obs=res['obs'], sps=sps)  # spec [maggies/Hz]
    print('mean model')
    w = sps.wavelengths

    ### save redshift, lumdist
    z = model.params.get('zred', np.array(0.0))
    lumdist = model.params.get('lumdist', np.array(0.0))
    nebinspec = model.params.get('nebemlineinspec', True)
    model.params['zred'] = np.array(0.0)
    if lumdist:
        model.params['lumdist'] = np.array(1e-5)
    if nebinspec == False:
        model.params['nebemlineinspec'] = True

    ### if we want restframe optical photometry, generate fake obs file
    ### else generate NO obs file (don't do extra filter convolutions if not necessary)
    obs = {'filters': [], 'wavelength': None}
    ### calculate SED. comes out as maggies per Hz, @ 10pc
    spec, mags, sm = model.mean_model(res['chain'][walker, iteration, :],
                                      obs=obs,
                                      sps=sps)  # maggies/Hz at 10pc
    w = sps.wavelengths

    ### reset model
    model.params['zred'] = z
    if lumdist:
        model.params['lumdist'] = lumdist
    if nebinspec == False:
        model.params['nebemlineinspec'] = False

    spec *= dfactor_10pc / constants.L_sun.cgs.value * to_ergs  # spec * cm^2 * (s/erg) * erg/maggies = s*cm^2 / Hz
    # note erg = [g cm^2 / s^2]
    # according to measure_restframe_properties(), above line converts to Lsun / Hz
    to_flam = 3e18 / w**2  # for f_nu in erg s^-1 Hz^-1 cm^-2: to_flam [(Ang / s^-1) * (1 / Ang^2)]
    spec_flam = spec * to_flam  # erg cm^-2 s^-1 ang^-1

    smooth_spec = smooth_spectrum(w, spec_flam, 250.0, minlam=3e3, maxlam=7e3)

    ### load fsps emission line list
    loc = os.getenv('SPS_HOME') + '/data/emlines_info.dat'
    dat = np.loadtxt(loc,
                     delimiter=',',
                     dtype={
                         'names': ('lam', 'name'),
                         'formats': ('f16', 'S40')
                     })
    print('env')

    print(type(enames))
    ### define emission lines
    # legacy code compatible
    if type(enames) == bool:
        lines = np.array(
            ['Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha', '[NII]'])
        fsps_name = np.array([
            'H delta 4102', 'H beta 4861', '[OIII]4960', '[OIII]5007',
            'H alpha 6563', '[NII]6585'
        ])
    else:
        lines = enames
        fsps_name = enames

    print(lines, enames)  # None, None
    # lines = np.array(['Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha', '[NII]'])
    # fsps_name = np.array(['H delta 4102', 'H beta 4861', '[OIII]4960', '[OIII]5007', 'H alpha 6563', '[NII]6585'])
    lines = np.array([
        '[OII]1', '[OII]2', 'Hdelta', 'Hbeta', '[OIII]1', '[OIII]2', 'Halpha',
        '[NII]'
    ])
    fsps_name = np.array([
        '[OII]3726', '[OII]3729', 'H delta 4102', 'H beta 4861', '[OIII]4960',
        '[OIII]5007', 'H alpha 6563', '[NII]6585'
    ])
    ##### measure emission line flux + EQW
    out = {}
    # print(1, sps.emline_wavelengths)  # big long array, 900 to 6*1e6
    for jj in range(len(lines)):  # xrange is Python 2 only

        # if we don't do nebular emission, zero this out
        if not hasattr(sps, 'get_nebline_luminosity'):
            print('hi')
            out[lines[jj]] = {'flux': 0.0, 'eqw': 0.0}
            continue

        ### calculate luminosity (in Lsun)
        idx = fsps_name[jj] == dat['name']
        # L_so = 3.84*10**33 erg/s
        print(sps.params['mass'].sum())
        eflux = float(sps.get_nebline_luminosity[idx] *
                      sps.params['mass'].sum())  # L_sun
        elam = float(sps.emline_wavelengths[idx])  # Angstroms!
        print(sps.get_nebline_luminosity[idx])
        print(eflux, 'e luminosity')  # typically 10**40 to 10**42
        print(elam, 'elam')  # Angstroms

        # simple continuum estimation
        tidx = np.abs(
            sps.wavelengths - elam
        ) < 100  # True for sps.wavelengths within 100 Angstroms of elam wavelengths
        eqw = eflux / np.median(
            smooth_spec[tidx]
        )  # (erg/s) / (erg cm^-2 s^-1 Ang^-1) = Ang * cm**2

        eflux *= 3.84 * 10**33  # erg/s (Lum, not flux)

        out[lines[jj]] = {'flux': eflux, 'eqw': eqw}

    return out, fsps_name
Code example #30
                                                  str(galaxy))


table = Table.read(photometry)
vorbins = table['vorbin']

logmasses = []
for vorbin in vorbins :
    if table['use'][vorbin] :
        h5_file = '{}{}/{}_ID_{}_BIN_{}_dynesty.h5'.format(inDir,
                                                           str(galaxy),
                                                           cluster,
                                                           str(galaxy),
                                                           str(vorbin))
        result, obs, model = reader.results_from(h5_file, dangerous=False)
        imax = np.argmax(result['lnprobability'])
        logmass = result['chain'][imax, :][2]
        logmasses.append(logmass)
    else :
        logmasses.append(np.nan)

table['logmass'] = np.array(logmasses)

final = '{}{}_ID_{}_results.fits'.format(outDir, cluster, str(galaxy))
table.write(final)

'''
import glob

h5_files = '{}{}/{}_ID_{}_BIN_*_dynesty.h5'.format(inDir, str(galaxy),
Code example #31
import numpy as np
import matplotlib.pyplot as plt
import prospect.io.read_results as reader

results_type = 'dynesty'  # 'emcee' or 'dynesty'
verbose = True
plots = True

# get the results and observation dictionaries
result, obs, _ = reader.results_from('demo_params_{}.h5'.format(results_type),
                                     dangerous=True)

# also get the sps and model objects. This works if using a parameter file with
# `build_*` methods
sps = reader.get_sps(result)
model = reader.get_model(result)

# print the contents of the `results` dictionary
if verbose:
    print(result.keys())

# STEP 1: investigate the parameter traces
if plots:
    if results_type == 'emcee':
        chosen = np.random.choice(result['run_params']['nwalkers'],
                                  size=10,
                                  replace=False)
        tracefig = reader.traceplot(result, figsize=(16, 9), chains=chosen)
    else:
        tracefig = reader.traceplot(result, figsize=(16, 9))
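
    # STEP 2 (a minimal sketch, not part of the original demo): corner plot of
    # the posterior; subcorner is available in recent prospect.io.read_results
    cornerfig = reader.subcorner(result)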