Example #1
File: starfitter.py  Project: bd-j/hrdspy
def load_models(self):
    """Load the BaSeL basis models, initialize the grid to hold resampled models,
    and load the filters"""
    self.basel = starmodel.BaSeL3()  # BaSeL 3.1 Basis
    self.basel.read_all_Z()
    self.stargrid = starmodel.SpecLibrary()  # object to hold the model grid
    self.model_filterlist = observate.load_filters(self.rp['model_fnamelist'])  # filter objects
    self.fit_filterlist = observate.load_filters(self.rp['fit_fnamelist'])
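For context, a quick sketch of what observate.load_filters returns, assuming sedpy is installed and ships transmission curves for these filter names: a list of Filter objects whose attributes (effective wavelength, transmission curve) are used throughout the examples below.

import numpy as np
from sedpy import observate

filterlist = observate.load_filters(['sdss_g0', 'sdss_r0'])
for f in filterlist:
    # each Filter carries its transmission curve plus summary quantities,
    # e.g. the effective wavelength used by the load_obs examples below
    print(str(f), f.wave_effective, f.wavelength.size, f.transmission.max())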
Example #2
def load_obs(**extras):
    """the key word will be "uvj_key"
    generate UVJ fluxes based off of this
    """

    # first load the filters
    fnames = ['bessell_U','bessell_V','twomass_J']

    # generate uvj color
    uv, vj = return_uvj(int(extras['uvj_key']))

    # translate to fluxes
    # do this by fixing V-flux in maggies
    # this corresponds roughly to M~1e10
    vflux = 5e7
    maggies = np.array([10**(-uv/2.5), 1, 10**(vj/2.5)]) * vflux
    maggies_unc = maggies / 40.

    ### build output dictionary
    obs = {}
    obs['filters'] = observate.load_filters(fnames)
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = np.ones_like(maggies,dtype=bool)
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False
    obs['uv'] = uv
    obs['vj'] = vj

    return obs
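A small sanity check (with made-up colors) of the color-to-flux arithmetic above: fixing the V-band flux and scaling the U and J fluxes by 10**(-uv/2.5) and 10**(vj/2.5) reproduces the input U-V and V-J colors.

import numpy as np

uv, vj = 1.2, 0.8                        # hypothetical UVJ colors
vflux = 5e7                              # V-band flux in maggies, as above
maggies = np.array([10**(-uv / 2.5), 1.0, 10**(vj / 2.5)]) * vflux
# m = -2.5*log10(f), so U - V = -2.5*log10(fU/fV) and V - J = -2.5*log10(fV/fJ)
assert np.isclose(-2.5 * np.log10(maggies[0] / maggies[1]), uv)
assert np.isclose(-2.5 * np.log10(maggies[1] / maggies[2]), vj)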
Example #3
File: test_photoz.py  Project: ixkael/gasp
def test_madau():

    filternames = ["decam_u", "decam_g"]
    filterdir = "data/filters"
    filter_list = observate.load_filters(filternames, directory=filterdir)

    lambda_aa, f_lambda_aa = load_test_sed()
    speedoflight = 3e18
    f_nu_aa = f_lambda_aa * lambda_aa**2 / speedoflight

    redshift_grid = np.linspace(0.0, 4, 4)[1:]

    redshifted_fluxes1, redshifted_fluxesb1, redshift_factor = get_redshifted_photometry(
        lambda_aa,
        f_lambda_aa,
        redshift_grid,
        filter_list,
        apply_madau_igm=False)
    redshifted_fluxes2, redshifted_fluxesb2, redshift_factor = get_redshifted_photometry(
        lambda_aa,
        f_lambda_aa,
        redshift_grid,
        filter_list,
        apply_madau_igm=True)

    assert np.any(redshifted_fluxes1 != redshifted_fluxes2)
    assert np.any(redshifted_fluxesb1 != redshifted_fluxesb2)
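The f_lambda to f_nu conversion used in this test follows from f_nu dnu = f_lambda dlambda with nu = c/lambda; a standalone version with a toy SED:

import numpy as np

speedoflight = 3e18                          # c in Angstrom / s
lambda_aa = np.linspace(3000., 10000., 5)    # toy wavelength grid [AA]
f_lambda_aa = np.ones_like(lambda_aa)        # toy SED in erg/s/cm^2/AA
# |dnu/dlambda| = c / lambda^2, so f_nu = f_lambda * lambda^2 / c
f_nu_aa = f_lambda_aa * lambda_aa**2 / speedoflight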
Example #4
File: plotsed.py  Project: moustakas/sedpy
def test():
    from sedpy import observate
    import fsps
    import matplotlib.pyplot as pl

    filters = [
        'galex_NUV', 'sdss_u0', 'sdss_g0', 'sdss_r0', 'sdss_i0', 'sdss_z0',
        'bessell_U', 'bessell_B', 'bessell_V', 'bessell_R', 'bessell_I',
        'twomass_J', 'twomass_H'
    ]
    flist = observate.load_filters(filters)

    sps = fsps.StellarPopulation(compute_vega_mags=False)
    wave, spec = sps.get_spectrum(tage=1.0, zmet=2, peraa=True)

    sed = observate.getSED(wave, spec, flist)
    sed_unc = np.abs(np.random.normal(1, 0.3, len(sed)))

    wgrid = np.linspace(2e3, 13e3, 1000)
    fgrid = np.linspace(-13, -9, 100)
    psed, sedpoints = sed_to_psed(flist, sed, sed_unc, wgrid, fgrid)

    pl.imshow(np.exp(psed).T,
              cmap='Greys_r',
              interpolation='nearest',
              origin='upper',
              aspect='auto')
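As a follow-up (not part of the original test): the AB magnitudes returned by observate.getSED convert directly to the maggies and uncertainties expected by the load_obs examples below; the factor 1.086 = 2.5/ln(10) is the usual small-error magnitude-to-flux propagation also used in the PHAT examples further down.

import numpy as np

mags = np.array([20.0, 21.5, 19.2])        # hypothetical AB magnitudes
mag_unc = 0.1                              # hypothetical magnitude error
maggies = 10**(-0.4 * mags)                # 1 maggie = 3631 Jy (AB zeropoint)
maggies_unc = maggies * mag_unc / 1.086    # since dm ~ 1.086 * df/f for small errors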
Example #5
def selftest():
    from sedpy.observate import load_filters
    sps = sps_basis.StellarPopBasis(debug=True)
    params = {}
    params['tage'] = np.array([1, 2, 3, 4.])
    params['zmet'] = np.array([-0.5, 0.0])
    ntot = len(params['tage']) * len(params['zmet'])
    params['mass'] = np.random.uniform(0, 1, ntot)
    params['sigma_smooth'] = 100.
    outwave = sps.ssp.wavelengths
    flist = ['sdss_u0', 'sdss_r0']
    filters = load_filters(flist)

    # get a spectrum
    s, p, e = sps.get_spectrum(params, outwave, filters)
    # change parameters that affect neither the basis nor the ssp, and
    # get spectrum again
    params['mass'] = np.random.uniform(0, 1, ntot)
    s, p, e = sps.get_spectrum(params, outwave, filters)
    # let's get the basis components while we're at it
    bs, bp, be = sps.get_components(params, outwave, filters)
    # change something that affects the basis
    params['tage'] += 1.0
    bs, bp, be = sps.get_components(params, outwave, filters)
    # try a single age pop at arbitrary metallicity
    params['tage'] = 1.0
    params['zmet'] = -0.2
    bs, bp, be = sps.get_components(params, outwave, filters)
Example #6
def load_obs(objname=None,
             noisefactor=1.0,
             calibrated=False,
             mask=True,
             broaden_obs=False,
             wlo=3750.,
             whi=7500.,
             **extras):

    assert objname == 'M67'

    bmag = 7.5  #hack
    obs = {}
    dat = np.loadtxt(os.path.join(sdir, 'data/m67_nobs.dat'))
    obs['wavelength'] = observate.vac2air(dat[:, 0])
    obs['spectrum'] = dat[:, 1]
    obs['unc'] = dat[:, 2]

    obs['filters'] = observate.load_filters(['sdss_g0'])
    obs['maggies'] = np.array([10**(-0.4 * bmag)])
    obs['maggies_unc'] = 0.05 * obs['maggies']

    # mask
    obs['mask'] = (obs['wavelength'] > wlo) & (obs['wavelength'] < whi)
    #adjust uncertainties
    obs['unc'] *= noisefactor
    obs['noisefactor'] = noisefactor
    obs['spec_calibrated'] = calibrated
    return obs
Example #7
def load_obs(photname='', objname='', **extras):
    """
    Custom-built because the photometric files are actually generated by the model
    """
    obs ={}

    # read the header of the photometric file
    with open(photname, 'r') as f:
        hdr = f.readline().split()
    dat = np.loadtxt(photname, comments = '#',
                     dtype = np.dtype([(n, float) for n in hdr[1:]]))
    obj_ind = np.where(dat['id'] == int(objname))[0][0]
    
    # extract fluxes+uncertainties for all objects and all filters
    flux_fields = [f for f in dat.dtype.names if f[0:2] == 'f_']
    unc_fields = [f for f in dat.dtype.names if f[0:2] == 'e_']
    filters = [f[2:] for f in flux_fields]

    # extract fluxes for particular object, converting from record array to numpy array
    flux = dat[flux_fields].view(float).reshape(len(dat),-1)[obj_ind]
    unc  = dat[unc_fields].view(float).reshape(len(dat),-1)[obj_ind]

    # build output dictionary
    obs['filters'] = observate.load_filters(filters)
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = np.ones_like(flux,dtype=bool)
    obs['maggies'] = flux
    obs['maggies_unc'] =  unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
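A self-contained toy version of the header-driven read above, assuming the same f_/e_ column convention; the per-object extraction here uses a plain list comprehension (as the 3D-HST examples below do) rather than the record-array view trick, which depends on the NumPy version.

import numpy as np
from io import StringIO

# hypothetical photometry table: an id column plus paired flux (f_) and
# uncertainty (e_) columns named after sedpy filters
text = ("# id f_sdss_g0 e_sdss_g0 f_sdss_r0 e_sdss_r0\n"
        "41 1.1e-9 1.0e-10 2.0e-9 1.4e-10\n"
        "42 1.2e-9 1.0e-10 2.3e-9 1.5e-10\n")
hdr = text.splitlines()[0].split()[1:]            # drop the leading '#'
dat = np.loadtxt(StringIO(text), comments='#',
                 dtype=np.dtype([(n, float) for n in hdr]))
obj_ind = np.where(dat['id'] == 42)[0][0]
flux_fields = [f for f in dat.dtype.names if f.startswith('f_')]
unc_fields = [f for f in dat.dtype.names if f.startswith('e_')]
filters = [f[2:] for f in flux_fields]            # -> ['sdss_g0', 'sdss_r0']
flux = np.array([dat[obj_ind][f] for f in flux_fields])
unc = np.array([dat[obj_ind][f] for f in unc_fields])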
Example #8
File: starfitter.py  Project: bd-j/hrdspy
def load_data(self):
    """Read the catalogs, apply distance modulus,
    and determine 'good' pixels"""
    self.data_filterlist = observate.load_filters(self.rp['data_fnamelist'])
    self.data_mag, self.data_magerr, self.data_header = catio.load_image_cube(self.rp)
    self.distance_modulus = 5.0 * np.log10(self.rp['dist']) + 25
    self.nobj = self.data_mag.shape[0]
Example #9
def load_obs(objid=0, phottable='demo_photometry.dat', **kwargs):
    """Load photometry from an ascii file.  Assumes the following columns:
    `objid`, `filterset`, [`mag0`,....,`magN`] where N >= 11.  The User should
    modify this function (including adding keyword arguments) to read in their
    particular data format and put it in the required dictionary.

    :param objid:
        The object id for the row of the photometry file to use.  Integer.
        Requires that there be an `objid` column in the ascii file.

    :param phottable:
        Name (and path) of the ascii file containing the photometry.

    :returns obs:
        Dictionary of observational data.
    """
    # Write your code here to read data.  Can use FITS, h5py, astropy.table,
    # sqlite, whatever.
    # e.g.:
    # import astropy.io.fits as pyfits
    # catalog = pyfits.getdata(phottable)

    # Here we will read in an ascii catalog of magnitudes as a numpy structured
    # array
    with open(phottable, 'r') as f:
        # drop the comment hash
        header = f.readline().split()[1:]
    catalog = np.genfromtxt(phottable, comments='#',
                            dtype=np.dtype([(n, float) for n in header]))

    # Find the right row
    ind = catalog['objid'] == float(objid)
    # Here we are dynamically choosing which filters to use based on the object
    # and a flag in the catalog.  Feel free to make this logic more (or less)
    # complicated.
    filternames = filtersets[ int(catalog[ind]['filterset']) ]
    # And here we loop over the magnitude columns
    mags = [catalog[ind]['mag{}'.format(i)] for i in range(len(filternames))]
    mags = np.array(mags)

    # Build output dictionary. 
    obs = {}
    # This is a list of sedpy filter objects.    See the
    # sedpy.observate.load_filters command for more details on its syntax.
    obs['filters'] = load_filters(filternames)
    # This is a list of maggies, converted from mags.  It should have the same
    # order as `filters` above.
    obs['maggies'] = np.squeeze(10**(-mags/2.5))
    # HACK.  You should use real flux uncertainties
    obs['maggies_unc'] = obs['maggies'] * 0.07
    # Here we mask out any NaNs or infs
    obs['phot_mask'] = np.isfinite(np.squeeze(mags))
    # We have no spectrum.
    obs['wavelength'] = None

    # Add unessential bonus info.  This will be stored in output
    #obs['dmod'] = catalog[ind]['dmod']
    obs['objid'] = objid

    return obs
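A tiny illustration (made-up magnitudes) of how the maggies / phot_mask logic above behaves when one band is missing from the catalog:

import numpy as np

mags = np.array([[20.1], [np.nan], [21.3]])    # one missing band -> NaN
maggies = np.squeeze(10**(-mags / 2.5))        # NaN propagates into maggies
maggies_unc = maggies * 0.07                   # placeholder 7% errors, as above
phot_mask = np.isfinite(np.squeeze(mags))      # -> [True, False, True]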
Example #10
def load_obs(phot_file=run_params['phot_file'], **kwargs):
    import pandas as pd
    data = pd.read_csv(phot_file)

    flux = data[1:]
    maggies = flux / 1000. / 3631.
    unc = maggies * 0.1

    flux_mag = np.asarray(maggies)
    unc_mag = np.asarray(unc)

    # Build output dictionary.
    obs = {}
    # This is a list of sedpy filter objects.    See the
    # sedpy.observate.load_filters command for more details on its syntax.
    obs['filters'] = load_filters(filternames)
    # This is a list of maggies, converted from mJy.  It should have the same
    # order as `filters` above.
    obs['maggies'] = flux_mag
    #Uncertainties also converted from mJy. In same order as flux_mag and filters
    obs['maggies_unc'] = unc_mag
    # Here we mask out any NaNs or infs
    obs['phot_mask'] = np.isfinite(flux_mag)
    # We have no spectrum.
    obs['wavelength'] = None

    return obs
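A unit check for the mJy-to-maggies conversion above (1 maggie is 3631 Jy, the AB zeropoint flux):

flux_mJy = 3631.0e3                   # a source of exactly 3631 Jy ...
maggies = flux_mJy / 1000. / 3631.
assert maggies == 1.0                 # ... is 1 maggie, i.e. AB magnitude 0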
Example #11
def selftest():
    from sedpy.observate import load_filters
    sps = sps_basis.StellarPopBasis(debug=True)
    params = {}
    params['tage'] = np.array([1, 2, 3, 4.])
    params['zmet'] = np.array([-0.5, 0.0])
    ntot = len(params['tage']) * len(params['zmet'])
    params['mass'] = np.random.uniform(0, 1, ntot)
    params['sigma_smooth'] = 100.
    outwave = sps.ssp.wavelengths
    flist = ['sdss_u0', 'sdss_r0']
    filters = load_filters(flist)

    # get a spectrum
    s, p, e = sps.get_spectrum(params, outwave, filters)
    # change parameters that affect neither the basis nor the ssp, and
    # get spectrum again
    params['mass'] = np.random.uniform(0, 1, ntot)
    s, p, e = sps.get_spectrum(params, outwave, filters)
    # let's get the basis components while we're at it
    bs, bp, be = sps.get_components(params, outwave, filters)
    # change something that affects the basis
    params['tage'] += 1.0
    bs, bp, be = sps.get_components(params, outwave, filters)
    # try a single age pop at arbitrary metallicity
    params['tage'] = 1.0
    params['zmet'] = -0.2
    bs, bp, be = sps.get_components(params, outwave, filters)
Example #12
def load_obs(objid=0, phottable='demo_photometry.dat', **kwargs):
    """Load photometry from an ascii file.  Assumes the following columns:
    `objid`, `filterset`, [`mag0`,....,`magN`] where N >= 11.  The User should
    modify this function (including adding keyword arguments) to read in their
    particular data format and put it in the required dictionary.

    :param objid:
        The object id for the row of the photometry file to use.  Integer.
        Requires that there be an `objid` column in the ascii file.

    :param phottable:
        Name (and path) of the ascii file containing the photometry.

    :returns obs:
        Dictionary of observational data.
    """
    # Write your code here to read data.  Can use FITS, h5py, astropy.table,
    # sqlite, whatever.
    # e.g.:
    # import astropy.io.fits as pyfits
    # catalog = pyfits.getdata(phottable)

    # Here we will read in an ascii catalog of magnitudes as a numpy structured
    # array
    with open(phottable, 'r') as f:
        # drop the comment hash
        header = f.readline().split()[1:]
    catalog = np.genfromtxt(phottable, comments='#',
                            dtype=np.dtype([(n, float) for n in header]))

    # Find the right row
    ind = catalog['objid'] == float(objid)
    # Here we are dynamically choosing which filters to use based on the object
    # and a flag in the catalog.  Feel free to make this logic more (or less)
    # complicated.
    filternames = filtersets[ int(catalog[ind]['filterset']) ]
    # And here we loop over the magnitude columns
    mags = [catalog[ind]['mag{}'.format(i)] for i in range(len(filternames))]
    mags = np.array(mags)

    # Build output dictionary. 
    obs = {}
    # This is a list of sedpy filter objects.    See the
    # sedpy.observate.load_filters command for more details on its syntax.
    obs['filters'] = load_filters(filternames)
    # This is a list of maggies, converted from mags.  It should have the same
    # order as `filters` above.
    obs['maggies'] = np.squeeze(10**(-mags/2.5))
    # Hack.  You should use real flux uncertainties
    obs['maggies_unc'] = obs['maggies'] * 0.07
    # Here we mask out any NaNs or infs
    obs['phot_mask'] = np.isfinite(np.squeeze(mags))
    # We have no spectrum.
    obs['wavelength'] = None

    # Add unessential bonus info.  This will be stored in output
    #obs['dmod'] = catalog[ind]['dmod']
    obs['objid'] = objid

    return obs
Example #13
def load_obs(objname, filterset=["sdss_g0", "sdss_r0"], **kwargs):
    obs = {}

    # Convert a model spectrum into broadband fluxes.
    from sedpy.observate import load_filters
    import pandas as pd

    c15 = pd.read_csv('C15_clean.csv')
    obj = c15.loc[c15['NUMBER'] == objname]
    obs['filters'] = load_filters(filterset)
    obs["phot_wave"] = [f.wave_effective for f in obs["filters"]]

    maggies = []
    maggies_unc = []
    for fil in filterset:
        maggies.append(float(obj[fil]) / 3.631e9)
        maggies_unc.append(float(obj[fil + '_err']) / 3.631e9)

    obs['maggies'] = np.array(maggies)
    obs['maggies_unc'] = np.array(maggies_unc)
    obs['phot_mask'] = np.ones(len(filterset), dtype=bool)

    # No spectrum
    obs['wavelength'] = None
    obs["spectrum"] = None

    return obs
Example #14
def read_hdf5(filename, **extras):
    """Read an HDF5 file (with a specific format) into a dictionary of results.

    This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
    respectively contain the sampling chain and the observational data used in
    the inference.

    All attributes of these groups as well as top-level attributes are loaded
    into the top-level of the dictionary using ``json.loads``, and therefore
    must have been written with ``json.dumps``.  This should probably use
    JSONDecoders, but who has time to learn that.

    :param filename:
        Name of the HDF5 file.
    """
    groups = {'sampling': {}, 'obs': {}}
    res = {}
    with h5py.File(filename, "r") as hf:
        # loop over the groups
        for group, d in groups.items():
            # read the arrays in that group into the dictionary for that group
            for k, v in hf[group].items():
                d[k] = np.array(v)
            # unserialize the attributes and put them in the dictionary
            for k, v in hf[group].attrs.items():
                try:
                    d[k] = json.loads(v)
                except:
                    try:
                        d[k] = unpick(v)
                    except:
                        d[k] = v
        # do top-level attributes.
        for k, v in hf.attrs.items():
            try:
                res[k] = json.loads(v)
            except:
                try:
                    res[k] = unpick(v)
                except:
                    res[k] = v
        res.update(groups['sampling'])
        res['obs'] = groups['obs']
        try:
            res['obs']['filters'] = load_filters(
                [str(f) for f in res['obs']['filters']])
        except:
            pass
        try:
            res['rstate'] = unpick(res['rstate'])
        except:
            pass
        try:
            mp = [names_to_functions(p.copy()) for p in res['model_params']]
            res['model_params'] = mp
        except:
            pass

    return res
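For reference, a minimal writer sketch (not taken from any of these projects; dataset and attribute names are illustrative) that produces the layout read_hdf5 expects: 'sampling' and 'obs' groups whose attributes are serialized with json.dumps so the json.loads calls above can recover them.

import json
import h5py
import numpy as np

with h5py.File("demo_results.h5", "w") as hf:
    samp = hf.create_group("sampling")
    samp.create_dataset("chain", data=np.zeros((10, 4)))
    samp.attrs["theta_labels"] = json.dumps(["mass", "tage", "zmet", "dust2"])
    obsgrp = hf.create_group("obs")
    obsgrp.create_dataset("maggies", data=np.ones(3))
    obsgrp.attrs["filters"] = json.dumps(["sdss_g0", "sdss_r0", "sdss_i0"])
    hf.attrs["run_params"] = json.dumps({"nwalkers": 64})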
Example #15
def load_obs(snr=10.0, add_noise=True, filterset=["sdss_g0", "sdss_r0"],
             **kwargs):
    """Make a mock dataset.  Feel free to add more complicated kwargs, and put
    other things in the run_params dictionary to control how the mock is
    generated.

    :param snr:
        The S/N of the mock photometry.  This can also be a vector of the same
        length as the number of filters.

    :param add_noise: (optional, boolean, default: True)
        If True, add a realization of the noise to the mock spectrum
        
    :param filterset:
        A list of `sedpy` filter names.  Mock photometry will be generated 
        for these filters.
    """
    mock = {}
    mock['wavelength'] = None # No spectrum
    
    # Convert a model spectrum into broadband fluxes.
    from sedpy.observate import load_filters
    mock['filters'] = load_filters(filterset)
    mock["phot_wave"] = [f.wave_effective for f in mock["filters"]]
    
    # We need the models to make a mock
    sps = load_sps(**kwargs)
    model = load_model(**kwargs)
    params = {}
    for p in model.params.keys():
        if p in kwargs:
            params[p] = np.atleast_1d(kwargs[p])

    # And build the mock SED
    model.params.update(params)
    spec, phot, mfrac = model.mean_model(model.theta, mock, sps=sps)

    # Now store some output
    mock['true_spectrum'] = spec.copy()
    mock['true_maggies'] = phot.copy()
    from copy import deepcopy
    mock['mock_params'] = deepcopy(model.params)
    
    # And add noise
    pnoise_sigma = phot / snr
    if add_noise:
        pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
        mock['maggies'] = phot + pnoise
    else:
        mock['maggies'] = phot.copy()
    mock['maggies_unc'] = pnoise_sigma
    mock['mock_snr'] = snr
    mock['phot_mask'] = np.ones(len(phot), dtype=bool)

    # No spectrum
    mock['wavelength'] = None
    mock["spectrum"] = None
    
    return mock
Example #16
def load_obs(objname=None, datloc=None, err_floor=0.05, **extras):

    ''' 
    objname: number of object in the 3D-HST COSMOS photometric catalog
    err_floor: the fractional error floor (0.05 = 5% floor)
    zp_err: inflate the errors by the zeropoint offsets from Skelton+14
    '''

    ### open file, load data
    with open(datloc, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1],'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(datloc, comments = '#', dtype = dtype)

    ### extract filters, fluxes, errors for object
    obj_idx = (dat['ID'] == objname)
    filters = np.array([f[1:] for f in dat.dtype.names if f[0] == 'e'])
    flux = np.squeeze([dat[obj_idx][f] for f in filters])
    unc = np.squeeze([dat[obj_idx]['e'+f] for f in filters])

    ### define photometric mask, convert from Jy to maggies
    phot_mask = (flux != unc) & (flux != -99.0) & (unc > 0)
    maggies = flux / 3631
    maggies_unc = unc / 3631

    ### implement error floor
    maggies_unc = np.clip(maggies_unc, maggies*err_floor, np.inf)

    ### mask anything touching or bluewards of Ly-a
    zred = dat['zz'][obj_idx]
    fnames = [f+'_candels' for f in filters]
    translate = {
                 'f24_candels':'spitzer_mips_24',
                 'f70_candels':'spitzer_mips_70',
                 'f100_candels':'herschel_pacs_100',
                 'f160_candels':'herschel_pacs_160',
                 'f250_candels':'herschel_spire_250'
                }
    fnames = [translate[f] if f in translate.keys() else f for f in fnames]
    ofilters = observate.load_filters(fnames)

    wavemax = np.array([f.wavelength[f.transmission > (f.transmission.max()*0.1)].max() for f in ofilters]) / (1+zred)
    wavemin = np.array([f.wavelength[f.transmission > (f.transmission.max()*0.1)].min() for f in ofilters]) / (1+zred)
    filtered = [1230]
    for f in filtered: phot_mask[(wavemax > f) & (wavemin < f)] = False
    phot_mask[wavemin < 1200] = False

    ### build output dictionary
    obs = {}
    obs['filters'] = ofilters
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
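The error-floor line above is a simple clip; a worked example with made-up fluxes:

import numpy as np

err_floor = 0.05
maggies = np.array([1e-9, 2e-9, 5e-10])
maggies_unc = np.array([1e-11, 3e-10, 1e-12])
# any uncertainty below err_floor * flux is raised to that floor
maggies_unc = np.clip(maggies_unc, maggies * err_floor, np.inf)
# -> [5e-11, 3e-10, 2.5e-11]: the 1st and 3rd errors were inflated to 5% of the flux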
Example #17
File: read_results.py  Project: jrleja/bsfh
def read_hdf5(filename, **extras):
    """Read an HDF5 file (with a specific format) into a dictionary of results.

    This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
    respectively contain the sampling chain and the observational data used in
    the inference.

    All attributes of these groups as well as top-level attributes are loaded
    into the top-level of the dictionary using ``json.loads``, and therefore
    must have been written with ``json.dumps``.  This should probably use
    JSONDecoders, but who has time to learn that.

    :param filename:
        Name of the HDF5 file.
    """
    groups = {'sampling': {}, 'obs': {}}
    res = {}
    with h5py.File(filename, "r") as hf:
        # loop over the groups
        for group, d in groups.items():
            # read the arrays in that group into the dictionary for that group
            for k, v in hf[group].items():
                d[k] = np.array(v)
            # unserialize the attributes and put them in the dictionary
            for k, v in hf[group].attrs.items():
                try:
                    d[k] = json.loads(v)
                except:
                    try:
                        d[k] = unpick(v)
                    except:
                        d[k] = v
        # do top-level attributes.
        for k, v in hf.attrs.items():
            try:
                res[k] = json.loads(v)
            except:
                try:
                    res[k] = unpick(v)
                except:
                    res[k] = v
        res.update(groups['sampling'])
        res['obs'] = groups['obs']
        try:
            res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])
        except:
            pass
        try:
            res['rstate'] = unpick(res['rstate'])
        except:
            pass
        try:
            mp = [names_to_functions(p.copy()) for p in res['model_params']]
            res['model_params'] = mp
        except:
            pass

    return res
Example #18
def load_obs_mmt(filename=None, objname=None, #dist = 1e-5, vel = 0.0,
                  wlo=3750., whi=7200., verbose=False,
                  phottable='data/f2_apcanfinal_6phot_v2.fits',
                  **kwargs):
    """
    Read one of Caldwell's MMT spectra and find the matching PHAT
    photometry, return a dictionary containing the observations.
    """
    from sedpy import observate

    obs ={}

    ####### SPECTRUM #######
    if verbose:
        print('Loading data from {0}'.format(filename))

    scale = 1e0 #testing
    #fluxconv = np.pi * 4. * (dist * 1e6 * pc)**2 / lsun #erg/s/AA/cm^2 to L_sun/AA
    fluxconv = 1.0  # 5.0e-20 * scale; approximate counts to erg/s/AA/cm^2
    #redshift = 0.0 #vel / 2.998e8
    dat = np.squeeze(pyfits.getdata(filename))
    hdr = pyfits.getheader(filename)
    
    crpix = (hdr['CRPIX1'] -1) #convert from FITS to numpy indexing
    try:
        cd = hdr['CDELT1']
    except (KeyError):
        cd = hdr['CD1_1']

    obs['wavelength'] = (np.arange(dat.shape[1]) - crpix) * cd + hdr['CRVAL1']
    obs['spectrum'] = dat[0,:] * fluxconv
    obs['unc'] = np.sqrt(dat[1,:]) * fluxconv
    
    #Masking.  should move to a function that reads a mask definition file
    #one should really never mask in the rest frame - that should be modeled!
    obs['mask'] =  ((obs['wavelength'] >= wlo ) & (obs['wavelength'] <= whi))
    obs['mask'] = obs['mask'] & ((obs['wavelength'] <= 5570) |
                                 (obs['wavelength'] >= 5590)) #mask OI sky line
    obs['mask'] = obs['mask'] & ((obs['wavelength'] <= 6170) |
                                 (obs['wavelength'] >= 6180)) #mask...something.

    #obs['wavelength'] /= (1.0 + redshift)

    ######## PHOTOMETRY ########
    if verbose:
        print('Loading mags from {0} for {1}'.format(phottable, objname))
    mags, mags_unc, flag = query_phatcat(objname, phottable = phottable, **kwargs)
    
    obs['filters'] = observate.load_filters(['wfc3_uvis_'+b.lower() for b in
                                             ["F275W", "F336W", "F475W", "F814W"]] +
                                             ['wfc3_ir_'+b.lower() for b in
                                              ["F110W", "F160W"]])
    obs['maggies'] = 10**(-0.4 * (mags -
                                  np.array([f.ab_to_vega for f in obs['filters']]) -
                                  2.5*np.log10(scale) ))
    obs['maggies_unc'] = mags_unc * obs['maggies'] / 1.086

    return obs
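The wavelength array above is the standard linear FITS solution, lambda_i = CRVAL1 + (i - (CRPIX1 - 1)) * CDELT1, with CRPIX1 shifted to 0-based (numpy) indexing; a standalone check with hypothetical header values:

import numpy as np

crval1, crpix1, cdelt1, npix = 3700.0, 1.0, 1.5, 4   # hypothetical header values
crpix = crpix1 - 1                                   # FITS -> numpy indexing
wavelength = (np.arange(npix) - crpix) * cdelt1 + crval1
# -> [3700.0, 3701.5, 3703.0, 3704.5]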
Example #19
def zsfh_to_obs(sfhlist, zlist, lfbandnames=None, select_function=None,
                bandnames=None, sps=None, isocs=None, **kwargs):
    """
    Go from a list of SFHs (one for each metallicity) to a broadband SED and set of
    luminosity functions for a stellar population.
    """
    sed_values, lf_values = {}, []
    #basti = np.any(sps.zlegend[0:2] == 0.0006) #Hack to check for Basti Isochrones
    nsfh = len(sfhlist)
    assert len(zlist) == nsfh

    ###########
    # SED
    ############
    if bandnames is not None:
        filterlist = observate.load_filters(bandnames)
        spec, wave, mass = rsed.one_region_sed(copy.deepcopy(sfhlist), total_zmet, sps)
        mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
        maggies = 10**(-0.4 * np.atleast_1d(mags))
        sed_values['sed_ab_maggies'] = maggies
        sed_values['sed_filters'] = bandnames

    #############
    # LFs
    ############
    #create the SSP CLFs, using nearest neighbor interpolation for the metallicity
    all_lf_base = []
    bins = rsed.lfbins
    for i,zmet in enumerate(zlist):
        if isocs is not None:
            isoc = isocs[i]
        else:
            sps.params['zmet'] = np.abs(sps.zlegend - zmet).argmin() + 1
            isoc = sps.isochrones()
            print("Using Zmet={0} in place of requested "
            "Zmet={1}".format(sps.zlegend[sps.params['zmet']-1],zmet))

        ldat = isochrone_to_clfs(copy.deepcopy(isoc), lfbandnames,
                                 select_function=select_function, **kwargs)
        all_lf_base += [ldat]
    #use the SSP CLFs to generate a total LF (for each band)
    for i, band in enumerate(lfbandnames):
        lf_oneband = {}
        lf_base = [zbase[i] for zbase in all_lf_base]
        lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(sfhlist), lf_base)
        lf_oneband['bandname'] = band
        lf_oneband['clf'] = lf
        lf_oneband['clf_mags'] = bins
        lf_oneband['logages'] = logages
        lf_oneband['clfs_zt'] = lfs_zt
        
        lf_values += [lf_oneband]
        
    #############
    # Write output
    ############
    return sed_values, lf_values
Example #20
File: ggcdata.py  Project: bd-j/speccal
def ggc_mock(model,
             theta,
             sps,
             objname='',
             apply_cal=True,
             mask=True,
             add_noise=False,
             phot_snr=30,
             spec_snr=None,
             **extras):
    """Generate a mock spectrum
    """
    mock = {}
    fnames = [
        'galex_FUV', 'galex_NUV', 'sdss_g0', 'sdss_r0', 'sdss_i0', 'sdss_z0',
        'twomass_J', 'twomass_H', 'twomass_Ks'
    ]

    # load the calibrated spectrum
    cal = ggc_spec(objname, 'a', '1', fluxtype=None, **extras)
    if spec_snr is None:
        spec_snr = cal['spectrum'] / cal['unc']
    mock['filters'] = observate.load_filters(fnames)
    mock['wavelength'] = cal['wavelength']
    mock['sky'] = cal['sky']
    s, p, x = model.sed(theta, mock, sps=sps)
    mock['intrinsic_true_spectrum'] = s.copy()
    mock['intrinsic_true_maggies'] = p.copy()
    mock['mock_params'] = model.params
    mock['mock_theta'] = model.theta.copy()

    mock['calibration'], noise = 1.0, 0.0
    mock['added_noise'] = None
    if apply_cal:
        s *= cal['calibration']
        mock['calibration'] = cal['calibration'].copy()
    noise_sigma = s / spec_snr
    pnoise_sigma = p / phot_snr
    if add_noise:
        noise = np.random.normal(0, 1, len(s)) * noise_sigma
        s += noise.copy()
        mock['added_noise'] = noise.copy()
        pnoise = np.random.normal(0, 1, len(p)) * pnoise_sigma
        p += pnoise.copy()
        mock['added_phot_noise'] = pnoise.copy()

    mock['spectrum'] = s
    mock['maggies'] = p
    # Should use s/spec_snr??? no, that does not actually give the
    # likelihood of the mock data given the model and the uncertainty
    mock['unc'] = noise_sigma
    mock['maggies_unc'] = pnoise_sigma

    if mask:
        mock = ggc_mask(mock)

    return mock
Example #21
def load_obs(objid=0, phottable=photfile, **kwargs):
    """Load photometry from an ascii file.  Assumes the following columns:
    `objid`, `filterset`, [`mag0`,....,`magN`] where N >= 11.  The User should
    modify this function (including adding keyword arguments) to read in their
    particular data format and put it in the required dictionary.

    :param objid:
        The object id for the row of the photometry file to use.  Integer.
        Requires that there be an `objid` column in the ascii file.

    :param phottable:
        Name (and path) of the ascii file containing the photometry.

    :returns obs:
        Dictionary of observational data.
    """
    from astropy.io import ascii
    # Write your code here to read data.  Can use FITS, h5py, astropy.table,
    # sqlite, whatever.
    # NO YOU CANT YOU CANT USE .DAT SO DON'T WHATEVER ME, MISTER
    # e.g.:
    import astropy.io.fits as pyfits
    #catalog = pyfits.getdata(phottable)
    global catalog
    catalog = ascii.read(phottable)

    #ind = catalog['objid'] == float(objid)
    # Pick up data from our file, yo.
    wavelengths = [475,814,1600]
    mags = np.array([catalog['f_{}'.format(i)] for i in wavelengths])
    ivars = np.array([catalog['ivar_{}'.format(i)] for i in wavelengths])
    ids = catalog['ID']
    zs = catalog['z']

    # Build output dictionary. 
    obs = {}
    # This is a list of sedpy filter objects.    See the
    # sedpy.observate.load_filters command for more details on its syntax.
    obs['filters'] = load_filters(filtersets)
    # sg_flux feeds nanomaggies, oops
    obs['all_maggies'] = np.squeeze(mags*10**(-9))
    #obs['maggies_unc'] = np.squeeze(ivars*10**(-9))
    #obs['maggies'] = np.squeeze(10**(-mags/2.5))
    obs['all_maggies_unc'] = obs['all_maggies'] * 0.07
    # Here we mask out any NaNs or infs
    obs['all_phot_mask'] = np.isfinite(np.squeeze(mags))
    # We have no spectrum.
    obs['wavelength'] = None
    obs['objid'] = ids
    obs['z'] = zs
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs

Example #22
def load_obs(snr=10.0,
             filterset=["sdss_g0", "sdss_r0"],
             add_noise=True,
             **kwargs):
    """Make a mock dataset.  Feel free to add more complicated kwargs, and put
    other things in the run_params dictionary to control how the mock is
    generated.

    :param snr:
        The S/N of the mock photometry.  This can also be a vector of the same
        length as the number of filters.

    :param filterset:
        A list of `sedpy` filter names.  Mock photometry will be generated
        for these filters.

    :param add_noise: (optional, boolean, default: True)
        If True, add a realization of the noise to the mock spectrum
    """
    # We'll put the mock data in this dictionary, just as we would for real
    # data.  But we need to know which bands (and wavelengths if doing
    # spectroscopy) in which to generate mock data.
    mock = {}
    mock['wavelength'] = None  # No spectrum
    mock['filters'] = load_filters(filterset)
    #

    # We need the models to make a mock
    sps = load_sps(**kwargs)
    mod = load_model(**kwargs)

    # Now we get the mock params from the kwargs dict
    params = {}
    for p in mod.params.keys():
        if p in kwargs:
            params[p] = np.atleast_1d(kwargs[p])

    # And build the mock
    mod.params.update(params)
    spec, phot, _ = mod.mean_model(mod.theta, mock, sps=sps)
    # Now store some output
    mock['true_spectrum'] = spec.copy()
    mock['true_maggies'] = phot.copy()
    mock['mock_params'] = deepcopy(mod.params)
    # And add noise
    pnoise_sigma = phot / snr
    if add_noise:
        pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
        mock['maggies'] = phot + pnoise
    else:
        mock['maggies'] = phot.copy()
    mock['maggies_unc'] = pnoise_sigma
    mock['mock_snr'] = snr
    mock['phot_mask'] = np.ones(len(phot), dtype=bool)

    return mock
Example #23
File: pyxydust.py  Project: bd-j/pyxydust
def load_models(self):
    """Load the Draine & Li basis models, initialize the grid to
    hold resampled models, and load the filters
    """
    # Draine and Li Basis
    self.dl07 = dustmodel.DraineLi()
    # object to hold the model grid
    self.dustgrid = dustmodel.SpecLibrary()
    # filter objects
    self.filterlist = observate.load_filters(self.rp['fnamelist'])
Example #24
def build_obs(**kwargs):

    from hyperion.model import ModelOutput
    from astropy import units as u
    from astropy import constants
    print('galaxy: ', sys.argv[1])
    m = ModelOutput(
        "/ufrc/narayanan/s.lower/pd_runs/simba_m25n512/snap305_dustscreen/snap305/snap305.galaxy"
        + str(sys.argv[1]) + ".rtout.sed")
    wav, flux = m.get_sed(inclination=0, aperture=-1)
    wav = np.asarray(wav) * u.micron  #wav is in micron
    wav = wav.to(u.AA)
    flux = np.asarray(flux) * u.erg / u.s
    dl = (10. * u.pc).to(u.cm)
    flux /= (4. * np.pi * dl**2.)
    nu = constants.c.cgs / (wav.to(u.cm))
    nu = nu.to(u.Hz)
    flux /= nu
    flux = flux.to(u.Jy)
    maggies = flux / 3631.

    filters_unsorted = load_filters(filternames)
    waves_unsorted = [x.wave_mean for x in filters_unsorted]
    filters = [x for _, x in sorted(zip(waves_unsorted, filters_unsorted))]
    flx = []
    flxe = []

    for i in range(len(filters)):
        flux_range = []
        wav_range = []
        for j in filters[i].wavelength:
            flux_range.append(maggies[find_nearest(wav.value, j)].value)
            wav_range.append(wav[find_nearest(wav.value, j)].value)
        a = np.trapz(wav_range * filters[i].transmission * flux_range,
                     wav_range,
                     axis=-1)
        b = np.trapz(wav_range * filters[i].transmission, wav_range)
        flx.append(a / b)
        flxe.append(0.03 * flx[i])

    flx = np.asarray(flx)
    flxe = np.asarray(flxe)
    flux_mag = flx
    unc_mag = flxe

    obs = {}
    obs['filters'] = filters
    obs['maggies'] = flux_mag
    obs['maggies_unc'] = unc_mag
    obs['phot_mask'] = np.isfinite(flux_mag)
    obs['wavelength'] = None
    obs['spectrum'] = None

    return obs
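The loop above computes a transmission-weighted mean of the f_nu spectrum (in maggies) for each filter. For comparison, a sketch of sedpy's own projection, observate.getSED, which performs a similar transmission-weighted integral but takes an f_lambda spectrum and returns AB magnitudes (assuming sedpy ships these filter names):

import numpy as np
from sedpy import observate

wave = np.linspace(1e3, 5e4, 5000)           # Angstroms
f_lambda = 1e-15 * np.ones_like(wave)        # toy flat f_lambda spectrum
filters = observate.load_filters(["sdss_g0", "sdss_r0"])
mags = observate.getSED(wave, f_lambda, filterlist=filters)   # AB magnitudes
maggies = 10**(-0.4 * mags)                  # back to maggies if needed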
Example #25
File: test_photoz.py  Project: ixkael/gasp
def test_photometry_and_transferfunctions():

    filternames = ["decam_g", "decam_r", "decam_z"]
    filterdir = "data/filters"
    filter_list = observate.load_filters(filternames, directory=filterdir)

    lambda_aa, f_lambda_aa = load_test_sed()
    speedoflight = 3e18
    f_nu_aa = f_lambda_aa * lambda_aa**2 / speedoflight

    redshift_grid = np.linspace(0.0, 2, 2)[1:]

    redshifted_fluxes, redshifted_fluxes2, redshift_factor = get_redshifted_photometry(
        lambda_aa, f_lambda_aa, redshift_grid, filter_list)

    relative_accuracy = 0.01
    assert np.allclose(redshifted_fluxes,
                       redshifted_fluxes2,
                       rtol=relative_accuracy)

    (
        transfer_functions_f_lambda,
        redshift_factor4,
    ) = build_restframe_photometric_transferfunction(redshift_grid,
                                                     lambda_aa,
                                                     filter_list,
                                                     f_lambda=True)

    (
        transfer_functions_f_nu,
        redshift_factor5,
    ) = build_restframe_photometric_transferfunction(redshift_grid,
                                                     lambda_aa,
                                                     filter_list,
                                                     f_lambda=False)

    redshifted_fluxes4 = np.sum(transfer_functions_f_lambda *
                                f_lambda_aa[None, :, None],
                                axis=1)
    redshifted_fluxes5 = np.sum(transfer_functions_f_nu *
                                f_nu_aa[None, :, None],
                                axis=1)

    rtol, atol = 1e-5, 1e-8
    assert np.allclose(redshifted_fluxes,
                       redshifted_fluxes4,
                       rtol=rtol,
                       atol=atol)
    assert np.allclose(redshift_factor, redshift_factor4, rtol=rtol, atol=atol)
    assert np.allclose(redshifted_fluxes,
                       redshifted_fluxes5,
                       rtol=rtol,
                       atol=atol)
    assert np.allclose(redshift_factor, redshift_factor5, rtol=rtol, atol=atol)
Example #26
def read_results(filename):
    res, obs, mod = reader.results_from(path_res + filename)
    # update data table
    res['run_params']['data_table'] = path_wdir + 'data/halo7d_with_phot.fits'
    mod = reader.get_model(res)
    # update filters
    filternames = [str(ii) for ii in obs['filters']]
    obs['filters'] = load_filters(filternames, directory=filter_folder)
    # load sps
    sps = reader.get_sps(res)
    return (res, obs, mod, sps)
Example #27
def load_obs(snr=10.0, filterset=["sdss_g0", "sdss_r0"],
             add_noise=True, **kwargs):
    """Make a mock dataset.  Feel free to add more complicated kwargs, and put
    other things in the run_params dictionary to control how the mock is
    generated.

    :param snr:
        The S/N of the mock photometry.  This can also be a vector of the same
        length as the number of filters.

    :param filterset:
        A list of `sedpy` filter names.  Mock photometry will be generated
        for these filters.

    :param add_noise: (optional, boolean, default: True)
        If True, add a realization of the noise to the mock spectrum
    """
    # We'll put the mock data in this dictionary, just as we would for real
    # data.  But we need to know which bands (and wavelengths if doing
    # spectroscopy) in which to generate mock data.
    mock = {}
    mock['wavelength'] = None # No spectrum
    mock['filters'] = load_filters(filterset)
    # 

    # We need the models to make a mock
    sps = load_sps(**kwargs)
    mod = load_model(**kwargs)

    # Now we get the mock params from the kwargs dict
    params = {}
    for p in mod.params.keys():
        if p in kwargs:
            params[p] = np.atleast_1d(kwargs[p])

    # And build the mock
    mod.params.update(params)
    spec, phot, _ = mod.mean_model(mod.theta, mock, sps=sps)
    # Now store some output
    mock['true_spectrum'] = spec.copy()
    mock['true_maggies'] = phot.copy()
    mock['mock_params'] = deepcopy(mod.params)
    # And add noise
    pnoise_sigma = phot / snr
    if add_noise:
        pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
        mock['maggies'] = phot + pnoise
    else:
        mock['maggies'] = phot.copy()
    mock['maggies_unc'] = pnoise_sigma
    mock['mock_snr'] = snr
    mock['phot_mask'] = np.ones(len(phot), dtype=bool)

    return mock
Example #28
def load_obs(filter_key=1, **extras):

    # what are our mock parameters?
    params = {
              'logmass': 10,
              'logzsol': -0.75,
              'logsfr_ratios': np.array([0.15,0.15,0.15,0.15,0.15,0.15]), # hacked, gently rising
              'dust2': 0.5,
              'dust1_fraction': 0.9,
              'fagn': 0.05,
              'agn_tau': 20,
              'duste_qpah': 0.5,
              'duste_umin': 2,
              'duste_gamma': 0.2,
              'dust_index': -0.3
              }
    for p in params.keys(): params[p] = np.atleast_1d(params[p])

    # what are our filters?
    filters = find_filters(filter_key)

    # load model, sps
    mod = load_model(**run_params)
    sps = load_sps(**run_params)

    # we will also need an obs dictionary
    obs = {}
    obs['filters'] = observate.load_filters(filters)
    obs['wavelength'] = None

    # signal to noise ratio
    snr = 20
    if filter_key == 10:
        snr = np.array([np.repeat(10,10).tolist() + [4]])

    # Generate the photometry, add noise
    mod.params.update(params)
    spec, maggies, _ = mod.mean_model(mod.theta, obs, sps=sps)
    maggies_unc = np.atleast_1d((maggies / snr).squeeze())
    phot_mask = np.ones_like(maggies,dtype=bool)

    ### build output dictionary
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['spectrum'] = None
    obs['logify_spectrum'] = False
    obs['true_params'] = params
    obs['true_spec'] = spec

    return obs
Example #29
def project_filter(wave, spectrum, bands=["sdss_r0"]):
    """
    :param wave: 
        Wavelengths in angstroms ndarray of shape (nw,)

    :param spectrum: 
        Spectrum, in units of f_lambda (magnitudes will be correct if they are
        units of erg/s/cm^2). same shape as `wave`
    """

    filters = load_filters(bands)
    mags = getSED(wave, spectrum, filterlist=filters)
    return mags
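A hypothetical usage of project_filter, assuming load_filters and getSED are imported at module level as the snippet itself assumes:

import numpy as np

wave = np.linspace(3e3, 1.1e4, 2000)          # Angstroms
spectrum = 1e-16 * np.ones_like(wave)         # toy f_lambda spectrum, erg/s/cm^2/AA
r_mag = project_filter(wave, spectrum, bands=["sdss_r0"])   # AB magnitude array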
Example #30
def load_obs_lris(
        filename=None,
        objname=None,  #dist = 1e-5, vel = 0.0,
        wlo=3550.,
        whi=5500.,
        verbose=False,
        phottable='data/f2_apcanfinal_6phot_v2.fits',
        **kwargs):
    """
    Read one of the Keck LRIS spectra and find the matching PHAT
    photometry, return a dictionary containing the observations.
    """
    from sedpy import observate

    obs = {}

    ####### SPECTRUM #######
    if verbose:
        print('Loading data from {0}'.format(filename))

    #fluxconv = np.pi * 4. * (dist * 1e6 * pc)**2 / lsun #erg/s/AA/cm^2 to L_sun/AA
    fluxconv = 1.0
    scale = 1e0  #testing
    #redshift = vel / 2.998e8
    dat = pyfits.getdata(filename)
    hdr = pyfits.getheader(filename)

    obs['wavelength'] = dat[0]['wave_opt']
    obs['spectrum'] = dat[0]['spec']
    obs['unc'] = 1. / np.sqrt(dat[0]['ivar'])
    #masking
    obs['mask'] = ((obs['wavelength'] >= wlo) & (obs['wavelength'] <= whi))
    #obs['wavelength'] /= (1.0 + redshift)

    ######## PHOTOMETRY ######
    if verbose:
        print('Loading mags from {0} for {1}'.format(phottable, objname))
    mags, mags_unc, flag = query_phatcat(objname,
                                         phottable=phottable,
                                         **kwargs)

    obs['filters'] = observate.load_filters([
        'wfc3_uvis_' + b.lower() for b in ["F275W", "F336W", "F475W", "F814W"]
    ] + ['wfc3_ir_' + b.lower() for b in ["F110W", "F160W"]])
    obs['maggies'] = 10**(
        -0.4 *
        (mags - np.array([f.ab_to_vega
                          for f in obs['filters']]) - 2.5 * np.log10(scale)))
    obs['maggies_unc'] = mags_unc * obs['maggies'] / 1.086

    return obs
Example #31
def build_obs(objid=objid, phottable=None, luminosity_distance=None, **kwargs):

    from prospect.utils.obsutils import fix_obs

    wl, flux, flux_err = prospector_wl_flux(path_spec, path_err)
    maggies, emaggies = phot_maggies(work_df_uvista)

    obs = {}
    filternames = [
        "UVISTA_Ks",
        "UVISTA_H",
        "UVISTA_J",
        "UVISTA_Y",
        "IB427.SuprimeCam",
        "IB464.SuprimeCam",
        "IB484.SuprimeCam",
        "IB505.SuprimeCam",
        "IB527.SuprimeCam",
        "IB574.SuprimeCam",
        "IB624.SuprimeCam",
        "IB679.SuprimeCam",
        "IB709.SuprimeCam",
        "IB738.SuprimeCam",
        "IB767.SuprimeCam",
        "IB827.SuprimeCam",
        "spitzer_irac_ch1",
        "spitzer_irac_ch2",
        "spitzer_irac_ch3",
        "spitzer_irac_ch4",  #"spitzer_mips_24",
        "galex_FUV",
        "galex_NUV",
        "u_megaprime_sagem",
        "B_subaru",
        "V_subaru",
        "g_subaru",
        "r_subaru",
        "i_subaru",
        "z_subaru",
    ]
    obs['filters'] = load_filters(filternames)
    obs['maggies'] = maggies
    floor_phot = 0.05 * obs['maggies']
    obs['maggies_unc'] = np.clip(emaggies, floor_phot, np.inf)
    obs['wavelength'] = wl
    obs['spectrum'] = flux
    floor_spec = 0.01 * obs['spectrum']
    obs['unc'] = np.clip(flux_err, floor_spec, np.inf)
    obs['objid'] = objid

    obs = fix_obs(obs)
    return obs
Example #32
def median_by_band(x,y,avg=False,log=False):

    ##### get filter effective wavelengths for sorting
    delz = 0.15
    from brownseds_np_params import translate_filters
    from sedpy import observate

    filtnames = np.array(translate_filters(0,full_list=True))
    filts = observate.load_filters(filtnames[filtnames != 'nan'])
    wave_effective = np.array([filt.wave_effective for filt in filts])/1e4
    wave_effective.sort()

    ### remove overlapping filters
    ### and ones outside the plot
    delta_lam = 0.06
    for i in range(wave_effective.shape[0]):
        if i >= wave_effective.shape[0]:
            continue
        if ((np.abs(1-wave_effective/wave_effective[i]) < delta_lam).sum() > 1):
            wave_effective = np.delete(wave_effective,i)

    wave_effective = wave_effective[wave_effective < 25]


    avglam, outval, outval_up, outval_down, outval_upup, outval_downdown = [np.array([]) for i in range(6)]
    for lam in wave_effective:
        #in_bounds = (x <= lam) & (x > lam/(1+delz))
        in_bounds = (np.abs(1-lam/x) < delta_lam)
        avglam = np.append(avglam, np.mean(x[in_bounds]))
        
        mid, top, bot, toptop, botbot = np.percentile(y[in_bounds], [50, 84, 16, 95, 5])
        outval_up = np.append(outval_up, top)
        outval_down = np.append(outval_down, bot)
        outval_upup = np.append(outval_upup, toptop)
        outval_downdown = np.append(outval_downdown, botbot)
        
        if avg:
            mean, sig = y[in_bounds].mean(), np.std(y[in_bounds])
            outval = np.append(outval, mean)
            #outval_up = np.append(outval_up, mean+sig)
            #outval_down = np.append(outval_down, mean-sig)
            #outval_upup = np.append(outval_upup, mean+2*sig)
            #outval_downdown = np.append(outval_downdown, mean-2*sig)
        else:
            outval = np.append(outval, mid)

    if log:
        return avglam, -np.log10(1-outval), -np.log10(1-outval_up), -np.log10(1-outval_down), -np.log10(1-outval_upup), -np.log10(1-outval_downdown)
    else:
        return avglam, outval, outval_up, outval_down, outval_upup, outval_downdown
Example #33
def load_obs(objname=None, datdir=None, runname=None, err_floor=0.05, zperr=True, no_zp_corrs=False, **extras):

    ''' 
    objname: number of object in the 3D-HST COSMOS photometric catalog
    err_floor: the fractional error floor (0.05 = 5% floor)
    zp_err: inflate the errors by the zeropoint offsets from Skelton+14
    '''

    filters = ['f1000w_miri','f1280w_miri','f1500w_miri','f1800w_miri','f2100w_miri','mips_24um_aegis']
    obs = {'maggies': None, 
           'phot_mask': np.ones(len(filters), dtype=bool),
           'wavelength': None, 
           'filters': observate.load_filters(filters)}
    return obs
Example #34
def load_obs(snr=10, out_of_priors=False, **extras):

    # if we want to generate observations of a low metallicity galaxy with a rising SFH instead
    # this is not in our allowed parameter space!
    if out_of_priors:
        from example3_params import load_obs as load_obs_ex3
        return load_obs_ex3()

    # what are our mock parameters?
    params = {
        'logmass': 10,
        'logzsol': -0.25,
        'dust2': 0.5,
        'logtau': 1.0,
        'tage': 8
    }
    for p in params.keys():
        params[p] = np.atleast_1d(params[p])

    # will need model and sps object to generate photometry
    mod = load_model(out_of_priors=out_of_priors, **run_params)
    sps = load_sps(**run_params)

    # we will also need filters
    obs = {}
    filters = [
        'sdss_u0', 'sdss_g0', 'sdss_r0', 'sdss_i0', 'twomass_J', 'twomass_H',
        'twomass_Ks'
    ]
    obs['filters'] = observate.load_filters(filters)
    obs['wavelength'] = None

    # generate the photometry, add noise
    mod.params.update(params)
    spec, maggies, _ = mod.mean_model(mod.theta, obs, sps=sps)
    maggies_unc = (maggies / snr).squeeze()
    phot_mask = np.ones_like(maggies, dtype=bool)

    # finishing building obs dictionary
    obs['wave_effective'] = np.array(
        [filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] = maggies_unc
    obs['spectrum'] = None
    obs['true_params'] = params
    obs['true_spec'] = spec

    return obs
Example #35
def load_obs(photname, objname, err_floor=0.05, zperr=True, **extras):
    '''
    photname: photometric file location
    objname: number of object in the 3D-HST COSMOS photometric catalog
    err_floor: the fractional error floor (0.05 = 5% floor)
    zp_err: inflate the errors by the zeropoint offsets from Skelton+14
    '''

    # OPEN FILE, LOAD DATA
    with open(photname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(photname, comments='#', delimiter=' ', dtype=dtype)

    # EXTRACT FILTERS, FLUXES, ERRORS FOR OBJECT
    obj_idx = (dat['id'] == objname)
    # print(dat[obj_idx]['id'], 'idx')
    filters = np.array(
        filts)  # [f[2:] for f in dat.dtype.names if f[0:2] == 'f_'])
    flux = np.squeeze([dat[obj_idx]['f_' + f] for f in filternames])
    unc = np.squeeze([dat[obj_idx]['e_' + f] for f in filternames])

    # DEFINE PHOTOMETRIC MASK, CONVERT TO MAGGIES
    phot_mask = (flux != -99.0)
    maggies = flux * 10**-6 / 3631  # flux [uJy] * 1e-6 [Jy / uJy] * 1 [maggy] / 3631 [Jy]
    maggies_unc = unc * 10**-6 / 3631
    # print(maggies, 'maggies')
    # print(flux, 'flux')
    # print(maggies_unc, 'maggies_unc')
    # print(unc, 'unc')

    # ERROR FLOOR
    maggies_unc = np.clip(
        maggies_unc, maggies * err_floor,
        np.inf)  # for any unc < err_floor, replace with err_floor

    # BUILD OUTPUT DICTIONARY
    obs = {}
    obs['filters'] = observate.load_filters(filters)
    obs['wave_effective'] = np.array(
        [filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] = maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
Example #36
def build_obs(**kwargs):
    sps = build_sps()
    mod = build_model()
    fake_obs = {'filters': load_filters(filternames), 'wavelength': None}

    mod.params['dust_type'] = 0  # non-KC attenuation curve
    #mod.params['dust1'] = 1.0

    initial_theta = mod.initial_theta.copy()
    initial_theta[mod.theta_labels().index('dust2')] = 1.5
    initial_theta[mod.theta_labels().index('logzsol')] = 0.0
    initial_theta[mod.theta_labels().index('tage')] = 2.0
    spec, mags, stellar_mass = mod.mean_model(initial_theta,
                                              sps=sps,
                                              obs=fake_obs)

    obs = {}
    obs['maggies'] = mags
    obs['filters'] = load_filters(filternames)
    obs['maggies_unc'] = mags * 0.1
    obs['phot_mask'] = np.isfinite(mags)
    obs['wavelength'] = None
    obs['spectrum'] = None
    return obs
Example #37
def load_obs(photname, objname, err_floor=0.05, zperr=True, **extras):
    '''
    photname: photometric file location
    objname: number of object in the 3D-HST COSMOS photometric catalog
    err_floor: the fractional error floor (0.05 = 5% floor)
    zp_err: inflate the errors by the zeropoint offsets from Skelton+14
    '''

    ### open file, load data
    with open(photname, 'r') as f:
        hdr = f.readline().split()
    dtype = np.dtype([(hdr[1], 'S20')] + [(n, float) for n in hdr[2:]])
    dat = np.loadtxt(photname, comments='#', delimiter=' ', dtype=dtype)

    ### extract filters, fluxes, errors for object
    # from ReadMe: "All fluxes are normalized to an AB zeropoint of 25, such that: magAB = 25.0-2.5*log10(flux)"
    obj_idx = (dat['id'] == objname)
    # print(dat[obj_idx]['id'], 'idx')
    filters = np.array(
        filts)  # [f[2:] for f in dat.dtype.names if f[0:2] == 'f_'])
    flux = np.squeeze([dat[obj_idx]['f_' + f] for f in filternames])
    unc = np.squeeze([dat[obj_idx]['e_' + f] for f in filternames])

    ### define photometric mask, convert to maggies
    phot_mask = (flux != -99.0)
    maggies = flux * 10**-6 / 3631  # flux [uJy] * 1e-6 [Jy / uJy] * 1 [maggy] / 3631 [Jy]
    maggies_unc = unc * 10**-6 / 3631
    # print(maggies, 'maggies')
    # print(flux, 'flux')
    # print(maggies_unc, 'maggies_unc')
    # print(unc, 'unc')

    ### implement error floor
    maggies_unc = np.clip(maggies_unc, maggies * err_floor, np.inf)

    ### build output dictionary
    obs = {}
    obs['filters'] = observate.load_filters(filters)
    obs['wave_effective'] = np.array(
        [filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] = maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
Example #38
def do_all(runname='vis', outfolder=None,**opts):

    if outfolder is None:
        outfolder = os.getenv('APPS') + '/prospector_alpha/plots/'+runname+'/vis_plots/'
        if not os.path.isdir(outfolder):
            os.makedirs(outfolder)
            os.makedirs(outfolder+'data/')

    data = collate_data(runname,filename=outfolder+'data/dat.h5',**opts)
    for i in range(len(data['filters'])): data['filters'][i] = load_filters(data['filters'][i])

    if runname == 'vis':
        data['mod'] = vis_params.load_model(**vis_params.run_params)
    else:
        data['mod'] = vis_expsfh_params.load_model(**vis_expsfh_params.run_params)
    plot_addfilts(data,outfolder,runname)
Example #39
def load_obs_lris(filename=None, objname=None, #dist = 1e-5, vel = 0.0,
                  wlo=3550., whi=5500., verbose=False,
                  phottable='data/f2_apcanfinal_6phot_v2.fits',
                  **kwargs):
    """
    Read one of the Keck LRIS spectra and find the matching PHAT
    photometry, return a dictionary containing the observations.
    """
    from sedpy import observate

    obs ={}
    
    ####### SPECTRUM #######
    if verbose:
        print('Loading data from {0}'.format(filename))

    #fluxconv = np.pi * 4. * (dist * 1e6 * pc)**2 / lsun #erg/s/AA/cm^2 to L_sun/AA
    fluxconv = 1.0
    scale = 1e0 #testing
    #redshift = vel / 2.998e8
    dat = pyfits.getdata(filename)
    hdr = pyfits.getheader(filename)
    
    obs['wavelength'] = dat[0]['wave_opt']
    obs['spectrum'] = dat[0]['spec']
    obs['unc'] = 1./np.sqrt(dat[0]['ivar'])
    #masking
    obs['mask'] =  ((obs['wavelength'] >= wlo ) & (obs['wavelength'] <= whi))
    #obs['wavelength'] /= (1.0 + redshift)
    

    ######## PHOTOMETRY ######
    if verbose:
        print('Loading mags from {0} for {1}'.format(phottable, objname))
    mags, mags_unc, flag = query_phatcat(objname, phottable = phottable, **kwargs)
     
    obs['filters'] = observate.load_filters(['wfc3_uvis_'+b.lower() for b in
                                             ["F275W", "F336W", "F475W", "F814W"]] +
                                             ['wfc3_ir_'+b.lower() for b in
                                              ["F110W", "F160W"]])
    obs['maggies'] = 10**(-0.4 * (mags -
                                  np.array([f.ab_to_vega for f in obs['filters']]) -
                                  2.5*np.log10(scale) ))
    obs['maggies_unc'] = mags_unc * obs['maggies'] / 1.086

    return obs
Example #40
0
def load_phot(phot, unit):
    """
    Return the photometry in a prospector compatible unit and format.
    
    Parameters
    ----------
    phot : [dict or pandas.DataFrame]
        Photometry to convert and load in prospector.
    
    unit : [string]
        Photometry's unit.
        Can be:
            - "Hz": erg/s/cm2/Hz
            - "AA": erg/s/cm2/AA
            - "mgy": maggies
            - "Jy": Jansky
            - "mag": magnitudes
    
    
    Returns
    -------
    dict
    """
    if phot is None:
        return phot

    from sedpy.observate import load_filters
    _data = phot.copy()
    _filters = keys_to_filters(_data.keys())
    _phot = np.array([float(_data[_filt]) for _filt in _filters])
    _dphot = np.array([float(_data[_filt + ".err"]) for _filt in _filters])
    _lbda = np.array([
        _filt.wave_effective
        for _filt in load_filters(filters_to_pysed(_filters))
    ])
    if unit == "mag":
        _phot, _dphot = tools.mag_to_flux(_phot, _dphot, _lbda, inhz=True)
        unit = "Hz"
    _phot, _dphot = tools.convert_flux_unit([_phot, _dphot], unit, "mgy",
                                            _lbda)
    _out = {_filt: _phot[ii] for ii, _filt in enumerate(_filters)}
    _out.update(
        {_filt + ".err": _dphot[ii]
         for ii, _filt in enumerate(_filters)})
    return _out
def build_obs(binNum=0, infile=None, **extras):

    import numpy as np
    from astropy.table import Table
    from sedpy.observate import load_filters
    from prospect.utils.obsutils import fix_obs

    table = Table.read(infile)

    flux_columns = [col for col in table.colnames if col.endswith('_flux')]
    e_flux_columns = [col.replace('flux', 'err') for col in flux_columns]
    nPix_columns = [col.replace('flux', 'nPix') for col in flux_columns]
    use_columns = [col.replace('flux', 'use') for col in flux_columns]
    filternames = ['hff_' + col.replace('_flux', '') for col in flux_columns]

    fluxes = np.array(list(table[flux_columns][binNum])) / 3631  # in maggies
    e_fluxes = np.array(list(
        table[e_flux_columns][binNum])) / 3631  # in maggies
    nPixels = np.array(list(table[nPix_columns][binNum]))
    use_col = np.array(list(table[use_columns][binNum]))

    fluxes_per_pixel = fluxes / nPixels
    e_fluxes_per_pixel = e_fluxes / nPixels

    max_SNR = 20
    SNR_acceptable = (fluxes_per_pixel / e_fluxes_per_pixel < max_SNR)
    e_fluxes_per_pixel[~SNR_acceptable] = fluxes_per_pixel[~SNR_acceptable] / max_SNR

    obs = {}

    obs['filters'] = load_filters(filternames)
    obs['maggies'], obs['maggies_unc'] = fluxes_per_pixel, e_fluxes_per_pixel

    obs['phot_mask'] = use_col
    obs['phot_wave'] = np.array([f.wave_effective for f in obs['filters']])

    obs['wavelength'], obs['spectrum'] = None, None
    obs['unc'], obs['mask'] = None, None

    obs['binNum'], obs['nPixels'] = binNum, nPixels

    obs = fix_obs(obs)

    return obs
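
The per-pixel S/N cap applied above can be checked in isolation; a minimal sketch with invented numbers:

import numpy as np

max_SNR = 20.
flux = np.array([1.0, 10.0, 100.0])              # made-up fluxes per pixel
err = np.array([0.5, 0.1, 1.0])                  # S/N of 2, 100, 100

acceptable = flux / err < max_SNR                # bands already below the cap
err[~acceptable] = flux[~acceptable] / max_SNR   # inflate errors so S/N == max_SNR
assert np.all(flux / err <= max_SNR)
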
def load_obs(photname='', objname='', **extras):
    """
    Custom-built because the photometric files are actually generated by the model
    """
    obs ={}

    filters = translate_filters()

    # build output dictionary
    obs['filters'] = observate.load_filters(filters)
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = np.ones_like(filters,dtype=bool)
    obs['maggies'] = np.ones_like(filters,dtype=float)
    obs['maggies_unc'] = np.ones_like(filters,dtype=float)
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False
    return obs
Example #43
0
def observe(fnames):

    #  units: lambda (A), flux: fnu normalized to unity
    dat = load_data()
    filters = load_filters(fnames)

    out = {}
    for name in dat.dtype.names:
        if name == 'blank' or name == 'lambda':
            continue

        # sourcewave: Spectrum wavelength (in AA), ndarray of shape (nwave).
        # sourceflux: Associated flux (assumed to be in erg/s/cm^2/AA), ndarray of shape (nsource,nwave).
        # filterlist: List of filter objects, of length nfilt.
        # array of AB broadband magnitudes, of shape (nsource, nfilter).
        out[name] = getSED(dat['lambda'], (lightspeed/dat['lambda']**2)*dat[name], filters)

    return out
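
getSED expects f_lambda in erg/s/cm^2/AA, which is why the f_nu-normalized spectrum above is multiplied by lightspeed/lambda**2; a minimal sketch of that conversion on an invented wavelength grid:

import numpy as np

lightspeed = 2.998e18                      # speed of light in AA/s
lam = np.linspace(3000., 10000., 1000)     # wavelength grid in AA (arbitrary)
f_nu = np.ones_like(lam)                   # flat f_nu, unit normalization
f_lambda = f_nu * lightspeed / lam**2      # f_lambda = f_nu * c / lambda^2
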
Example #44
0
File: basis.py Project: bd-j/dice
def rectify_basis(wave, spectra, wlow=0, whigh=np.inf,
                  exclude=[], outwave=None, filters=None, **extras):
    """Mask a spectral basis using lists of include and exclude ranges
    """
    if filters is not None:
        flist = observate.load_filters(filters)
        sed = observate.getSED(wave, spectra, filterlist=flist)
        return np.array([f.wave_effective for f in flist]), 10**(-0.4 * sed)

    if outwave is not None:
        onedinterp = interpolate(wave, spectra, axis=-1)
        spectra = onedinterp(outwave)
        wave = outwave

    g = (wave >= wlow) & (wave <= whigh)
    for (lo, hi) in exclude:
        g = g & ((wave < lo) | (wave > hi))
    return wave[g], spectra[:, g]
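
A usage sketch of rectify_basis as defined above (the wavelength grid and exclusion window are invented for illustration):

import numpy as np

wave = np.linspace(3000., 9000., 6000)     # AA
spectra = np.ones((4, wave.size))          # four flat basis spectra

# keep 3500-8500 AA, but drop a window around the 5577 AA sky line
w, s = rectify_basis(wave, spectra, wlow=3500., whigh=8500.,
                     exclude=[(5570., 5585.)])
assert s.shape[0] == 4 and w.min() >= 3500. and w.max() <= 8500.
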
Example #45
0
def getobs(cat):
    """Generate the prospector-style "obs" dictionary which contains the input
    photometry, redshift, etc. for a single object.
    cat - fitsio structured array
    """
    from sedpy.observate import load_filters

    obs = {}

    #updated for our project
    # Photometric bandpasses
    sdss = ['sdss_{}0'.format(b) for b in ['u', 'g', 'r', 'i', 'z']]
    wise = ['wise_{}'.format(b) for b in ['w1', 'w2']]
    filternames = sdss + wise

    obs['filternames'] = filternames
    obs['filters'] = load_filters(filternames)
    #updated for our project
    # Input photometry
    obs['maggies'] = np.squeeze(cat['MAGGIES'])
    mask = cat['IVARMAGGIES'] > 0
    with np.errstate(invalid='ignore', divide='ignore'):
        obs['maggies_unc'] = np.squeeze(1.0 /
                                        np.sqrt(cat['IVARMAGGIES']))  #[:3, :])
    obs['phot_mask'] = mask

    # Input spectroscopy (none for this dataset)
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['unc'] = None
    obs['mask'] = None
    #update, customize
    # Use initial values based on the iSEDfit results.
    obs['zred'] = cat['Z']  # redshift
    obs['mass'] = 10**cat['MSTAR']  # stellar mass
    obs['logzsol'] = np.log10(cat['ZMETAL'] / 0.019)  # stellar metallicity
    obs['tage'] = cat['AGE']  # age
    obs['tau'] = cat['TAU']  # tau (for a delayed SFH)
    obs['dust2'] = 0.1

    # Additional informational keys.
    obs['isedfit_id'] = cat['ISEDFIT_ID']

    return obs
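
The inverse-variance handling above amounts to sigma = 1/sqrt(ivar), with non-positive ivar flagging unusable bands; a small numerical sketch with made-up values:

import numpy as np

ivarmaggies = np.array([4.0, 0.0, 100.0])  # invented inverse variances (maggies^-2)
mask = ivarmaggies > 0                     # zero ivar marks missing photometry
with np.errstate(divide='ignore'):
    sigma = 1.0 / np.sqrt(ivarmaggies)     # 1-sigma uncertainties in maggies
assert np.allclose(sigma[mask], [0.5, 0.1])
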
Example #46
0
def load_obs(objname=None, datloc=None, err_floor=0.02, **extras):
    """ let's rock
    """

    # open data
    dat = fits.open(datloc)[1].data

    # extract filters, fluxes, errors for object
    obj_idx = (dat['CATAID'] == int(objname))
    filters = np.array([f[:-5] for f in dat.dtype.names if f[-4:] == 'flux'])
    flux = np.squeeze([dat[obj_idx][f+'_flux'] for f in filters])
    unc = np.squeeze([dat[obj_idx][f+'_fluxerr'] for f in filters])

    ### define photometric mask, convert from Jy to maggies
    phot_mask = (flux != -999.0)
    maggies = flux / 3631
    maggies_unc = unc / 3631

    ### implement error floor
    maggies_unc = np.clip(maggies_unc, maggies*err_floor, np.inf)

    ### mask anything touching or bluewards of Ly-a
    zred = dat['Z'][obj_idx]
    fnames = [trans[f] for f in filters]
    ofilters = observate.load_filters(fnames)

    wavemax = np.array([f.wavelength[f.transmission > (f.transmission.max()*0.1)].max() for f in ofilters]) / (1+zred)
    wavemin = np.array([f.wavelength[f.transmission > (f.transmission.max()*0.1)].min() for f in ofilters]) / (1+zred)
    filtered = [1230]
    for f in filtered: phot_mask[(wavemax > f) & (wavemin < f)] = False
    phot_mask[wavemin < 1200] = False

    ### build output dictionary
    obs = {}
    obs['filters'] = ofilters
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
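
The Lyman-alpha masking above keys off the 10% transmission points of each filter shifted to the rest frame; a minimal sketch of that edge calculation for a single sedpy filter (the filter name and redshift are arbitrary):

import numpy as np
from sedpy import observate

zred = 3.0
filt = observate.load_filters(['sdss_u0'])[0]
keep = filt.transmission > 0.1 * filt.transmission.max()
rest_blue_edge = filt.wavelength[keep].min() / (1. + zred)   # AA, rest frame
mask_this_band = rest_blue_edge < 1216.    # True if the band reaches Ly-alpha
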
Example #47
0
def load_obs(mock_key=1, **extras):
    """ let's rock
    """

    # open photometric data to get filters
    dat = fits.open(run_params['datloc'])[1].data
    gama_fnames = np.array([f[:-5] for f in dat.dtype.names if f[-4:] == 'flux'])
    fnames = [trans[f] for f in gama_fnames]
    filters = observate.load_filters(fnames)
    phot_mask = np.ones_like(filters,dtype=bool)

    obs = {}
    obs['filters'] = filters
    obs['phot_mask'] = phot_mask
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    # generate mock parameters
    masses,agebins,names = set_masses(mock_key)
    model = load_model(agebins=agebins,**extras)
    itheta = model.initial_theta.copy()
    itheta[model.theta_index['mass']] = masses

    # determine S/N
    idx = np.in1d(dat['CATAID'],names.astype(np.int32))
    sn = []
    for f in gama_fnames: sn += [np.percentile(dat[f+'_flux'][idx]/dat[f+'_fluxerr'][idx],95)]
    sn = np.clip(sn,-np.inf,100)

    # generate photometry
    sps = load_sps(**extras)
    spec,maggies,sm = model.mean_model(itheta, obs, sps=sps)
    maggies_unc = maggies/sn
    obs['phot_mask'][sn == 1] = 0 # mask anything where S/N == 1 (this means median is nondetection!)

    ### build output dictionary
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['spec_true'] = spec
    obs['lam_true'] = sps.wavelengths

    return obs
def load_obs(snr=10, **extras):

    # what are our mock parameters?
    params = {
        'logmass': 10,
        'logzsol': -1.5,
        'dust2': 0.05,
        'logtau': 1.0,
        'tage': 8
    }
    for p in params.keys():
        params[p] = np.atleast_1d(params[p])

    # will need model and sps object to generate photometry
    mod = load_model(**run_params)
    sps = load_sps(**run_params)

    # we will also need filters
    obs = {}
    filters = [
        'sdss_u0', 'sdss_g0', 'sdss_r0', 'sdss_i0', 'twomass_J', 'twomass_H',
        'twomass_Ks'
    ]
    obs['filters'] = observate.load_filters(filters)
    obs['wavelength'] = None

    # generate the photometry, add noise
    mod.params.update(params)
    spec, maggies, _ = mod.mean_model(mod.theta, obs, sps=sps)
    maggies_unc = (maggies / snr).squeeze()
    phot_mask = np.ones_like(maggies, dtype=bool)

    # finishing building obs dictionary
    obs['wave_effective'] = np.array(
        [filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] = maggies_unc
    obs['spectrum'] = None
    obs['true_params'] = params
    obs['true_spec'] = spec

    return obs
Example #49
0
def load_obs(snr=10.0, add_noise=True, **kwargs):
    """Make a mock dataset.  Feel free to add more complicated kwargs, and put
    other things in the run_params dictionary to control how the mock is
    generated.
    """
    # We'll put the mock data in this dictionary, just as we would for real
    # data.  But we need to know which filters (and wavelengths if doing
    # spectroscopy) with which to generate mock data.
    mock = {}
    mock['wavelength'] = None # No spectrum
    filterset = galex + sdss + spitzer[:2] # only warm spitzer
    mock['filters'] = load_filters(filterset)

    # We need the models to make a mock
    sps = load_sps(**kwargs)
    mod = load_model(**kwargs)

    # Now we get the mock params from the kwargs dict
    params = {}
    for p in mod.params.keys():
        if p in kwargs:
            params[p] = np.atleast_1d(kwargs[p])

    # And build the mock
    mod.params.update(params)
    spec, phot, _ = mod.mean_model(mod.theta, mock, sps=sps)
    # Now store some output
    mock['true_spectrum'] = spec.copy()
    mock['true_maggies'] = phot.copy()
    mock['mock_params'] = deepcopy(mod.params)
    # And add noise
    pnoise_sigma = phot / snr
    pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
    if add_noise:
        mock['maggies'] = phot + pnoise
    else:
        mock['maggies'] = phot.copy()
    mock['maggies_unc'] = pnoise_sigma
    mock['mock_snr'] = snr
    mock['phot_mask'] = np.ones(len(phot), dtype=bool)

    return mock
Example #50
0
def load_galaxy_for_prospector(galaxy, filter_selection):
    all_filters = get_filters(filter_selection)
    valid_filters = [
        f for f in all_filters if filter_has_valid_data(f, galaxy)
    ]
    if filter_selection == 'reliable' and len(valid_filters) != 12:
        raise ValueError(
            'Some reliable bands are missing - only got {}'.format(
                valid_filters))
    if filter_selection == 'euclid' and len(valid_filters) != 8:
        raise ValueError(
            'Needs 8 valid Euclid bands - only got {}'.format(valid_filters))
    logging.debug('valid filters: {}'.format(valid_filters))

    maggies, maggies_unc = load_maggies_to_array(galaxy, valid_filters)
    # Instantiate the `Filter()` objects using methods in `sedpy`
    # prospector needs these
    filters = observate.load_filters([f.bandpass_file for f in valid_filters])

    return filters, maggies, maggies_unc
Example #51
0
def get_mags(wave,spectrum,z,lumdist,formed_mass,filterlist):
    
    # filter parameters
    filters = load_filters(filterlist)

    # redshift offset the spectrum
    a = 1+z
    wa, sa = wave*a, spectrum*a

    # get the absolute magnitudes
    mags = getSED(wa, lightspeed/wa**2 * sa * to_cgs, filters)
    phot = np.atleast_1d(10**(-0.4 * mags))
    
    # get the observed magnitudes
    dfactor = (lumdist*1e5)**2
    phot /= dfactor
    phot *= formed_mass
    phot = mgy_to_mag(phot)
    
    return phot
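
The (lumdist*1e5)**2 factor above is the distance-modulus dimming in flux space, assuming the usual convention that to_cgs references the spectrum to 10 pc and that lumdist is in Mpc; a quick numerical check with an arbitrary distance:

import numpy as np

lumdist = 100.0                            # Mpc, arbitrary
dfactor = (lumdist * 1e5)**2               # (d / 10 pc)^2
dist_mod = 2.5 * np.log10(dfactor)         # magnitudes of dimming
assert np.isclose(dist_mod, 5. * np.log10(lumdist) + 25.)
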
Example #52
0
def load_obs(objname=None, datfile=None, **extras):

    # Read data from json
    with open(datfile,'r') as f:
        data = json.load(f)
    
    # pick out photometry information
    mag,err,fnames = [],[],[]
    for band in data[objname]['photometry']:
        fnames += [band['telescope'] + '_' + band['band']]
        mag += [float(band['mag'])]
        err += [float(band['err'])]

    # convert to maggies
    maggies = 10**((-2./5)*np.array(mag))
    maggies_unc = np.array(err)*maggies/1.086

    # photometric error floor (models aren't good to better than 5% or so anyway)
    maggies_unc = np.clip(maggies_unc, maggies*0.05, np.inf)

    # create mask, used to mask any bad photometry in the fit
    # here we don't know what "bad" photometry is, so it's a mask of ones.
    phot_mask = np.ones_like(maggies,dtype=bool)

    # translate photometric names into sedpy standards
    # note if we have any photometry redwards of rest-frame ~2um,
    # we need to turn on dust emission manually!
    filters = [translate[f] for f in fnames]

    ### build output dictionary
    obs = {}
    obs['filters'] = observate.load_filters(filters)
    obs['wave_effective'] = np.array([filt.wave_effective for filt in obs['filters']])
    obs['phot_mask'] = phot_mask
    obs['maggies'] = maggies
    obs['maggies_unc'] =  maggies_unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    return obs
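
The factor of 1.086 used for the uncertainties above is 2.5/ln(10), i.e. the linearized conversion between a magnitude error and a fractional flux error; a quick check with invented values:

import numpy as np

mag, dmag = 22.0, 0.1                      # arbitrary AB magnitude and error
maggies = 10**(-0.4 * mag)
dmaggies = dmag * maggies / 1.086          # linearized error propagation
exact = maggies * (10**(0.4 * dmag) - 1.)  # exact (upper-side) flux error
assert np.isclose(2.5 / np.log(10), 1.086, atol=1e-3)
assert np.isclose(dmaggies, exact, rtol=0.1)
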
Example #53
0
File: plotsed.py Project: bd-j/sedpy
def test():
    from sedpy import observate
    import fsps
    import matplotlib.pyplot as pl

    filters = ['galex_NUV', 'sdss_u0', 'sdss_r0', 'sdss_r0', 'sdss_i0', 'sdss_z0',
               'bessell_U', 'bessell_B', 'bessell_V', 'bessell_R', 'bessell_I',
               'twomass_J','twomass_H']
    flist = observate.load_filters(filters)

    sps = fsps.StellarPopulation(compute_vega_mags=False)
    wave, spec = sps.get_spectrum(tage=1.0, zmet=2, peraa=True)

    sed = observate.getSED(wave, spec, flist)
    sed_unc = np.abs(np.random.normal(1, 0.3, len(sed)))

    wgrid = np.linspace(2e3, 13e3, 1000)
    fgrid = np.linspace(-13, -9, 100)
    psed, sedpoints = sed_to_psed(flist, sed, sed_unc, wgrid, fgrid)

    pl.imshow(np.exp(psed).T, cmap='Greys_r',
              interpolation='nearest', origin='upper', aspect='auto')
Example #54
0
def median_by_band(x,y,avg=False):

    ##### get filter effective wavelengths for sorting
    delz = 0.15
    from brownseds_np_params import translate_filters
    from sedpy import observate
    filtnames = np.array(translate_filters(0,full_list=True))
    filts = observate.load_filters(filtnames[filtnames != 'nan'])
    wave_effective = np.array([filt.wave_effective for filt in filts])/1e4
    wave_effective.sort()

    avglam = np.array([])
    outval = np.array([])
    for lam in wave_effective:
        in_bounds = (x <= lam) & (x > lam/(1+delz))
        avglam = np.append(avglam, np.mean(x[in_bounds]))
        if not avg:
            outval = np.append(outval, np.median(y[in_bounds]))
        else:
            outval = np.append(outval, np.mean(y[in_bounds]))

    return avglam, outval
Example #55
0
def load_obs(objname=None, noisefactor=1.0, calibrated=False,
             mask=True, broaden_obs=False, wlo=3750., whi=7500.,
             **extras):

    assert objname == 'M67'
    
    bmag = 7.5 # hack
    obs = {}
    dat = np.loadtxt(os.path.join(sdir, 'data/m67_nobs.dat'))
    obs['wavelength'] = observate.vac2air(dat[:, 0])
    obs['spectrum'] = dat[:, 1]
    obs['unc'] = dat[:, 2]
    
    obs['filters'] = observate.load_filters(['sdss_g0'])
    obs['maggies'] = np.array([10**(-0.4 * bmag)])
    obs['maggies_unc'] = 0.05 * obs['maggies']

    # mask
    obs['mask'] = (obs['wavelength'] > wlo) & (obs['wavelength'] < whi)
    #adjust uncertainties
    obs['unc'] *= noisefactor
    obs['noisefactor'] = noisefactor
    obs['spec_calibrated'] = calibrated
    return obs
Example #56
0
def build_obs(filterlist=["sdss_r0"], snr=10,
              add_noise=True, seed=0, **run_params):
    """Build a mock observation
    """
    from sedpy import observate
    filters = observate.load_filters(filterlist)

    mock = {"wavelength": None, "spectrum": None, "filters": filters}
    
    # Build the mock model
    sp = build_sps(**run_params)
    mod = build_model(**run_params)
    spec, phot, x = mod.mean_model(mod.theta, mock, sps=sp)

    # Add to dict with uncertainties
    pnoise_sigma = phot / snr
    mock['maggies'] = phot.copy()
    mock['maggies_unc'] = pnoise_sigma

    # And add noise
    if add_noise:
        if int(seed) > 0:
            np.random.seed(int(seed))
        pnoise = np.random.normal(0, 1, len(phot)) * pnoise_sigma
        mock['maggies'] += pnoise

    # Ancillary info
    mock['true_spectrum'] = spec.copy()
    mock['true_maggies'] = phot.copy()
    mock['mock_params'] = deepcopy(mod.params)
    mock['mock_snr_phot'] = snr    
    mock['phot_wave'] = np.array([f.wave_effective for f in mock['filters']])

    obs = mock

    return obs
def load_obs(photname='', extinctname='', herschname='', objname='', **extras):
    """
    let's do this
    """
    obs ={}

    # load photometry
    hdulist = fits.open(photname)

    # find object
    if objname is not None:
        idx = hdulist[1].data['Name'] == objname
    else:
        idx = np.ones(len(hdulist[1].data['Name']),dtype=bool)
    
    # extract fluxes+uncertainties for all objects
    mag_fields = [f for f in hdulist[1].columns.names if (f[0:2] != 'e_') and (f != 'Name')]
    magunc_fields = [f for f in hdulist[1].columns.names if f[0:2] == 'e_']

    # extract fluxes for particular object
    mag = np.array([np.squeeze(hdulist[1].data[f][idx]) for f in mag_fields])
    magunc  = np.array([np.squeeze(hdulist[1].data[f][idx]) for f in magunc_fields])

    # extinctions
    extinct = fits.open(extinctname)
    extinctions = np.array([np.squeeze(extinct[1].data[f][idx]) for f in extinct[1].columns.names if f != 'Name'])

    # adjust fluxes for extinction
    # then convert to maggies
    mag_adj = mag - extinctions
    flux = 10**((-2./5)*mag_adj)

    # convert uncertainty to maggies
    unc = magunc*flux/1.086

    #### Herschel photometry
    herschel = fits.open(herschname)
    
    # find interesting fields
    hflux_fields = [f for f in herschel[1].columns.names if (('pacs' in f) or ('spire' in f)) and f[-3:] != 'unc']
    hunc_fields = [f for f in herschel[1].columns.names if (('pacs' in f) or ('spire' in f)) and f[-3:] == 'unc']

    # different versions if objname is passed or no
    if objname is not None:
        match = herschel[1].data['Name'] == objname.lower().replace(' ','')
        
        hflux = np.array([np.squeeze(herschel[1].data[match][hflux_fields[i]]) for i in range(len(hflux_fields))])
        hunc = np.array([np.squeeze(herschel[1].data[match][f]) for f in hunc_fields])
    else:
        optnames = hdulist[1].data['Name']
        hnames   = herschel[1].data['Name']

        # non-pythonic, i know, but why change if it works?
        hflux,hunc = np.zeros(shape=(len(hflux_fields),len(hnames))), np.zeros(shape=(len(hflux_fields),len(hnames)))
        for ii in range(len(optnames)):
            match = hnames == optnames[ii].lower().replace(' ','')
            for kk in range(len(hflux_fields)):
                hflux[kk,ii] = herschel[1].data[match][hflux_fields[kk]]
                hunc[kk,ii]  = herschel[1].data[match][hunc_fields[kk]]

    #### combine with brown catalog
    # convert from Jy to maggies
    flux = np.concatenate((flux,hflux/3631.))   
    unc = np.concatenate((unc, hunc/3631.))
    mag_fields = np.append(mag_fields,hflux_fields)   

    # phot mask
    phot_mask_brown = mag != 0
    phot_mask_hersch = hflux != 0
    phot_mask = np.concatenate((phot_mask_brown,phot_mask_hersch))

    # map brown filters to FSPS filters
    # and remove fields where we don't have filter definitions
    filters,fsps_filters = translate_filters(mag_fields)
    have_definition = np.array(filters) != 'nan'

    filters = filters[have_definition]
    fsps_filters = fsps_filters[have_definition]
    flux = flux[have_definition]
    unc = unc[have_definition]
    phot_mask = phot_mask[have_definition]

    # load wave_effective
    from translate_filter import calc_lameff_for_fsps
    wave_effective = calc_lameff_for_fsps(filters)

    # error floor: 5% of the flux in every band
    # (an earlier version split at 4.2 micron observed frame, with 8% above and 2% below)
    unc = np.clip(unc, flux*0.05, np.inf)

    # build output dictionary
    obs['wave_effective'] = wave_effective
    obs['filters'] = observate.load_filters(fsps_filters)
    obs['phot_mask'] = phot_mask
    obs['maggies'] = flux
    obs['maggies_unc'] =  unc
    obs['wavelength'] = None
    obs['spectrum'] = None
    obs['logify_spectrum'] = False

    if objname is None:
        obs['hnames'] = herschel[1].data['Name']
        obs['names'] = hdulist[1].data['Name']

    # tidy up
    hdulist.close()
    extinct.close()
    herschel.close()

    return obs
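
Subtracting the extinction from the magnitudes before converting, as done above, is equivalent to brightening the fluxes by 10**(0.4*A_lambda); a quick check with invented numbers:

import numpy as np

mag = np.array([18.0, 20.0])               # invented observed magnitudes
A_lambda = np.array([0.12, 0.30])          # invented foreground extinctions (mag)

flux_a = 10**(-0.4 * (mag - A_lambda))     # correct in magnitude space
flux_b = 10**(-0.4 * mag) * 10**(0.4 * A_lambda)   # correct in flux space
assert np.allclose(flux_a, flux_b)
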
Example #58
0
# Example hrdspy usage: determine the dispersion in the integrated spectrum
# as a function of wavelength for a given cluster mass and age.

import matplotlib.pyplot as pl
import time, pickle
import numpy as np
import starmodel, isochrone, cluster, imf
from sedpy import observate, attenuation

# choose a few filters and load them
filterlist = observate.load_filters(['bessell_B','bessell_R'])

# instantiate and load the isochrones and spectra first so you
# don't have to do it for each cluster realization
isoc = isochrone.Padova2007()
isoc.load_all_isoc()
speclib = starmodel.BaSeL3()
speclib.read_all_Z()
IMF = imf.SalpeterIMF(mlo=[0.1], mhi=[100.], alpha=[2.35])

A_v = 0.0
wave = speclib.wavelength
nw = wave.shape[0]

hdrtxt = ("A list of [header, wave, spectrum, stellar_masses] for {0} realizations "
          "of a cluster with M_*={1} M_sun of stars drawn from a Salpeter IMF (0.1, 100) "
          "at logt={2} yrs and metallicity Z={3} with A_v={4}.  Spectrum is "
          "a {0} x {5} array while stellar masses is a {0} element list of lists, "
          "where each sublist gives the stellar masses above {6} M_sun for one realization.")
nametemplate = "stochastic_lib/salp_stoch{0}_logM{1:3.1f}_logt{2:4.2f}.p"