Code Example #1
File: qframe.py  Project: desihub/desispec
    def asframe(self,wavelength=None) :
        """
        Converts QFrame to a Frame 

        """

        if wavelength is None :
            dwave=np.min(np.gradient(self.wave[self.nspec//2]))
            wmin=np.max(self.wave[:,0])
            wmax=np.min(self.wave[:,-1])
            n=int((wmax-wmin)/dwave)+1
            wavelength=np.linspace(wmin,wmax,n)
        
        rflux = np.zeros((self.nspec,wavelength.size))
        rivar = np.zeros((self.nspec,wavelength.size))
        if self.mask is None :
            for i in range(self.nspec) :
                rflux[i],rivar[i] = resample_flux(wavelength,self.wave[i],self.flux[i],self.ivar[i],extrapolate=False)
        else :
            for i in range(self.nspec) :
                rflux[i],rivar[i] = resample_flux(wavelength,self.wave[i],self.flux[i],self.ivar[i]*(self.mask[i]==0),extrapolate=False)
        
            
        return Frame(wave=wavelength,flux=rflux,ivar=rivar,mask=None,resolution_data=None,\
                     fibers=self.fibers, spectrograph=None, meta=self.meta, fibermap=self.fibermap,\
                     chi2pix=None,scores=None,scores_comments=None,\
                     wsigma=self.sigma,ndiag=1, suppress_res_warning=True)
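A minimal usage sketch (not from the source): `qframe` is assumed to be an existing QFrame instance, and the wavelength step is illustrative only.

# Hedged usage sketch: `qframe` is a pre-existing QFrame; values are illustrative.
import numpy as np

frame = qframe.asframe()                                                  # grid derived from the fibers
frame_fixed = qframe.asframe(wavelength=np.arange(5500.0, 7500.0, 0.8))   # explicit grid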
Code Example #2
File: mycoaddcam.py  Project: segasai/prospect
def mycoaddcam(spectra) :
    """"
    Merges brz spectra into a single (wave,flux)
      takes into account noise and mis-matched wavelengths over the 3 arms
    Currently assumes b r z bands and two overlap regions
    """
    
    if np.all([ band in spectra.bands for band in ['b','r','z'] ]) :
    
        # Define (arbitrarily) wavelength grid
        margin = 20 # Angstrom. Avoids using edge-of-band at overlap regions
        wave = spectra.wave['b'].copy()
        wave = wave[ (wave<np.max(wave)-margin) ]
        tolerance = 0.0001
        length_bands = {'b' : wave.size}
        w_bands = {'b' : np.arange(wave.size)}
        for band in ['r','z'] :
            if band=='z' : w_bands[band], = np.where( spectra.wave[band]>wave[-1]+tolerance )
            else : w_bands[band], = np.where( (spectra.wave[band]>wave[-1]+tolerance)
                                      & (spectra.wave[band]<np.max(spectra.wave[band])-margin) )
            wave=np.append(wave,spectra.wave[band][w_bands[band]])
            length_bands[band] = w_bands[band].size

        nwave = wave.size
        nspec = spectra.num_spectra()
        flux = np.zeros((nspec,nwave),dtype=spectra.flux['b'].dtype)
        ivar = np.zeros((nspec,nwave),dtype=spectra.ivar['b'].dtype)

        # Flux in non-overlapping waves
        i = 0
        for band in ['b', 'r', 'z'] :
            flux[:,i:i+length_bands[band]] = spectra.flux[band][:,w_bands[band]]
            ivar[:,i:i+length_bands[band]] = spectra.ivar[band][:,w_bands[band]]
            i += length_bands[band]

        # Overlapping regions
        overlaps = ['br','rz']
        for the_overlap in overlaps :
            b1, b2 = the_overlap[0], the_overlap[1]
            w_overlap, = np.where( (wave > spectra.wave[b2][0]) & (wave < spectra.wave[b1][-1]) )
            assert (w_overlap.size > 0)
            lambd_over = wave[w_overlap]
            for ispec in range(nspec) :
                phi1, ivar1 = resample_flux(lambd_over, spectra.wave[b1], spectra.flux[b1][ispec,:], ivar=spectra.ivar[b1][ispec,:])
                phi2, ivar2 = resample_flux(lambd_over, spectra.wave[b2], spectra.flux[b2][ispec,:], ivar=spectra.ivar[b2][ispec,:])
                ivar[ispec,w_overlap] = ivar1+ivar2
                w_ok = np.where( ivar[ispec,w_overlap] > 0)
                flux[ispec,w_overlap] = (phi1+phi2)/2
                flux[ispec,w_overlap[w_ok]] = (ivar1[w_ok]*phi1[w_ok] + ivar2[w_ok]*phi2[w_ok])/ivar[ispec,w_overlap[w_ok]]
    
    elif spectra.bands == ['brz'] :
        wave = spectra.wave['brz']
        flux = spectra.flux['brz']
        ivar = spectra.ivar['brz']
    else :
        raise RuntimeError("mycoaddcam: set of bands for spectra not supported")
    
    
    return (wave, flux, ivar)
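A short usage sketch, assuming `spectra` is a desispec Spectra object containing the 'b', 'r' and 'z' bands as the function expects.

# Hedged usage sketch: `spectra` is assumed to hold b/r/z arms of the same targets.
wave, flux, ivar = mycoaddcam(spectra)
# flux.shape == (spectra.num_spectra(), wave.size); overlap regions are inverse-variance weighted.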
Code Example #3
def make_template_dicts(redrock_cat,
                        delta_lambd_templates=3,
                        with_fit_templates=True,
                        template_dir=None):
    """
    Input : TODO document
    - redrock_cat : Table produced by match_redrock_zfit_to_spectra (matches spectra).
    Create list of CDS including all data needed to plot other models (Nth best fit, std templates) :
    - list of templates used in fits
    - RR output for Nth best fits
    - list of std templates
    """

    assert _redrock_imported
    assert _desispec_imported  # for resample_flux
    rr_templts = load_redrock_templates(template_dir=template_dir)

    if with_fit_templates:
        dict_fit_templates = dict()
        for key, val in rr_templts.items():
            fulltype_key = "_".join(key)
            wave_array = np.arange(val.wave[0], val.wave[-1],
                                   delta_lambd_templates)
            flux_array = np.zeros((val.flux.shape[0], len(wave_array)))
            for i in range(val.flux.shape[0]):
                flux_array[i, :] = resample_flux(wave_array, val.wave,
                                                 val.flux[i, :])
            dict_fit_templates["wave_" + fulltype_key] = wave_array
            dict_fit_templates["flux_" + fulltype_key] = flux_array
    else:
        dict_fit_templates = None

    dict_fit_results = dict()
    for key in redrock_cat.keys():
        dict_fit_results[key] = np.asarray(redrock_cat[key])
    dict_fit_results['Nfit'] = redrock_cat['Z'].shape[1]

    # TODO fix the list of std templates
    # We take flux[0,:] : ie use first entry in RR template basis
    # We choose here not to convolve with a "typical" resolution (could easily be done)
    # Std template : corresponding RR template . TODO put this list somewhere else
    std_templates = {
        'QSO': ('QSO', ''),
        'GALAXY': ('GALAXY', ''),
        'STAR': ('STAR', 'F')
    }
    dict_std_templates = dict()
    for key, rr_key in std_templates.items():
        wave_array = np.arange(rr_templts[rr_key].wave[0],
                               rr_templts[rr_key].wave[-1],
                               delta_lambd_templates)
        flux_array = resample_flux(wave_array, rr_templts[rr_key].wave,
                                   rr_templts[rr_key].flux[0, :])
        dict_std_templates["wave_" + key] = wave_array
        dict_std_templates["flux_" + key] = flux_array

    return [dict_fit_templates, dict_fit_results, dict_std_templates]
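A hedged usage sketch; `redrock_cat` is assumed to be the Table produced by match_redrock_zfit_to_spectra, as the docstring states.

# Hedged usage sketch: redrock_cat comes from match_redrock_zfit_to_spectra.
dict_fit_templates, dict_fit_results, dict_std_templates = make_template_dicts(
    redrock_cat, delta_lambd_templates=3, with_fit_templates=True)
nfit = dict_fit_results['Nfit']   # number of best fits stored per target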
Code Example #4
File: Lya_deltas_lib.py  Project: arkel1/lya-deltas
def get_PCA_model(QSOlist, niter, nvec, lmin=1040, lmax=1600):
    wwave = sp.arange(lmin, lmax, .1)
    nbObj = len(QSOlist)
    # nbObj = 20
    pcaflux = sp.zeros((nbObj, wwave.size))
    pcaivar = sp.zeros((nbObj, wwave.size))

    for nspectra in range(0, nbObj):
        pcaflux[nspectra], pcaivar[nspectra] = resample_flux(
            wwave, QSOlist[nspectra].w, QSOlist[nspectra].flux,
            QSOlist[nspectra].ivar)  # interpolation

    pcaivar[pcaivar < 0.] = 0.  # Remove if all measured bins are zero
    w = sp.sum(pcaivar, axis=0) > 0.
    pcawave = wwave[w]
    pcaflux = pcaflux[:, w]
    pcaivar = pcaivar[:, w]
    ### Cap the ivar
    pcaivar[pcaivar > 100.] = 100.

    ### Get the mean
    data_meanspec = sp.average(pcaflux, weights=pcaivar, axis=0)
    for i in range(nbObj):
        w = pcaivar[i] > 0.  # subtracting the mean for each spectrum
        pcaflux[i, w] -= data_meanspec[w]  #
    ### PCA
    print('Starting EMPCA: ')
    dmodel = empca.empca(pcaflux, weights=pcaivar, niter=niter, nvec=nvec)
    return dmodel, pcawave, pcaflux, pcaivar, data_meanspec
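A usage sketch under the assumption that each entry of `QSOlist` exposes `.w`, `.flux` and `.ivar` rest-frame arrays, as the loop above requires.

# Hedged usage sketch: QSOlist items expose .w, .flux, .ivar on a rest-frame grid.
dmodel, pcawave, pcaflux, pcaivar, meanspec = get_PCA_model(QSOlist, niter=10, nvec=4)
# dmodel is the EMPCA model; dmodel.eigvec holds the (nvec, nwave) basis vectors.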
Code Example #5
File: Lya_deltas_lib.py  Project: arkel1/lya-deltas
def get_PCA_deltas(QSOlist,
                   pcawave,
                   pcamcont,
                   pcamcontstd,
                   pcacontin_mock,
                   lamin=1040,
                   lamax=1200):
    deltas = []
    for i in range(len(QSOlist)):  # len(QSOlist)
        dwave = QSOlist[i].w
        wmask = (dwave >= lamin) & (dwave <= lamax)
        dwave = dwave[wmask]
        flux = QSOlist[i].flux
        flux = flux[wmask]  # orig flux
        ivr = QSOlist[i].ivar
        ivr = ivr[wmask]

        # pcawave, pcacont from cont. fitting with PCA to QSO delta ticks
        # pcacont from pcawave to dwave
        cont = resample_flux(dwave, pcawave,
                             pcacontin_mock[i])  # continuum to dwave grid
        # ivar=pcamcontstd
        delta = flux / cont - 1

        s = np.vstack((dwave.conj().transpose(), delta.conj().transpose(),
                       ivr.conj().transpose(), cont.conj().transpose()))
        QSOlist[i].delta.w = dwave * (1 + QSOlist[i].z
                                      )  # restframe to selframe
        QSOlist[i].delta.delta = delta - np.sum(delta) / len(
            delta)  # zero Centered delta
        QSOlist[i].delta.cont = cont
        QSOlist[i].delta.ivar = ivr

    return QSOlist
Code Example #6
File: Lya_deltas_lib.py  Project: arkel1/lya-deltas
def blindQSO(qso):
    global lol_, l_, Zmz_, Z_, stack

    #print('\t\t QSO ', j, ' of ', len(catalog), '.' )
    # Z QSO ap shift
    Za = qso.z
    Z_rebin = np.interp(Za, Z_, Zmz_)
    qso.z = Za + Z_rebin

    # QSO forest ap shift with interval conservation
    l = qso.w * (1 + Za)
    lol_rebin = resample_flux(l, l_, lol_)

    flux = (qso.flux) * qso.get_mcont(qso.w) * stack(l)

    #dx = (l[1]-l[0])
    #f = ( lol_rebin - 1 )
    #A = np.sum(f)*dx
    #lol_rebin = ( lol_rebin - 1 ) - A/(dx*len(f)) + 1

    l_rebin = lol_rebin * l
    #l_rebin = l_rebin - l_rebin[int(len(l_rebin)/2)] + l[int(len(l)/2)]

    llog = np.log10(l)
    #l2 = l-( l[0]-l_rebin[0] )
    l2 = 10**(np.arange(np.log10(np.min(l_rebin)), np.log10(np.max(l_rebin)),
                        llog[1] - llog[0]))

    flux, ivar = resample_flux(l2, l_rebin, flux, ivar=qso.ivar)
    delta, ivar2 = resample_flux(l2,
                                 l_rebin,
                                 qso.delta.delta,
                                 ivar=qso.delta.ivar)
    cont = resample_flux(l2, l_rebin, qso.delta.cont)

    qso.w = l2 / (1 + qso.z)
    qso.delta.w = l2

    qso.flux = flux
    qso.ivar = ivar

    qso.delta.delta = delta
    qso.delta.ivar = ivar2

    qso.delta.cont = cont

    return qso
Code Example #7
File: test_resample.py  Project: desihub/desispec
 def test_edges(self):
     '''Test for large edge effects in resampling'''
     x = np.arange(0.0, 100)
     y = np.sin(x/20)
     xx = np.linspace(1, 99, 23)
     yy = resample_flux(xx, x, y)
     diff = np.abs(yy - np.interp(xx, x, y))
     self.assertLess(np.max(np.abs(diff)), 1e-2)
Code Example #8
 def test_edges(self):
     '''Test for large edge effects in resampling'''
     x = np.arange(0.0, 100)
     y = np.sin(x / 20)
     xx = np.linspace(1, 99, 23)
     yy = resample_flux(xx, x, y)
     diff = np.abs(yy - np.interp(xx, x, y))
     self.assertLess(np.max(np.abs(diff)), 1e-2)
Code Example #9
File: desi_spectrum.py  Project: iprafols/SQUEzE
def combine_bands(flux_dict, wave_dict, ivar_dict):
    """ Combine the different bands together

    Parameters
    ----------
    flux_dict : dict
    A dictionary with the flux arrays of the different reobservations.
    Each key will contain an array with the fluxes in a given band.

    wave_dict : dict
    A dictionary with the wavelength arrays.
    Each key will contain an array with the wavelengths in a given band.

    ivar_dict : dict
    A dictionary with the ivar arrays of the different reobservations.
    Each key will contain an array with the ivars in a given band

    Returns
    -------
    flux : np.array
    Array containing the flux

    wave : np.array
    Array containing the wavelength

    ivar : np.array
    Array containing the inverse variance
    """
    # create empty combined arrays
    min_wave = np.min(np.array([np.min(wave) for wave in wave_dict.values()]))
    max_wave = np.max(np.array([np.max(wave) for wave in wave_dict.values()]))
    wave = np.linspace(min_wave, max_wave, 4000)
    ivar = np.zeros_like(wave, dtype=float)
    flux = np.zeros_like(wave, dtype=float)

    # populate arrays
    for band in flux_dict:
        ivar += resample_flux(wave, wave_dict[band], ivar_dict[band])
        flux += resample_flux(wave, wave_dict[band],
                              ivar_dict[band] * flux_dict[band])
    flux = flux / (ivar + (ivar == 0))

    return flux, wave, ivar
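A small self-contained sketch with toy inputs (all values hypothetical) showing how the inverse-variance weighting above behaves; it assumes combine_bands and resample_flux are importable.

# Toy inputs (hypothetical values) for the ivar-weighted band combination above.
import numpy as np

wave_dict = {'b': np.linspace(3600., 5800., 200), 'r': np.linspace(5600., 7600., 200)}
flux_dict = {band: np.ones_like(w) for band, w in wave_dict.items()}
ivar_dict = {band: np.full_like(w, 4.0) for band, w in wave_dict.items()}

flux, wave, ivar = combine_bands(flux_dict, wave_dict, ivar_dict)
# Away from band edges, flux stays ~1; the ivars of both bands add in the overlap.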
Code Example #10
    def _update_data(self, ispec=None):
        '''
        Update the data containers for target number ispec

        If ispec is None, use self.ispec; otherwise set self.ispec = ispec

        updates self.ispec, .izbest, .data, .xdata
        '''
        if ispec is not None:
            self.ispec = ispec

        targetid = self.spectra.fibermap['TARGETID'][self.ispec]
        self.izbest = np.where(self.zbest['TARGETID'] == targetid)[0][0]
        zb = self.zbest[self.izbest]
        tx = self.templates[(zb['SPECTYPE'], zb['SUBTYPE'])]
        coeff = zb['COEFF'][0:tx.nbasis]
        model = tx.flux.T.dot(coeff).T
        for channel in ('b', 'r', 'z'):
            wave = self.spectra.wave[channel]
            flux = self.spectra.flux[channel][self.ispec]
            ivar = self.spectra.ivar[channel][self.ispec]
            xwave = np.arange(wave[0], wave[-1], 3)
            xflux, xivar = resample_flux(xwave,
                                         wave,
                                         flux,
                                         ivar=ivar,
                                         extrapolate=False)
            xmodel = resample_flux(xwave, tx.wave * (1 + zb['Z']), model)
            rmodel = resample_flux(wave, tx.wave * (1 + zb['Z']), model)
            if channel in self.data:
                self.xdata[channel].data['wave'] = xwave
                self.xdata[channel].data['flux'] = xflux
                self.xdata[channel].data['ivar'] = xivar
                self.xdata[channel].data['model'] = xmodel
                self.data[channel].data['wave'] = wave
                self.data[channel].data['flux'] = flux
                self.data[channel].data['ivar'] = ivar
                self.data[channel].data['model'] = rmodel
            else:
                self.data[channel] = ColumnDataSource(
                    dict(wave=wave, flux=flux, ivar=ivar, model=rmodel))
                self.xdata[channel] = ColumnDataSource(
                    dict(wave=xwave, flux=xflux, ivar=xivar, model=xmodel))
Code Example #11
 def test_resample(self):
     n = 100
     x = np.arange(n)
     y = np.ones(n)
     # we need in this test to make sure we have the same boundaries of the edges bins
     # to obtain the same flux density on the edges
     # because the resampling routine considers the flux is 0 outside of the input bins
     nout = n // 2
     stepout = n / float(nout)
     xout = np.arange(nout) * stepout + stepout / 2 - 0.5
     yout = resample_flux(xout, x, y)
     self.assertTrue(np.all(yout == 1.0))
Code Example #12
File: test_resample.py  Project: desihub/desispec
 def test_resample(self):
     n = 100
     x = np.arange(n)
     y = np.ones(n)
     # we need in this test to make sure we have the same boundaries of the edges bins
     # to obtain the same flux density on the edges
     # because the resampling routine considers the flux is 0 outside of the input bins
     nout = n//2
     stepout = n/float(nout)
     xout = np.arange(nout)*stepout+stepout/2-0.5 
     yout = resample_flux(xout, x, y, extrapolate=True)
     self.assertTrue(np.all(yout == 1.0))                
Code Example #13
    def test_flux_conservation(self):
        n = 100
        x = np.arange(n)
        y = 1 + np.sin(x / 20.0)
        y[n // 2 + 1] += 10
        # xout must have edges including bin half width equal
        # or larger than input to get the same integrated flux
        xout = np.arange(0, n + 1, 2)
        yout = resample_flux(xout, x, y)

        fluxin = np.sum(y * np.gradient(x))
        fluxout = np.sum(yout * np.gradient(xout))
        self.assertAlmostEqual(fluxin, fluxout)
Code Example #14
File: test_resample.py  Project: profxj/desispec
 def test_flux_conservation(self):
     n = 100
     x = np.arange(n)
     y = 1+np.sin(x/20.0)
      y[n//2+1] += 10
     # xout must have edges including bin half width equal
     # or larger than input to get the same integrated flux
     xout = np.arange(0,n+1,2)
     yout = resample_flux(xout, x, y)
     
     fluxin = np.sum(y*np.gradient(x))
     fluxout = np.sum(yout*np.gradient(xout))
     self.assertAlmostEqual(fluxin, fluxout)
Code Example #15
File: qframe.py  Project: sdss/lvmspec
    def asframe(self, wavelength=None):
        """
        Converts QFrame to a Frame 

        """

        if wavelength is None:
            dwave = np.min(np.gradient(self.wave[self.nspec // 2]))
            wmin = np.max(self.wave[:, 0])
            wmax = np.min(self.wave[:, -1])
            n = int((wmax - wmin) / dwave) + 1
            wavelength = np.linspace(wmin, wmax, n)

        rflux = np.zeros((self.nspec, wavelength.size))
        rivar = np.zeros((self.nspec, wavelength.size))
        if self.mask is None:
            for i in range(self.nspec):
                rflux[i], rivar[i] = resample_flux(wavelength,
                                                   self.wave[i],
                                                   self.flux[i],
                                                   self.ivar[i],
                                                   extrapolate=False)
        else:
            for i in range(self.nspec):
                rflux[i], rivar[i] = resample_flux(wavelength,
                                                   self.wave[i],
                                                   self.flux[i],
                                                   self.ivar[i] *
                                                   (self.mask[i] == 0),
                                                   extrapolate=False)


        return Frame(wave=wavelength,flux=rflux,ivar=rivar,mask=None,resolution_data=None,\
                     fibers=self.fibers, spectrograph=None, meta=self.meta, fibermap=self.fibermap,\
                     chi2pix=None,scores=None,scores_comments=None,\
                     wsigma=self.sigma,ndiag=1, suppress_res_warning=True)
Code Example #16
File: resample.py  Project: desiTools/desispec_tools
def resample_to_same_wavelength_grid(spectra, ivar, wave) :

    #   Choose the average wavelength of all fibers
    same_wave   = np.mean(wave, axis=0)
    nfibers     = spectra.shape[0]

    #   Declaring output
    resampled_spectra   = np.zeros((nfibers, same_wave.size))
    resampled_ivar      = np.zeros((nfibers, same_wave.size))

    #  Iterating resampling function on each fibers
    for fiber in range(nfibers) :
        resampled_spectra[fiber], resampled_ivar[fiber] = resample_flux(same_wave, wave[fiber], spectra[fiber], ivar[fiber])
    
    return (resampled_spectra, resampled_ivar, same_wave)
Code Example #17
File: resample.py  Project: CAClaveau/teststand
def resample_to_same_wavelength_grid(spectra, ivar, wave):

    #   Choose the average wavelength of all fibers
    same_wave = np.mean(wave, axis=0)
    nfibers = spectra.shape[0]

    #   Declaring output
    resampled_spectra = np.zeros((nfibers, same_wave.size))
    resampled_ivar = np.zeros((nfibers, same_wave.size))

    #  Iterating resampling function on each fibers
    for fiber in range(nfibers):
        resampled_spectra[fiber], resampled_ivar[fiber] = resample_flux(
            same_wave, wave[fiber], spectra[fiber], ivar[fiber])

    return (resampled_spectra, resampled_ivar, same_wave)
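A usage sketch with toy per-fiber grids; shapes and values are illustrative only.

# Toy per-fiber inputs (illustrative): 3 fibers with slightly offset wavelength grids.
import numpy as np

base = np.linspace(3600., 3700., 50)
wave = np.vstack([base - 0.1, base, base + 0.1])
spectra = np.ones((3, 50))
ivar = np.ones((3, 50))

rspec, rivar, common_wave = resample_to_same_wavelength_grid(spectra, ivar, wave)
# common_wave is the mean of the three grids; rspec/rivar are resampled onto it.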
Code Example #18
def get_appmags(vs, Fv, filters, printit=False):
    '''
  Given a list of filters, rest wavelengths ls and F_\lambda at those wavelengths, 
  return the apparent magnitude in each filter band.
  '''

    import collections

    mags = collections.OrderedDict()

    ##  Check vs is monotonically increasing.
    assert np.all(np.diff(vs) >= 0.)

    ## Implement eqn. (7) of ./GALAXEV/doc/bc03.pdf
    for i, band in enumerate(filters.keys()):
        filter = filters[band]

        ##  Assume filter vs are derived from ls and are monotonically decreasing,
        ##  in which case they need reversed.
        assert np.all(np.diff(filter['vs']) <= 0.)

        pFv = resample_flux(filter['vs'][::-1],
                            vs,
                            Fv,
                            ivar=None,
                            extrapolate=False)
        pFv = pFv[::-1]

        ## Filters are defined in wavelength; calculate normalisation by eqn. (8) denominator.
        norm = np.trapz(filter['Ts'] / filter['vs'], filter['vs'])

        result = np.trapz(pFv * filter['Ts'] / filter['vs'], filter['vs'])
        result /= norm

        if result == 0.0:
            mags[band] = 99.

        else:
            mags[band] = -2.5 * np.log10(
                result) - 48.60  ##  AB bandpass magnitude.

        if printit:
            print("%s \t %.6f" % (band, mags[band]))

    return mags
Code Example #19
File: test_resample.py  Project: desihub/desispec
 def test_non_uniform_grid(self):
     n = 100
     x = np.arange(n)+1.
     y = np.ones(n)
     # we need in this test to make sure we have the same boundaries of the edges bins
     # to obtain the same flux density on the edges
     # because the resampling routine considers the flux is 0 outside of the input bins
     # we consider here a logarithmic output grid
     nout = n//2
     lstepout = (log(x[-1])-log(x[0]))/float(nout)
     xout = np.exp(np.arange(nout)*lstepout)-0.5
     xout[0]  = x[0]-0.5+(xout[1]-xout[0])/2 # same edge of first bin
     offset   =  x[-1]+0.5-(xout[-1]-xout[-2])/2 - xout[-1]
     xout[-2:] += offset # same edge of last bin
     
     yout = resample_flux(xout, x, y,extrapolate=True)
     zero = np.max(np.abs(yout-1))
     self.assertAlmostEqual(zero,0.)
Code Example #20
File: test_resample.py  Project: gdhungana/desispec
 def test_weighted_resample(self):
     n = 100
     x = np.arange(n)
     y = 1+np.sin(x/20.0)        
      y[n//2+1] += 10
     ivar = np.ones(n)
     for rebin in (2, 3, 5):
         xout = np.arange(0,n+1,rebin)
         yout, ivout = resample_flux(xout, x, y, ivar)
         self.assertEqual(len(xout), len(yout))
         self.assertEqual(len(xout), len(ivout))
          # we have to compare the variance of output bins that
         # are fully contained in input
          self.assertAlmostEqual(ivout[ivout.size//2], ivar[ivar.size//2]*rebin)
         # check sum of weights is conserved 
         ivar_in  = np.sum(ivar)
         ivar_out = np.sum(ivout)
         self.assertAlmostEqual(ivar_in,ivar_out)
Code Example #21
def doublet_obs(z,
                twave,
                wave,
                res,
                continuum=0.0,
                sigmav=5.,
                r=0.1,
                linea=3726.032,
                lineb=3728.815):
    _, tflux = doublet(z=z,
                       twave=twave,
                       sigmav=sigmav,
                       r=r,
                       linea=linea,
                       lineb=lineb)
    tflux = resample_flux(wave, twave, tflux)

    return res.dot(tflux)
Code Example #22
File: test_resample.py  Project: gdhungana/desispec
 def test_non_uniform_grid(self):
     n = 100
     x = np.arange(n)+1.
     y = np.ones(n)
     # we need in this test to make sure we have the same boundaries of the edges bins
     # to obtain the same flux density on the edges
     # because the resampling routine considers the flux is 0 outside of the input bins
     # we consider here a logarithmic output grid
      nout = n//2
     lstepout = (log(x[-1])-log(x[0]))/float(nout)
     xout = np.exp(np.arange(nout)*lstepout)-0.5
     xout[0]  = x[0]-0.5+(xout[1]-xout[0])/2 # same edge of first bin
     offset   =  x[-1]+0.5-(xout[-1]-xout[-2])/2 - xout[-1]
     xout[-2:] += offset # same edge of last bin
     
     yout = resample_flux(xout, x, y)
     
     self.assertTrue(np.all(yout == 1.0))                
Code Example #23
File: test_resample.py  Project: desihub/desispec
 def test_weighted_resample(self):
     n = 100
     x = np.arange(n)
     y = 1+np.sin(x/20.0)        
     y[n//2+1] += 10
     ivar = np.ones(n)
     for rebin in (2, 3, 5):
         xout = np.arange(0,n+1,rebin)
         yout, ivout = resample_flux(xout, x, y, ivar)
         self.assertEqual(len(xout), len(yout))
         self.assertEqual(len(xout), len(ivout))
          # we have to compare the variance of output bins that
         # are fully contained in input
         self.assertAlmostEqual(ivout[ivout.size//2], ivar[ivar.size//2]*rebin)
         # check sum of weights is conserved 
         ivar_in  = np.sum(ivar)
         ivar_out = np.sum(ivout)
         self.assertAlmostEqual(ivar_in,ivar_out)
Code Example #24
File: app_mags.py  Project: michaelJwilson/LBGCMB
def get_appmags(vs, Fv, filters, printit=False):
    '''
  Given a list of filters, rest wavelengths ls and F_\lambda at those wavelengths, 
  return the apparent magnitude in each filter band.
  '''

    import collections

    mags = collections.OrderedDict()

    ##  Implement eqn. (7) of ./GALAXEV/doc/bc03.pdf
    for i, band in enumerate(filters.keys()):
        filter = filters[band]

        ##  Note:  Filters extend in wavelength much further than their transmission.
        ##  assert vs.max() >= filter['vs'].max()
        ##  assert vs.min() <= filter['vs'].min()

        pFv = resample_flux(filter['vs'][::-1],
                            vs[::-1],
                            Fv[::-1],
                            ivar=None,
                            extrapolate=False)
        pFv = pFv[::-1]

        ##  Filters are defined in wavelength;  Calculate normalisation by eqn. (8) denominator.
        norm = np.trapz(filter['Ts'], filter['vs'])  ##  / filter['vs']
        result = np.trapz(pFv * filter['Ts'], filter['vs'])  ##  / filter['vs']

        result /= norm

        if result == 0.0:
            mags[band] = 99.

        else:
            mags[band] = -2.5 * np.log10(
                result) - 48.60  ##  AB bandpass magnitude.

        if printit:
            print("%s \t %.6le \t %.6lf" % (band, result, mags[band]))

    return mags
Code Example #25
def lephare_madau(rwl, z):
    '''
    Input:   Rest wavelength, redshift. 
    Output:  Return the madau extragalactic extinction (by neutral hydrogen) 
             curve shipped with Le Phare.
    '''

    if z > 8.0:
        raise ValueError('Le Phare Madau correction is not available for z > 8.0.')

    ##  http://www.cfht.hawaii.edu/~arnouts/LEPHARE/download.html
    root   = os.environ['BEAST']
    dat    = np.loadtxt(root + '/gal_maker/Madau/tau{0:02d}.out'.format(int(10 * z)))

    lext   = np.arange(0.0,    18., 1.)
    hext   = np.arange(1216., 1.e4, 1.)

    result = resample_flux(rwl,
                           np.concatenate([lext, dat[:, 0], hext]),
                           np.concatenate([dat[0, 1] * np.ones_like(lext), dat[:, 1], np.ones_like(hext)]),
                           extrapolate=True)

    return  result
Code Example #26
def fast_resample_spectra(spectra, wave):
    """
    Fast resampling of spectra file.
    The output resolution = Id. The neighboring 
    flux bins are correlated.

    Args:
       spectra: desispec.spectra.Spectra object
       wave: 1D numpy array with new wavelength grid

    Returns:
       desispec.spectra.Spectra object, resolution data=Id
    """

    log = get_logger()
    log.debug("Resampling to wave grid: {}".format(wave))

    nwave = wave.size
    b = spectra._bands[0]
    ntarget = spectra.flux[b].shape[0]
    nres = spectra.resolution_data[b].shape[1]
    ivar = np.zeros((ntarget, nwave), dtype=spectra.flux[b].dtype)
    flux = np.zeros((ntarget, nwave), dtype=spectra.ivar[b].dtype)
    if spectra.mask is not None:
        mask = np.zeros((ntarget, nwave), dtype=spectra.mask[b].dtype)
    else:
        mask = None
    rdata = np.ones((ntarget, 1, nwave),
                    dtype=spectra.resolution_data[b].dtype
                    )  # pointless for this resampling
    bands = ""
    for b in spectra._bands:
        if spectra.mask is not None:
            tivar = spectra.ivar[b] * (spectra.mask[b] == 0)
        else:
            tivar = spectra.ivar[b]
        for i in range(ntarget):
            ivar[i] += resample_flux(wave, spectra.wave[b], tivar[i])
            flux[i] += resample_flux(wave, spectra.wave[b],
                                     tivar[i] * spectra.flux[b][i])
        bands += b
    for i in range(ntarget):
        ok = (ivar[i] > 0)
        flux[i, ok] /= ivar[i, ok]
    if spectra.mask is not None:
        dmask = {
            bands: mask,
        }
    else:
        dmask = None
    res = Spectra(bands=[
        bands,
    ],
                  wave={
                      bands: wave,
                  },
                  flux={
                      bands: flux,
                  },
                  ivar={
                      bands: ivar,
                  },
                  mask=dmask,
                  resolution_data={
                      bands: rdata,
                  },
                  fibermap=spectra.fibermap,
                  meta=spectra.meta,
                  extra=spectra.extra,
                  scores=spectra.scores)
    return res
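A hedged usage sketch; `spectra` is assumed to be a desispec Spectra object carrying resolution data, as the function requires.

# Hedged usage sketch: `spectra` must carry resolution_data, as used above.
import numpy as np

newwave = np.arange(3600.0, 9800.0, 1.0)
coadd = fast_resample_spectra(spectra, newwave)
# coadd has a single concatenated band (e.g. 'brz') with identity resolution data.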
Code Example #27
def create_model(spectra,
                 zbest,
                 archetype_fit=False,
                 archetypes_dir=None,
                 template_dir=None):
    '''
    Returns model_wave[nwave], model_flux[nspec, nwave], row matched to zbest,
    which can be in a different order than spectra.
    - zbest must be entry-matched to spectra.
    '''

    assert _redrock_imported
    assert _desispec_imported  # for resample_flux

    if np.any(zbest['TARGETID'] != spectra.fibermap['TARGETID']):
        raise ValueError(
            'zcatalog and spectra do not match (different targetids)')

    if archetype_fit:
        archetypes = All_archetypes(archetypes_dir=archetypes_dir).archetypes
    else:
        templates = load_redrock_templates(template_dir=template_dir)

    #- Empty model flux arrays per band to fill
    model_flux = dict()
    for band in spectra.bands:
        model_flux[band] = np.zeros(spectra.flux[band].shape)

    for i in range(len(zbest)):
        zb = zbest[i]

        if archetype_fit:
            archetype = archetypes[zb['SPECTYPE']]
            coeff = zb['COEFF']

            for band in spectra.bands:
                wave = spectra.wave[band]
                wavehash = hash((len(wave), wave[0], wave[1], wave[-2],
                                 wave[-1], spectra.R[band].data.shape[0]))
                dwave = {wavehash: wave}
                mx = archetype.eval(zb['SUBTYPE'], dwave, coeff, wave,
                                    zb['Z']) * (1 + zb['Z'])
                model_flux[band][i] = spectra.R[band][i].dot(mx)

        else:
            tx = templates[(zb['SPECTYPE'], zb['SUBTYPE'])]
            coeff = zb['COEFF'][0:tx.nbasis]
            model = tx.flux.T.dot(coeff).T

            for band in spectra.bands:
                mx = resample_flux(spectra.wave[band], tx.wave * (1 + zb['Z']),
                                   model)
                model_flux[band][i] = spectra.R[band][i].dot(mx)

    #- Now combine, if needed, to a single wavelength grid across all cameras
    if spectra.bands == ['brz']:
        model_wave = spectra.wave['brz']
        mflux = model_flux['brz']

    elif np.all([band in spectra.bands for band in ['b', 'r', 'z']]):
        br_split = 0.5 * (spectra.wave['b'][-1] + spectra.wave['r'][0])
        rz_split = 0.5 * (spectra.wave['r'][-1] + spectra.wave['z'][0])
        keep = dict()
        keep['b'] = (spectra.wave['b'] < br_split)
        keep['r'] = (br_split <= spectra.wave['r']) & (spectra.wave['r'] <
                                                       rz_split)
        keep['z'] = (rz_split <= spectra.wave['z'])
        model_wave = np.concatenate([
            spectra.wave['b'][keep['b']],
            spectra.wave['r'][keep['r']],
            spectra.wave['z'][keep['z']],
        ])
        mflux = np.concatenate([
            model_flux['b'][:, keep['b']],
            model_flux['r'][:, keep['r']],
            model_flux['z'][:, keep['z']],
        ],
                               axis=1)
    else:
        raise RuntimeError(
            "create_model: Set of bands for spectra not supported")

    return model_wave, mflux
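A short usage sketch, assuming `spectra` and `zbest` are already row-matched as the docstring requires.

# Hedged usage sketch: zbest rows correspond to spectra.fibermap rows.
model_wave, model_flux = create_model(spectra, zbest, archetype_fit=False)
# model_flux[i] is the best-fit redrock model of target i, sampled on model_wave.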
Code Example #28
File: quickquasars.py  Project: desihub/desisim
def simulate_one_healpix(ifilename,args,model,obsconditions,decam_and_wise_filters,
                         bassmzls_and_wise_filters,footprint_healpix_weight,
                         footprint_healpix_nside,
                         bal=None,sfdmap=None,eboss=None) :
    log = get_logger()

    # open filename and extract basic HEALPix information
    pixel, nside, hpxnest = get_healpix_info(ifilename)

    # using global seed (could be None) get seed for this particular pixel
    global_seed = args.seed
    seed = get_pixel_seed(pixel, nside, global_seed)
    # use this seed to generate future random numbers
    np.random.seed(seed)

    # get output file (we will write there spectra for this HEALPix pixel)
    ofilename = get_spectra_filename(args,nside,pixel)
    # get directory name (we will also write there zbest file)
    pixdir = os.path.dirname(ofilename)

    # get filename for truth file
    truth_filename = get_truth_filename(args,pixdir,nside,pixel)

    # get filename for zbest file
    zbest_filename = get_zbest_filename(args,pixdir,nside,pixel)

    if not args.overwrite :
        # check whether output exists or not
        if args.zbest :
            if os.path.isfile(ofilename) and os.path.isfile(zbest_filename) :
                log.info("skip existing {} and {}".format(ofilename,zbest_filename))
                return
        else : # only test spectra file
            if os.path.isfile(ofilename) :
                log.info("skip existing {}".format(ofilename))
                return

    # create sub-directories if required
    if len(pixdir)>0 :
        if not os.path.isdir(pixdir) :
            log.info("Creating dir {}".format(pixdir))
            os.makedirs(pixdir)

    log.info("Read skewers in {}, random seed = {}".format(ifilename,seed))

    # Read transmission from files. It might include DLA information, and it
    # might add metal transmission as well (from the HDU file).
    log.info("Read transmission file {}".format(ifilename))
    trans_wave, transmission, metadata, dla_info = read_lya_skewers(ifilename,read_dlas=(args.dla=='file'),add_metals=args.metals_from_file)

    ### Add Finger-of-God velocities before generating the continua
    log.info("Add FOG to redshift with sigma {} to quasar redshift".format(args.sigma_kms_fog))
    DZ_FOG = args.sigma_kms_fog/c*(1.+metadata['Z'])*np.random.normal(0,1,metadata['Z'].size)
    metadata['Z'] += DZ_FOG

    ### Select quasar within a given redshift range
    w = (metadata['Z']>=args.zmin) & (metadata['Z']<=args.zmax)
    transmission = transmission[w]
    metadata = metadata[:][w]
    DZ_FOG = DZ_FOG[w]

    # option to make for BOSS+eBOSS
    if not eboss is None:
        if args.downsampling or args.desi_footprint:
            raise ValueError("eboss option can not be run with "
                    +"desi_footprint or downsampling")

        # Get the redshift distribution from SDSS
        selection = sdss_subsample_redshift(metadata["RA"],metadata["DEC"],metadata['Z'],eboss['redshift'])
        log.info("Select QSOs in BOSS+eBOSS redshift distribution {} -> {}".format(metadata['Z'].size,selection.sum()))
        if selection.sum()==0:
            log.warning("No intersection with BOSS+eBOSS redshift distribution")
            return
        transmission = transmission[selection]
        metadata = metadata[:][selection]
        DZ_FOG = DZ_FOG[selection]

        # figure out the density of all quasars
        N_highz = metadata['Z'].size
        # area of healpix pixel, in degrees
        area_deg2 = healpy.pixelfunc.nside2pixarea(nside,degrees=True)
        input_highz_dens_deg2 = N_highz/area_deg2
        selection = sdss_subsample(metadata["RA"], metadata["DEC"],
                        input_highz_dens_deg2,eboss['footprint'])
        log.info("Select QSOs in BOSS+eBOSS footprint {} -> {}".format(transmission.shape[0],selection.size))
        if selection.size == 0 :
            log.warning("No intersection with BOSS+eBOSS footprint")
            return
        transmission = transmission[selection]
        metadata = metadata[:][selection]
        DZ_FOG = DZ_FOG[selection]

    if args.desi_footprint :
        footprint_healpix = footprint.radec2pix(footprint_healpix_nside, metadata["RA"], metadata["DEC"])
        selection = np.where(footprint_healpix_weight[footprint_healpix]>0.99)[0]
        log.info("Select QSOs in DESI footprint {} -> {}".format(transmission.shape[0],selection.size))
        if selection.size == 0 :
            log.warning("No intersection with DESI footprint")
            return
        transmission = transmission[selection]
        metadata = metadata[:][selection]
        DZ_FOG = DZ_FOG[selection]



    nqso=transmission.shape[0]
    if args.downsampling is not None :
        if args.downsampling <= 0 or  args.downsampling > 1 :
           log.error("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
           raise ValueError("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
        indices = np.where(np.random.uniform(size=nqso)<args.downsampling)[0]
        if indices.size == 0 :
            log.warning("Down sampling from {} to 0 (by chance I presume)".format(nqso))
            return
        transmission = transmission[indices]
        metadata = metadata[:][indices]
        DZ_FOG = DZ_FOG[indices]
        nqso = transmission.shape[0]

    if args.nmax is not None :
        if args.nmax < nqso :
            log.info("Limit number of QSOs from {} to nmax={} (random subsample)".format(nqso,args.nmax))
            # take a random subsample
            indices = (np.random.uniform(size=args.nmax)*nqso).astype(int)
            transmission = transmission[indices]
            metadata = metadata[:][indices]
            DZ_FOG = DZ_FOG[indices]
            nqso = args.nmax

    # In previous versions of the London mocks we needed to enforce F=1 for
    # z > z_qso here, but this is not needed anymore. Moreover, now we also
    # have metal absorption that implies F < 1 for z > z_qso
    #for ii in range(len(metadata)):
    #    transmission[ii][trans_wave>lambda_RF_LYA*(metadata[ii]['Z']+1)]=1.0

    # if requested, add DLA to the transmission skewers
    if args.dla is not None :

        # if adding random DLAs, we will need a new random generator
        if args.dla=='random':
            log.info('Adding DLAs randomly')
            random_state_just_for_dlas = np.random.RandomState(seed)
        elif args.dla=='file':
            log.info('Adding DLAs from transmission file')
        else:
            log.error("Wrong option for args.dla: "+args.dla)
            sys.exit(1)

        # if adding DLAs, the information will be printed here
        dla_filename=os.path.join(pixdir,"dla-{}-{}.fits".format(nside,pixel))
        dla_NHI, dla_z, dla_qid,dla_id = [], [], [],[]

        # identify minimum Lya redshift in transmission files
        min_lya_z = np.min(trans_wave/lambda_RF_LYA - 1)

        # loop over quasars in pixel

        for ii in range(len(metadata)):

            # quasars with z < min_z will not have any DLA in spectrum
            if min_lya_z>metadata['Z'][ii]: continue

            # quasar ID
            idd=metadata['MOCKID'][ii]
            dlas=[]

            if args.dla=='file':
                for dla in dla_info[dla_info['MOCKID']==idd]:

                    # Adding only DLAs with z < zqso
                    if dla['Z_DLA_RSD']>=metadata['Z'][ii]: continue
                    dlas.append(dict(z=dla['Z_DLA_RSD'],N=dla['N_HI_DLA'],dlaid=dla['DLAID']))
                transmission_dla = dla_spec(trans_wave,dlas)

            elif args.dla=='random':
                dlas, transmission_dla = insert_dlas(trans_wave, metadata['Z'][ii], rstate=random_state_just_for_dlas)
                for idla in dlas:
                   idla['dlaid']+=idd*1000      #Added to have unique DLA ids. Same format as DLAs from file.

            # multiply transmissions and store information for the DLA file
            if len(dlas)>0:
                transmission[ii] = transmission_dla * transmission[ii]
                dla_z += [idla['z'] for idla in dlas]
                dla_NHI += [idla['N'] for idla in dlas]
                dla_id += [idla['dlaid'] for idla in dlas]
                dla_qid += [idd]*len(dlas)
        log.info('Added {} DLAs'.format(len(dla_id)))
        # write file with DLA information
        if len(dla_id)>0:
            dla_meta=Table()
            dla_meta['NHI'] = dla_NHI
            dla_meta['Z_DLA'] = dla_z  #This is Z_DLA_RSD in transmission.
            dla_meta['TARGETID']=dla_qid
            dla_meta['DLAID'] = dla_id
            hdu_dla = pyfits.convenience.table_to_hdu(dla_meta)
            hdu_dla.name="DLA_META"
            del(dla_meta)
            log.info("DLA metadata to be saved in {}".format(truth_filename))
        else:
            hdu_dla=pyfits.PrimaryHDU()
            hdu_dla.name="DLA_META"

    # if requested, extend transmission skewers to cover full spectrum
    if args.target_selection or args.bbflux :
        wanted_min_wave = 3329. # needed to compute magnitudes for decam2014-r (one could have trimmed the transmission file ...)
        wanted_max_wave = 55501. # needed to compute magnitudes for wise2010-W2

        if trans_wave[0]>wanted_min_wave :
            log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(wanted_min_wave),int(trans_wave[-1])))
            # pad with ones at short wavelength, we assume F = 1 for z <~ 1.7
            # we don't need any wavelength resolution here
            new_trans_wave = np.append([wanted_min_wave,trans_wave[0]-0.01],trans_wave)
            new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
            new_transmission[:,2:] = transmission
            trans_wave   = new_trans_wave
            transmission = new_transmission

        if trans_wave[-1]<wanted_max_wave :
            log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(trans_wave[0]),int(wanted_max_wave)))
            # pad with ones at long wavelength because we assume F = 1
            coarse_dwave = 2. # we don't care about resolution, we just need a decent QSO spectrum, there is no IGM transmission in this range
            n = int((wanted_max_wave-trans_wave[-1])/coarse_dwave)+1
            new_trans_wave = np.append(trans_wave,np.linspace(trans_wave[-1]+coarse_dwave,trans_wave[-1]+coarse_dwave*(n+1),n))
            new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
            new_transmission[:,:trans_wave.size] = transmission
            trans_wave   = new_trans_wave
            transmission = new_transmission

    # whether to use QSO or SIMQSO to generate quasar continua.  Simulate
    # spectra in the north vs south separately because they're on different
    # photometric systems.
    south = np.where( is_south(metadata['DEC']) )[0]
    north = np.where( ~is_south(metadata['DEC']) )[0]
    meta, qsometa = empty_metatable(nqso, objtype='QSO', simqso=not args.no_simqso)
    if args.no_simqso:
        log.info("Simulate {} QSOs with QSO templates".format(nqso))
        tmp_qso_flux = np.zeros([nqso, len(model.eigenwave)], dtype='f4')
        tmp_qso_wave = np.zeros_like(tmp_qso_flux)
    else:
        log.info("Simulate {} QSOs with SIMQSO templates".format(nqso))
        tmp_qso_flux = np.zeros([nqso, len(model.basewave)], dtype='f4')
        tmp_qso_wave = model.basewave

    for these, issouth in zip( (north, south), (False, True) ):

        # number of quasars in these
        nt = len(these)
        if nt<=0: continue

        if not eboss is None:
            # for eBOSS, generate only quasars with r<22
            magrange = (17.0, 21.3)
            _tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
                = model.make_templates(nmodel=nt,
                    redshift=metadata['Z'][these], magrange=magrange,
                    lyaforest=False, nocolorcuts=True,
                    noresample=True, seed=seed, south=issouth)
        else:
            _tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
                = model.make_templates(nmodel=nt,
                    redshift=metadata['Z'][these],
                    lyaforest=False, nocolorcuts=True,
                    noresample=True, seed=seed, south=issouth)

        _meta['TARGETID'] = metadata['MOCKID'][these]
        _qsometa['TARGETID'] = metadata['MOCKID'][these]
        meta[these] = _meta
        qsometa[these] = _qsometa
        tmp_qso_flux[these, :] = _tmp_qso_flux

        if args.no_simqso:
            tmp_qso_wave[these, :] = _tmp_qso_wave

    log.info("Resample to transmission wavelength grid")
    qso_flux=np.zeros((tmp_qso_flux.shape[0],trans_wave.size))
    if args.no_simqso:
        for q in range(tmp_qso_flux.shape[0]) :
            qso_flux[q]=np.interp(trans_wave,tmp_qso_wave[q],tmp_qso_flux[q])
    else:
        for q in range(tmp_qso_flux.shape[0]) :
            qso_flux[q]=np.interp(trans_wave,tmp_qso_wave,tmp_qso_flux[q])

    tmp_qso_flux = qso_flux
    tmp_qso_wave = trans_wave

    # if requested, add BAL features to the quasar continua
    if args.balprob:
        if args.balprob<=1. and args.balprob >0:
            log.info("Adding BALs with probability {}".format(args.balprob))
            # save current random state
            rnd_state = np.random.get_state()
            tmp_qso_flux,meta_bal=bal.insert_bals(tmp_qso_wave,tmp_qso_flux, metadata['Z'],
                                                  balprob=args.balprob,seed=seed)
            # restore random state to get the same random numbers later
            # as when we don't insert BALs
            np.random.set_state(rnd_state)
            meta_bal['TARGETID'] = metadata['MOCKID']
            w = meta_bal['TEMPLATEID']!=-1
            meta_bal = meta_bal[:][w]
            hdu_bal=pyfits.convenience.table_to_hdu(meta_bal); hdu_bal.name="BAL_META"
            del meta_bal
        else:
            balstr=str(args.balprob)
            log.error("BAL probability is not between 0 and 1 : "+balstr)
            sys.exit(1)

    # Multiply quasar continua by transmitted flux fraction
    # (at this point transmission file might include Ly-beta, metals and DLAs)
    log.info("Apply transmitted flux fraction")
    if not args.no_transmission:
        tmp_qso_flux = apply_lya_transmission(tmp_qso_wave,tmp_qso_flux,
                            trans_wave,transmission)

    # if requested, compute metal transmission on the fly
    # (if not included already from the transmission file)
    if args.metals is not None:
        if args.metals_from_file:
            log.error('you cannot add metals twice')
            raise ValueError('you cannot add metals twice')
        if args.no_transmission:
            log.error('you cannot add metals if asking for no-transmission')
            raise ValueError('can not add metals if using no-transmission')
        lstMetals = ''
        for m in args.metals: lstMetals += m+', '
        log.info("Apply metals: {}".format(lstMetals[:-2]))

        tmp_qso_flux = apply_metals_transmission(tmp_qso_wave,tmp_qso_flux,
                            trans_wave,transmission,args.metals)

    # if requested, compute magnitudes and apply target selection.  Need to do
    # this calculation separately for QSOs in the north vs south.
    bbflux=None
    if args.target_selection or args.bbflux :
        bands=['FLUX_G','FLUX_R','FLUX_Z', 'FLUX_W1', 'FLUX_W2']
        bbflux=dict()
        bbflux['SOUTH'] = is_south(metadata['DEC'])
        for band in bands:
            bbflux[band] = np.zeros(nqso)
        # need to recompute the magnitudes to account for lya transmission
        log.info("Compute QSO magnitudes")

        for these, filters in zip( (~bbflux['SOUTH'], bbflux['SOUTH']),
                                   (bassmzls_and_wise_filters, decam_and_wise_filters) ):
            if np.count_nonzero(these) > 0:
                maggies = filters.get_ab_maggies(1e-17 * tmp_qso_flux[these, :], tmp_qso_wave)
                for band, filt in zip( bands, maggies.colnames ):
                    bbflux[band][these] = np.ma.getdata(1e9 * maggies[filt]) # nanomaggies

    if args.target_selection :
        log.info("Apply target selection")
        isqso = np.ones(nqso, dtype=bool)
        for these, issouth in zip( (~bbflux['SOUTH'], bbflux['SOUTH']), (False, True) ):
            if np.count_nonzero(these) > 0:
                # optical cuts only if using QSO vs SIMQSO
                isqso[these] &= isQSO_colors(gflux=bbflux['FLUX_G'][these],
                                             rflux=bbflux['FLUX_R'][these],
                                             zflux=bbflux['FLUX_Z'][these],
                                             w1flux=bbflux['FLUX_W1'][these],
                                             w2flux=bbflux['FLUX_W2'][these],
                                             south=issouth, optical=args.no_simqso)

        log.info("Target selection: {}/{} QSOs selected".format(np.sum(isqso),nqso))
        selection=np.where(isqso)[0]
        if selection.size==0 : return
        tmp_qso_flux = tmp_qso_flux[selection]
        metadata     = metadata[:][selection]
        meta         = meta[:][selection]
        qsometa      = qsometa[:][selection]
        DZ_FOG      = DZ_FOG[selection]

        for band in bands :
            bbflux[band] = bbflux[band][selection]
        nqso         = selection.size

    log.info("Resample to a linear wavelength grid (needed by DESI sim.)")
    # careful integration of bins, not just a simple interpolation
    qso_wave=np.linspace(args.wmin,args.wmax,int((args.wmax-args.wmin)/args.dwave)+1)
    qso_flux=np.zeros((tmp_qso_flux.shape[0],qso_wave.size))
    for q in range(tmp_qso_flux.shape[0]) :
        qso_flux[q]=resample_flux(qso_wave,tmp_qso_wave,tmp_qso_flux[q])

    log.info("Simulate DESI observation and write output file")
    if "MOCKID" in metadata.dtype.names :
        #log.warning("Using MOCKID as TARGETID")
        targetid=np.array(metadata["MOCKID"]).astype(int)
    elif "ID" in metadata.dtype.names :
        log.warning("Using ID as TARGETID")
        targetid=np.array(metadata["ID"]).astype(int)
    else :
        log.warning("No TARGETID")
        targetid=None

    specmeta={"HPXNSIDE":nside,"HPXPIXEL":pixel, "HPXNEST":hpxnest}

    if args.target_selection or args.bbflux :
        fibermap_columns = dict(
            FLUX_G = bbflux['FLUX_G'],
            FLUX_R = bbflux['FLUX_R'],
            FLUX_Z = bbflux['FLUX_Z'],
            FLUX_W1 = bbflux['FLUX_W1'],
            FLUX_W2 = bbflux['FLUX_W2'],
            )
        photsys = np.full(len(bbflux['FLUX_G']), 'N', dtype='S1')
        photsys[bbflux['SOUTH']] = b'S'
        fibermap_columns['PHOTSYS'] = photsys
    else :
        fibermap_columns=None

    # Attenuate the spectra for extinction
    if not sfdmap is None:
       Rv=3.1   #set by default
       indx=np.arange(metadata['RA'].size)
       extinction =Rv*ext_odonnell(qso_wave)
       EBV = sfdmap.ebv(metadata['RA'],metadata['DEC'], scaling=1.0)
       qso_flux *=10**( -0.4 * EBV[indx, np.newaxis] * extinction)
       if fibermap_columns is not None:
          fibermap_columns['EBV']=EBV
       EBV0=0.0
       EBV_med=np.median(EBV)
       Ag = 3.303 * (EBV_med - EBV0)
       exptime_fact=np.power(10.0, (2.0 * Ag / 2.5))
       obsconditions['EXPTIME']*=exptime_fact
       log.info("Dust extinction added")
       log.info('exposure time adjusted to {}'.format(obsconditions['EXPTIME']))

    sim_spectra(qso_wave,qso_flux, args.program, obsconditions=obsconditions,spectra_filename=ofilename,
                sourcetype="qso", skyerr=args.skyerr,ra=metadata["RA"],dec=metadata["DEC"],targetid=targetid,
                meta=specmeta,seed=seed,fibermap_columns=fibermap_columns,use_poisson=False) # use Poisson = False to get reproducible results.

    ### Keep input redshift
    Z_spec = metadata['Z'].copy()
    Z_input = metadata['Z'].copy()-DZ_FOG

    ### Add a shift to the redshift, simulating the systematic imprecision of redrock
    DZ_sys_shift = args.shift_kms_los/c*(1.+Z_input)
    log.info('Added a shift of {} km/s to the redshift'.format(args.shift_kms_los))
    meta['REDSHIFT'] += DZ_sys_shift
    metadata['Z'] += DZ_sys_shift

    ### Add a shift to the redshift, simulating the statistic imprecision of redrock
    if args.gamma_kms_zfit:
        log.info("Added zfit error with gamma {} to zbest".format(args.gamma_kms_zfit))
        DZ_stat_shift = mod_cauchy(loc=0,scale=args.gamma_kms_zfit,size=nqso,cut=3000)/c*(1.+Z_input)
        meta['REDSHIFT'] += DZ_stat_shift
        metadata['Z'] += DZ_stat_shift

    ## Write the truth file, including metadata for DLAs and BALs
    log.info('Writing a truth file  {}'.format(truth_filename))
    meta.rename_column('REDSHIFT','Z')
    meta.add_column(Column(Z_spec,name='TRUEZ'))
    meta.add_column(Column(Z_input,name='Z_INPUT'))
    meta.add_column(Column(DZ_FOG,name='DZ_FOG'))
    meta.add_column(Column(DZ_sys_shift,name='DZ_SYS'))
    if args.gamma_kms_zfit:
        meta.add_column(Column(DZ_stat_shift,name='DZ_STAT'))
    if 'Z_noRSD' in metadata.dtype.names:
        meta.add_column(Column(metadata['Z_noRSD'],name='Z_NORSD'))
    else:
        log.info('Z_noRSD field not present in transmission file. Z_NORSD not saved to truth file')

    hdu = pyfits.convenience.table_to_hdu(meta)
    hdu.header['EXTNAME'] = 'TRUTH'
    hduqso=pyfits.convenience.table_to_hdu(qsometa)
    hduqso.header['EXTNAME'] = 'QSO_META'
    hdulist=pyfits.HDUList([pyfits.PrimaryHDU(),hdu,hduqso])
    if args.dla:
        hdulist.append(hdu_dla)
    if args.balprob:
        hdulist.append(hdu_bal)
    hdulist.writeto(truth_filename, overwrite=True)
    hdulist.close()




    if args.zbest :
        log.info("Read fibermap")
        fibermap = read_fibermap(ofilename)
        log.info("Writing a zbest file {}".format(zbest_filename))
        columns = [
            ('CHI2', 'f8'),
            ('COEFF', 'f8' , (4,)),
            ('Z', 'f8'),
            ('ZERR', 'f8'),
            ('ZWARN', 'i8'),
            ('SPECTYPE', (str,96)),
            ('SUBTYPE', (str,16)),
            ('TARGETID', 'i8'),
            ('DELTACHI2', 'f8'),
            ('BRICKNAME', (str,8))]
        zbest = Table(np.zeros(nqso, dtype=columns))
        zbest['CHI2'][:] = 0.
        zbest['Z'][:] = metadata['Z']
        zbest['ZERR'][:] = 0.
        zbest['ZWARN'][:] = 0
        zbest['SPECTYPE'][:] = 'QSO'
        zbest['SUBTYPE'][:] = ''
        zbest['TARGETID'][:] = metadata['MOCKID']
        zbest['DELTACHI2'][:] = 25.
        hzbest = pyfits.convenience.table_to_hdu(zbest); hzbest.name='ZBEST'
        hfmap  = pyfits.convenience.table_to_hdu(fibermap);  hfmap.name='FIBERMAP'
        hdulist =pyfits.HDUList([pyfits.PrimaryHDU(),hzbest,hfmap])
        hdulist.writeto(zbest_filename, overwrite=True)
        hdulist.close() # see if this helps with memory issue
Code Example #29
File: io.py  Project: gdhungana/desisim
def _resample_flux(args):
    return resample_flux(*args)
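This tuple-unpacking wrapper is the usual pattern for multiprocessing; below is a hedged sketch of how it could be used (the argument list is hypothetical, not from the source).

# Hedged sketch: Pool.map passes a single object, so arguments are packed as tuples.
import multiprocessing as mp
import numpy as np

if __name__ == "__main__":
    outwave = np.linspace(0.0, 10.0, 21)
    arglist = [(outwave, np.linspace(0.0, 10.0, 50), np.ones(50)) for _ in range(4)]
    with mp.Pool(2) as pool:
        resampled = pool.map(_resample_flux, arglist)   # one resampled array per tuple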
Code Example #30
File: templates.py  Project: akremin/desisim
    def make_templates(self, zrange=(0.5,4.0), gmagrange=(21.0,23.0),
                       no_colorcuts=False):
        """Build Monte Carlo set of LRG spectra/templates.

        This function chooses random subsets of the LRG continuum spectra and
        finally normalizes the spectrum to a specific z-band magnitude.

        TODO (@moustakas): add a LINER- or AGN-like emission-line spectrum 

        Args:
          zrange (float, optional): Minimum and maximum redshift range.  Defaults
            to a uniform distribution between (0.5,4.0).
          gmagrange (float, optional): Minimum and maximum DECam g-band (AB)
            magnitude range.  Defaults to a uniform distribution between (21,23.0).
          no_colorcuts (bool, optional): Do not apply the fiducial rzW1W2 color-cuts
            cuts (default False) (not yet supported).
        
        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column
        from desisim.io import write_templates
        from desispec.interpolation import resample_flux

        # This is a temporary hack because the QSO basis templates are
        # already in the observed frame.
        keep = np.where((self.basemeta['Z'] >= zrange[0]) &
                        (self.basemeta['Z'] <= zrange[1]))[0]
        self.baseflux = self.baseflux[keep,:]
        self.basemeta = self.basemeta[keep]

        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        # meta['W1MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        # meta['W2MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))

        comments = dict(
            TEMPLATEID = 'template ID',
            REDSHIFT = 'object redshift',
            GMAG = 'DECam g-band AB magnitude',
            RMAG = 'DECam r-band AB magnitude',
            ZMAG = 'DECam z-band AB magnitude'
            # W1MAG = 'WISE W1-band AB magnitude',
            # W2MAG = 'WISE W2-band AB magnitude'
        )

        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and g-magnitude distributions.
            # redshift = self.rand.uniform(zrange[0],zrange[1],nchunk)
            gmag = self.rand.uniform(gmagrange[0],gmagrange[1],nchunk)
            zwave = self.basewave # hack!

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                this = np.random.randint(0, nbase) # random base-template index (upper bound exclusive)
                obsflux = self.baseflux[this,:] # [erg/s/cm2/A]

                gnorm = 10.0**(-0.4*gmag[ii])/self.gfilt.get_maggies(zwave,obsflux)
                flux = obsflux*gnorm # [erg/s/cm2/A, @redshift[ii]]

                # [grzW1W2]flux are in nanomaggies
                gflux = 10.0**(-0.4*(gmag[ii]-22.5))                      
                rflux = self.rfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                # w1flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                # w2flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 

                if no_colorcuts:
                    grzW1W2mask = [True]
                else:
                    grzW1W2mask = [True] # color-cuts not yet supported for QSOs (see docstring); accept all

                if all(grzW1W2mask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    meta['TEMPLATEID'][nobj] = nobj
                    meta['REDSHIFT'][nobj] = self.basemeta['Z'][this]
                    meta['GMAG'][nobj] = gmag[ii]
                    meta['RMAG'][nobj] = -2.5*np.log10(rflux)+22.5
                    meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                    # meta['W1MAG'][nobj] = -2.5*np.log10(w1flux)+22.5

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break

        return outflux, self.wave, meta
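
The magnitude bookkeeping in the loop above uses the standard nanomaggie convention, m_AB = 22.5 - 2.5*log10(flux_nmgy), and its inverse flux_nmgy = 10**(-0.4*(m_AB - 22.5)). A tiny round-trip check of that relation:

import numpy as np

def nmgy_to_abmag(flux_nmgy):
    """AB magnitude from a flux in nanomaggies."""
    return 22.5 - 2.5 * np.log10(flux_nmgy)

def abmag_to_nmgy(mag_ab):
    """Flux in nanomaggies from an AB magnitude."""
    return 10.0 ** (-0.4 * (mag_ab - 22.5))

assert np.isclose(nmgy_to_abmag(abmag_to_nmgy(21.3)), 21.3)
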
Code example #31
0
File: templates.py Project: akremin/desisim
    def make_templates(self, zrange=(0.5,1.1), zmagrange=(19.0,20.5),
                       no_colorcuts=False):
        """Build Monte Carlo set of LRG spectra/templates.

        This function chooses random subsets of the LRG continuum spectra and
        finally normalizes the spectrum to a specific z-band magnitude.

        TODO (@moustakas): add a LINER- or AGN-like emission-line spectrum 

        Args:
          zrange (float, optional): Minimum and maximum redshift range.  Defaults
            to a uniform distribution between (0.5,1.1).
          zmagrange (float, optional): Minimum and maximum DECam z-band (AB)
            magnitude range.  Defaults to a uniform distribution between (19,20.5).
          no_colorcuts (bool, optional): Do not apply the fiducial rzW1 color-cuts
            (default False).
        
        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column
        from desisim.io import write_templates
        from desispec.interpolation import resample_flux

        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['W1MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMETAL'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['AGE'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['D4000'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['AGE'].unit = 'Gyr'

        comments = dict(
            TEMPLATEID = 'template ID',
            REDSHIFT = 'object redshift',
            GMAG = 'DECam g-band AB magnitude',
            RMAG = 'DECam r-band AB magnitude',
            ZMAG = 'DECam z-band AB magnitude',
            W1MAG = 'WISE W1-band AB magnitude',
            ZMETAL = 'stellar metallicity',
            AGE = 'time since the onset of star formation',
            D4000 = '4000-Angstrom break'
        )

        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and z-magnitude distributions.
            redshift = self.rand.uniform(zrange[0],zrange[1],nchunk)
            zmag = self.rand.uniform(zmagrange[0],zmagrange[1],nchunk)

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])
                restflux = self.baseflux[iobj,:] # [erg/s/cm2/A @10pc]

                znorm = 10.0**(-0.4*zmag[ii])/self.zfilt.get_maggies(zwave,restflux)
                flux = restflux*znorm # [erg/s/cm2/A, @redshift[ii]]

                # [grzW1]flux are in nanomaggies
                zflux = 10.0**(-0.4*(zmag[ii]-22.5))                      
                gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                rflux = self.rfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                w1flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 

                if no_colorcuts:
                    rzW1mask = [True]
                else:
                    rzW1mask = [Cuts.LRG(rflux=rflux,zflux=zflux,w1flux=w1flux)]

                if all(rzW1mask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    meta['TEMPLATEID'][nobj] = nobj
                    meta['REDSHIFT'][nobj] = redshift[ii]
                    meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                    meta['RMAG'][nobj] = -2.5*np.log10(rflux)+22.5
                    meta['ZMAG'][nobj] = zmag[ii]
                    meta['W1MAG'][nobj] = -2.5*np.log10(w1flux)+22.5
                    meta['ZMETAL'][nobj] = self.basemeta['ZMETAL'][iobj]
                    meta['AGE'][nobj] = self.basemeta['AGE'][iobj]
                    meta['D4000'][nobj] = self.basemeta['D4000'][iobj]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break

        return outflux, self.wave, meta
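
A hypothetical call pattern for a template maker exposing this method; the class name, import path and constructor signature are assumptions for illustration and are not confirmed by the snippet itself:

# assumed import path and constructor; only make_templates() is shown above
from desisim.templates import LRG

lrg = LRG(nmodel=100)  # assumed: number of output templates set at construction
flux, wave, meta = lrg.make_templates(zrange=(0.6, 1.0), zmagrange=(19.0, 20.3))
# flux is [nmodel, npix] in erg/s/cm2/A, wave is the common output grid, meta has one row per template
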
Code example #32
0
File: desi_zfind.py Project: profxj/desispec
    #- wave, flux, and ivar for this target; concatenate
    xwave = list()
    xflux = list()
    xivar = list()
    for channel in ('b', 'r', 'z'):
        exp_flux, exp_ivar, resolution, info = brick[channel].get_target(targetid)
        weights = np.sum(exp_ivar, axis=0)
        ii, = np.where(weights > 0)
        xwave.extend(brick[channel].get_wavelength_grid()[ii])
        #- Average multiple exposures on the same wavelength grid for each channel
        xflux.extend(np.average(exp_flux[:,ii], weights=exp_ivar[:,ii], axis=0))
        xivar.extend(weights[ii])
            
    xwave = np.array(xwave)
    xivar = np.array(xivar)
    xflux = np.array(xflux)
            
    ii = np.argsort(xwave)
    flux[i], ivar[i] = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])

#- Do the redshift fit
zf = RedMonsterZfind(wave, flux, ivar)

#- Write some output
if opts.outfile is None:
    opts.outfile = io.findfile('zbest', brickid=opts.brick)

log.info("Writing "+opts.outfile)
io.write_zbest(opts.outfile, opts.brick, targetids, zf, zspec=opts.zspec)
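
This fragment assumes that a common output grid wave and pre-allocated flux/ivar arrays (one row per target) were created earlier in the script; a minimal sketch of that setup with illustrative grid limits:

import numpy as np

# illustrative values; in the real script targetids come from the brick files
targetids = np.array([100, 101, 102])
dw = 1.0  # output grid step [Angstrom], assumed
wave = np.arange(3579.0, 9825.0 + dw / 2, dw)
flux = np.zeros((len(targetids), wave.size))
ivar = np.zeros((len(targetids), wave.size))
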
    
Code example #33
0
File: redmonster.py Project: samikama/desispec
    def __init__(self,
                 wave,
                 flux,
                 ivar,
                 R=None,
                 dloglam=1e-4,
                 objtype=None,
                 zrange_galaxy=(0.0, 1.6),
                 zrange_qso=(0.0, 3.5),
                 zrange_star=(-0.005, 0.005),
                 group_galaxy=0,
                 group_qso=1,
                 group_star=2,
                 nproc=1,
                 npoly=2):
        """Uses Redmonster to classify and find redshifts.

        See :class:`desispec.zfind.zfind.ZfindBase` class for inputs/outputs.

        optional:
            objtype : list or string of template object types to try
                [ELG, LRG, QSO, GALAXY, STAR]

        TODO: document redmonster specific output variables
        """
        from redmonster.physics.zfinder import ZFinder
        from redmonster.physics.zfitter import ZFitter
        from redmonster.physics.zpicker2 import ZPicker

        log = get_logger()

        #- RedMonster templates don't quite go far enough into the blue,
        #- so chop off some data
        ii, = np.where(wave > 3965)
        wave = wave[ii]
        flux = flux[:, ii].astype(float)
        ivar = ivar[:, ii].astype(float)

        #- Resample inputs to a loglam grid
        start = round(np.log10(wave[0]), 4) + dloglam
        stop = round(np.log10(wave[-1]), 4)

        nwave = int((stop - start) / dloglam)
        loglam = start + np.arange(nwave) * dloglam

        nspec = flux.shape[0]
        self.flux = np.empty((nspec, nwave))
        self.ivar = np.empty((nspec, nwave))

        for i in range(nspec):
            self.flux[i], self.ivar[i] = resample_flux(10**loglam, wave,
                                                       flux[i], ivar[i])

        self.dloglam = dloglam
        self.loglam = loglam
        self.wave = 10**loglam
        self.nwave = nwave
        self.nspec = nspec

        #- Standardize objtype, converting ELG,LRG -> GALAXY, make upper case
        templatetypes = set()
        if objtype is None:
            templatetypes = set(['GALAXY', 'STAR', 'QSO'])
        else:
            if isinstance(objtype, str):
                objtype = [
                    objtype,
                ]

            objtype = [x.upper() for x in objtype]
            for x in objtype:
                if x in ['ELG', 'LRG']:
                    templatetypes.add('GALAXY')
                elif x in ['QSO', 'GALAXY', 'STAR']:
                    templatetypes.add(x)
                else:
                    raise ValueError('Unknown objtype ' + x)

        #- list of (templatename, zmin, zmax) to fix
        self.template_dir = os.getenv('REDMONSTER_TEMPLATES_DIR')
        self.templates = list()
        for x in templatetypes:
            if x == 'GALAXY':
                self.templates.append(
                    ('ndArch-ssp_em_galaxy-v000.fits', zrange_galaxy[0],
                     zrange_galaxy[1], group_galaxy))
            elif x == 'STAR':
                self.templates.append(
                    ('ndArch-spEigenStar-55734.fits', zrange_star[0],
                     zrange_star[1], group_star))
            elif x == 'QSO':
                self.templates.append(('ndArch-QSO-V003.fits', zrange_qso[0],
                                       zrange_qso[1], group_qso))
            else:
                raise ValueError("Bad template type " + x)

        #- Find and refine best redshift per template
        self.zfinders = list()
        self.zfitters = list()

        for template, zmin, zmax, group in self.templates:
            start = time.time()
            zfind = ZFinder(os.path.join(self.template_dir, template),
                            npoly=npoly,
                            zmin=zmin,
                            zmax=zmax,
                            nproc=nproc,
                            group=group)
            zfind.zchi2(self.flux, self.loglam, self.ivar, npixstep=2)
            stop = time.time()
            log.debug(
                "Time to find the redshifts of %d fibers for template %s =%f sec"
                % (self.flux.shape[0], template, stop - start))
            start = time.time()
            zfit = ZFitter(zfind.zchi2arr, zfind.zbase)
            zfit.z_refine2()
            stop = time.time()
            log.debug(
                "Time to refine the redshift fit of %d fibers for template %s =%f sec"
                % (zfit.z.shape[0], template, stop - start))

            for ifiber in range(zfit.z.shape[0]):
                log.debug(
                    "(after z_refine2) fiber #%d %s chi2s=%s zs=%s" %
                    (ifiber, template, zfit.chi2vals[ifiber], zfit.z[ifiber]))

            self.zfinders.append(zfind)
            self.zfitters.append(zfit)

        #- Create wrapper object needed for zpicker
        specobj = _RedMonsterSpecObj(self.wave, self.flux, self.ivar)
        flags = list()
        for i in range(len(self.zfitters)):
            flags.append(self.zfinders[i].zwarning.astype(int) | \
                         self.zfitters[i].zwarning.astype(int))

        #- Zpicker
        self.zpicker = ZPicker(specobj, self.zfinders, self.zfitters, flags)

        #- Fill in outputs
        self.spectype = np.asarray(
            [self.zpicker.type[i][0] for i in range(nspec)])
        self.subtype = np.asarray(["NA" for i in range(nspec)])
        # FIXME:  re-enable subtype writing once we have a sane
        # way to write and read this information to a FITS table
        # column.
        #self.subtype = np.asarray([json.dumps(self.zpicker.subtype[i][0]) for i in range(nspec)])
        self.z = np.array([self.zpicker.z[i][0] for i in range(nspec)])
        self.zerr = np.array([self.zpicker.z_err[i][0] for i in range(nspec)])
        self.zwarn = np.array(
            [int(self.zpicker.zwarning[i]) for i in range(nspec)])
        self.model = self.zpicker.models[:, 0]

        for ifiber in range(self.z.size):
            log.debug("(after zpicker) fiber #%d z=%s" %
                      (ifiber, self.z[ifiber]))
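
Typical driver usage mirrors the desi_zfind snippet above: construct the object on a coadded (wave, flux, ivar) set and read back the per-spectrum outputs filled at the end of __init__. A minimal sketch with synthetic inputs (a real run additionally needs redmonster installed and REDMONSTER_TEMPLATES_DIR set):

import numpy as np

wave = np.arange(3600.0, 9800.0, 1.0)
flux = np.random.normal(1.0, 0.1, size=(5, wave.size))
ivar = np.full_like(flux, 100.0)

zf = RedMonsterZfind(wave, flux, ivar, objtype=['ELG', 'QSO'], nproc=1)
for i in range(zf.nspec):
    print(zf.spectype[i], zf.z[i], zf.zerr[i], zf.zwarn[i])
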
Code example #34
0
File: shapley_sn.py Project: michaelJwilson/BEAST
for exp, ax, fname in zip(['BEAST', 'PFS'], axarr, fnames):
    ##
    dat = fits.open(fname)
    tSN2 = 0.0

    for i, ARM in enumerate(['B', 'R', 'Z']):
        wave = dat['%s_WAVELENGTH' % ARM].data
        flux = dat['%s_FLUX' % ARM].data  ##  10^-17 erg/s/cm2/Angstrom

        ivar = dat['%s_IVAR' % ARM].data
        sig = 1. / np.sqrt(ivar)

        back = sig[0, :] * u.erg / u.s / u.cm / u.cm / u.AA

        SN2 = resample_flux(wave, swave, sflux)**2. / sig[0, :]**2.
        tSN2 += np.sum(SN2)

        ax.plot(wave / 1.e3,
                SN2,
                c=colors[i],
                label=str(meta['Lya-EW'][nth]) + ' ' +
                str(meta['REDSHIFT'][nth]))

    print(exp, np.sqrt(tSN2))

    ##  ax.set_ylim(0., 0.6)
    ##  ax.set_ylabel(r'$10^{-17}$ erg/$s$/cm$^2$/$\AA$')

    ax.set_axis_on()
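
The figure of merit printed at the end is the per-arm S/N^2 summed over pixels and combined in quadrature, i.e. total S/N = sqrt(sum over arms and pixels of SN^2). A standalone illustration of that combination with toy numbers:

import numpy as np

sn2_per_arm = [np.full(100, 0.04), np.full(120, 0.09), np.full(110, 0.01)]  # toy SN^2 arrays for B, R, Z
total_sn = np.sqrt(sum(arr.sum() for arr in sn2_per_arm))
print('total S/N = {:.2f}'.format(total_sn))
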
Code example #35
0
File: quickquasars.py Project: LuzGarciaP/desisim
def simulate_one_healpix(ifilename,
                         args,
                         model,
                         obsconditions,
                         decam_and_wise_filters,
                         footprint_healpix_weight,
                         footprint_healpix_nside,
                         seed,
                         bal=None):
    log = get_logger()

    # set seed now
    # we need a seed per healpix because
    # the spectra simulator REQUIRES a seed
    np.random.seed(seed)

    # read the header of the transmission file to find the healpix pixel number, nside
    # and if we are lucky the scheme.
    # if this fails, try to guess it from the filename (for backward compatibility)
    healpix = -1
    nside = -1
    hpxnest = True

    hdulist = pyfits.open(ifilename)
    if "METADATA" in hdulist:
        head = hdulist["METADATA"].header
        for k in ["HPXPIXEL", "PIXNUM"]:
            if k in head:
                healpix = int(head[k])
                log.info("healpix={}={}".format(k, healpix))
                break
        for k in ["HPXNSIDE", "NSIDE"]:
            if k in head:
                nside = int(head[k])
                log.info("nside={}={}".format(k, nside))
                break
        for k in ["HPXNEST", "NESTED", "SCHEME"]:
            if k in head:
                if k == "SCHEME":
                    hpxnest = (head[k] == "NEST")
                else:
                    hpxnest = bool(head[k])
                log.info("hpxnest from {} = {}".format(k, hpxnest))
                break
    if healpix >= 0 and nside < 0:
        log.error("Read healpix in header but not nside.")
        raise ValueError("Read healpix in header but not nside.")

    if healpix < 0:
        vals = os.path.basename(ifilename).split(".")[0].split("-")
        if len(vals) < 3:
            log.error("Cannot guess nside and healpix from filename {}".format(
                ifilename))
            raise ValueError(
                "Cannot guess nside and healpix from filename {}".format(
                    ifilename))
        try:
            healpix = int(vals[-1])
            nside = int(vals[-2])
        except ValueError:
            raise ValueError(
                "Cannot guess nside and healpix from filename {}".format(
                    ifilename))
        log.warning(
            "Guessed healpix and nside from filename, assuming the healpix scheme is 'NESTED'"
        )

    zbest_filename = None
    if args.outfile:
        ofilename = args.outfile
    else:
        ofilename = os.path.join(
            args.outdir,
            "{}/{}/spectra-{}-{}.fits".format(healpix // 100, healpix, nside,
                                              healpix))
    pixdir = os.path.dirname(ofilename)

    if args.zbest:
        zbest_filename = os.path.join(
            pixdir, "zbest-{}-{}.fits".format(nside, healpix))

    if not args.overwrite:
        # check whether output exists or not
        if args.zbest:
            if os.path.isfile(ofilename) and os.path.isfile(zbest_filename):
                log.info("skip existing {} and {}".format(
                    ofilename, zbest_filename))
                return
        else:  # only test spectra file
            if os.path.isfile(ofilename):
                log.info("skip existing {}".format(ofilename))
                return

    log.info("Read skewers in {}, random seed = {}".format(ifilename, seed))

    ##ALMA: It reads the skewers only if there are no DLAs or if they are added randomly.
    if (not args.dla or args.dla == 'random'):
        trans_wave, transmission, metadata = read_lya_skewers(ifilename)
        ok = np.where((metadata['Z'] >= args.zmin)
                      & (metadata['Z'] <= args.zmax))[0]
        transmission = transmission[ok]
        metadata = metadata[:][ok]
##ALMA:Added to read dla_info

    elif (args.dla == 'file'):
        log.info("Read DLA information in {}".format(ifilename))
        trans_wave, transmission, metadata, dla_info = read_lya_skewers(
            ifilename, dla_='TRUE')
        ok = np.where((metadata['Z'] >= args.zmin)
                      & (metadata['Z'] <= args.zmax))[0]
        transmission = transmission[ok]
        metadata = metadata[:][ok]
    else:
        log.error(
            'Not a valid option to add DLAs. Valid options are "random" or "file"'
        )
        sys.exit(1)

    if args.dla:
        dla_NHI, dla_z, dla_id = [], [], []
        dla_filename = os.path.join(pixdir,
                                    "dla-{}-{}.fits".format(nside, healpix))

    if args.desi_footprint:
        footprint_healpix = footprint.radec2pix(footprint_healpix_nside,
                                                metadata["RA"],
                                                metadata["DEC"])
        selection = np.where(
            footprint_healpix_weight[footprint_healpix] > 0.99)[0]
        log.info("Select QSOs in DESI footprint {} -> {}".format(
            transmission.shape[0], selection.size))
        if selection.size == 0:
            log.warning("No intersection with DESI footprint")
            return
        transmission = transmission[selection]
        metadata = metadata[:][selection]

    nqso = transmission.shape[0]
    if args.downsampling is not None:
        if args.downsampling <= 0 or args.downsampling > 1:
            log.error(
                "Down sampling fraction={} must be between 0 and 1".format(
                    args.downsampling))
            raise ValueError(
                "Down sampling fraction={} must be between 0 and 1".format(
                    args.downsampling))
        indices = np.where(np.random.uniform(size=nqso) < args.downsampling)[0]
        if indices.size == 0:
            log.warning(
                "Down sampling from {} to 0 (by chance I presume)".format(
                    nqso))
            return
        transmission = transmission[indices]
        metadata = metadata[:][indices]
        nqso = transmission.shape[0]


##ALMA:added to set transmission to 1 for z>zqso, this can be removed when transmission is corrected.
    for ii in range(len(metadata)):
        transmission[ii][trans_wave > 1215.67 * (metadata[ii]['Z'] + 1)] = 1.0

    if (args.dla == 'file'):
        log.info('Adding DLAs from transmission file')
        min_trans_wave = np.min(trans_wave / 1215.67 - 1)
        for ii in range(len(metadata)):
            if min_trans_wave < metadata[ii]['Z']:
                idd = metadata['MOCKID'][ii]
                dlas = dla_info[dla_info['MOCKID'] == idd]
                dlass = []
                for i in range(len(dlas)):
                    ##Adding only dlas between zqso and 1.95, check again for the next version of London mocks...
                    if (dlas[i]['Z_DLA'] <
                            metadata[ii]['Z']) and (dlas[i]['Z_DLA'] > 1.95):
                        dlass.append(
                            dict(z=dlas[i]['Z_DLA'] + dlas[i]['DZ_DLA'],
                                 N=dlas[i]['N_HI_DLA']))
                if len(dlass) > 0:
                    dla_model = dla_spec(trans_wave, dlass)
                    transmission[ii] = dla_model * transmission[ii]
                    dla_z += [idla['z'] for idla in dlass]
                    dla_NHI += [idla['N'] for idla in dlass]
                    dla_id += [idd] * len(dlass)

    elif (args.dla == 'random'):
        log.info('Adding DLAs randomly')
        min_trans_wave = np.min(trans_wave / 1215.67 - 1)
        for ii in range(len(metadata)):
            if min_trans_wave < metadata[ii]['Z']:
                idd = metadata['MOCKID'][ii]
                dlass, dla_model = insert_dlas(trans_wave, metadata[ii]['Z'])
                if len(dlass) > 0:
                    transmission[ii] = dla_model * transmission[ii]
                    dla_z += [idla['z'] for idla in dlass]
                    dla_NHI += [idla['N'] for idla in dlass]
                    dla_id += [idd] * len(dlass)

    if args.dla:
        if len(dla_id) > 0:
            dla_meta = Table()
            dla_meta['NHI'] = dla_NHI
            dla_meta['z'] = dla_z
            dla_meta['ID'] = dla_id

    if args.nmax is not None:
        if args.nmax < nqso:
            log.info(
                "Limit number of QSOs from {} to nmax={} (random subsample)".
                format(nqso, args.nmax))
            # take a random subsample
            indices = (np.random.uniform(size=args.nmax) * nqso).astype(int)
            transmission = transmission[indices]
            metadata = metadata[:][indices]
            nqso = args.nmax

            if args.dla:
                dla_meta = dla_meta[:][dla_meta['ID'] == metadata['MOCKID']]

    if args.target_selection or args.mags:
        wanted_min_wave = 3329.  # needed to compute magnitudes for decam2014-r (one could have trimmed the transmission file ...)
        wanted_max_wave = 55501.  # needed to compute magnitudes for wise2010-W2

        if trans_wave[0] > wanted_min_wave:
            log.info(
                "Increase wavelength range from {}:{} to {}:{} to compute magnitudes"
                .format(int(trans_wave[0]), int(trans_wave[-1]),
                        int(wanted_min_wave), int(trans_wave[-1])))
            # pad with zeros at short wavelength because we assume transmission = 0
            # and we don't need any wavelength resolution here
            new_trans_wave = np.append([wanted_min_wave, trans_wave[0] - 0.01],
                                       trans_wave)
            new_transmission = np.zeros(
                (transmission.shape[0], new_trans_wave.size))
            new_transmission[:, 2:] = transmission
            trans_wave = new_trans_wave
            transmission = new_transmission

        if trans_wave[-1] < wanted_max_wave:
            log.info(
                "Increase wavelength range from {}:{} to {}:{} to compute magnitudes"
                .format(int(trans_wave[0]), int(trans_wave[-1]),
                        int(trans_wave[0]), int(wanted_max_wave)))
            # pad with ones at long wavelength because we assume transmission = 1
            coarse_dwave = 2.  # we don't care about resolution, we just need a decent QSO spectrum, there is no IGM transmission in this range
            n = int((wanted_max_wave - trans_wave[-1]) / coarse_dwave) + 1
            new_trans_wave = np.append(
                trans_wave,
                np.linspace(trans_wave[-1] + coarse_dwave,
                            trans_wave[-1] + coarse_dwave * (n + 1), n))
            new_transmission = np.ones(
                (transmission.shape[0], new_trans_wave.size))
            new_transmission[:, :trans_wave.size] = transmission
            trans_wave = new_trans_wave
            transmission = new_transmission

    log.info("Simulate {} QSOs".format(nqso))
    tmp_qso_flux, tmp_qso_wave, meta = model.make_templates(
        nmodel=nqso,
        redshift=metadata['Z'],
        lyaforest=False,
        nocolorcuts=True,
        noresample=True,
        seed=seed)

    log.info("Resample to transmission wavelength grid")
    # because we don't want to alter the transmission field with resampling here
    qso_flux = np.zeros((tmp_qso_flux.shape[0], trans_wave.size))
    for q in range(tmp_qso_flux.shape[0]):
        qso_flux[q] = np.interp(trans_wave, tmp_qso_wave, tmp_qso_flux[q])
    tmp_qso_flux = qso_flux
    tmp_qso_wave = trans_wave

    ##To add BALs to be checked by Luz and Jaime
    if (args.balprob):
        if (args.balprob <= 1. and args.balprob > 0):
            log.info("Adding BALs with probability {}".format(args.balprob))
            tmp_qso_flux, meta_bal = bal.insert_bals(tmp_qso_wave,
                                                     tmp_qso_flux,
                                                     metadata['Z'],
                                                     balprob=args.balprob,
                                                     seed=seed)
        else:
            log.error("Probability to add BALs is not between 0 and 1")
            sys.exit(1)

    log.info("Apply lya")
    tmp_qso_flux = apply_lya_transmission(tmp_qso_wave, tmp_qso_flux,
                                          trans_wave, transmission)

    if args.metals is not None:
        lstMetals = ''
        for m in args.metals:
            lstMetals += m + ', '
        log.info("Apply metals: {}".format(lstMetals[:-2]))
        tmp_qso_flux = apply_metals_transmission(tmp_qso_wave, tmp_qso_flux,
                                                 trans_wave, transmission,
                                                 args.metals)

    bbflux = None
    if args.target_selection or args.mags:
        bands = ['FLUX_G', 'FLUX_R', 'FLUX_Z', 'FLUX_W1', 'FLUX_W2']
        bbflux = dict()
        # need to recompute the magnitudes to account for lya transmission
        log.info("Compute QSO magnitudes")
        maggies = decam_and_wise_filters.get_ab_maggies(
            1e-17 * tmp_qso_flux, tmp_qso_wave)
        for band, filt in zip(bands, [
                'decam2014-g', 'decam2014-r', 'decam2014-z', 'wise2010-W1',
                'wise2010-W2'
        ]):

            bbflux[band] = np.ma.getdata(1e9 * maggies[filt])  # nanomaggies

    if args.target_selection:
        log.info("Apply target selection")
        isqso = isQSO_colors(gflux=bbflux['FLUX_G'],
                             rflux=bbflux['FLUX_R'],
                             zflux=bbflux['FLUX_Z'],
                             w1flux=bbflux['FLUX_W1'],
                             w2flux=bbflux['FLUX_W2'])
        log.info("Target selection: {}/{} QSOs selected".format(
            np.sum(isqso), nqso))
        selection = np.where(isqso)[0]
        if selection.size == 0: return
        tmp_qso_flux = tmp_qso_flux[selection]
        metadata = metadata[:][selection]
        meta = meta[:][selection]
        for band in bands:
            bbflux[band] = bbflux[band][selection]
        nqso = selection.size

    log.info("Resample to a linear wavelength grid (needed by DESI sim.)")
    # we need a linear grid; this resampling integrates the flux in bins
    # rather than doing a simple interpolation
    qso_wave = np.linspace(args.wmin, args.wmax,
                           int((args.wmax - args.wmin) / args.dwave) + 1)
    qso_flux = np.zeros((tmp_qso_flux.shape[0], qso_wave.size))
    for q in range(tmp_qso_flux.shape[0]):
        qso_flux[q] = resample_flux(qso_wave, tmp_qso_wave, tmp_qso_flux[q])

    log.info("Simulate DESI observation and write output file")
    pixdir = os.path.dirname(ofilename)
    if len(pixdir) > 0:
        if not os.path.isdir(pixdir):
            log.info("Creating dir {}".format(pixdir))
            os.makedirs(pixdir)

    if "MOCKID" in metadata.dtype.names:
        #log.warning("Using MOCKID as TARGETID")
        targetid = np.array(metadata["MOCKID"]).astype(int)
    elif "ID" in metadata.dtype.names:
        log.warning("Using ID as TARGETID")
        targetid = np.array(metadata["ID"]).astype(int)
    else:
        log.warning("No TARGETID")
        targetid = None

    meta = {"HPXNSIDE": nside, "HPXPIXEL": healpix, "HPXNEST": hpxnest}

    if args.target_selection or args.mags:
        # today we write mags because that's what is in the fibermap
        mags = np.zeros((qso_flux.shape[0], 5))
        for i, band in enumerate(bands):
            jj = (bbflux[band] > 0)
            mags[jj,
                 i] = 22.5 - 2.5 * np.log10(bbflux[band][jj])  # AB magnitudes
        fibermap_columns = {"MAG": mags}
    else:
        fibermap_columns = None

    sim_spectra(qso_wave,
                qso_flux,
                args.program,
                obsconditions=obsconditions,
                spectra_filename=ofilename,
                sourcetype="qso",
                skyerr=args.skyerr,
                ra=metadata["RA"],
                dec=metadata["DEC"],
                targetid=targetid,
                meta=meta,
                seed=seed,
                fibermap_columns=fibermap_columns)

    if args.zbest:
        log.info("Read fibermap")
        fibermap = read_fibermap(ofilename)

        log.info("Writing a zbest file {}".format(zbest_filename))
        columns = [('CHI2', 'f8'), ('COEFF', 'f8', (4, )), ('Z', 'f8'),
                   ('ZERR', 'f8'), ('ZWARN', 'i8'), ('SPECTYPE', (str, 96)),
                   ('SUBTYPE', (str, 16)), ('TARGETID', 'i8'),
                   ('DELTACHI2', 'f8'), ('BRICKNAME', (str, 8))]
        zbest = Table(np.zeros(nqso, dtype=columns))
        zbest["CHI2"][:] = 0.
        zbest["Z"] = metadata['Z']
        zbest["ZERR"][:] = 0.
        zbest["ZWARN"][:] = 0
        zbest["SPECTYPE"][:] = "QSO"
        zbest["SUBTYPE"][:] = ""
        zbest["TARGETID"] = fibermap["TARGETID"]
        zbest["DELTACHI2"][:] = 25.

        hzbest = pyfits.convenience.table_to_hdu(zbest)
        hzbest.name = "ZBEST"
        hfmap = pyfits.convenience.table_to_hdu(fibermap)
        hfmap.name = "FIBERMAP"

        hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), hzbest, hfmap])
        hdulist.writeto(zbest_filename, overwrite=True)
        hdulist.close()  # see if this helps with memory issue

        if args.dla:
            #This will change according to discussion
            log.info("Updating the spectra file to add DLA metadata {}".format(
                ofilename))
            hdudla = pyfits.table_to_hdu(dla_meta)
            hdudla.name = "DLA_META"
            hdul = pyfits.open(ofilename, mode='update')
            hdul.append(hdudla)
            hdul.flush()
            hdul.close()
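
When --outfile is not given, the outputs above follow a fixed healpix layout: files live under {outdir}/{healpix//100}/{healpix}/ and are named spectra-{nside}-{healpix}.fits, with zbest-{nside}-{healpix}.fits written alongside. A small sketch of that path construction (the helper name is illustrative):

import os

def default_output_paths(outdir, nside, healpix):
    """Rebuild the spectra/zbest file names used by simulate_one_healpix."""
    pixdir = os.path.join(outdir, str(healpix // 100), str(healpix))
    spectra = os.path.join(pixdir, 'spectra-{}-{}.fits'.format(nside, healpix))
    zbest = os.path.join(pixdir, 'zbest-{}-{}.fits'.format(nside, healpix))
    return spectra, zbest

print(default_output_paths('out', 16, 1234))
# on POSIX: ('out/12/1234/spectra-16-1234.fits', 'out/12/1234/zbest-16-1234.fits')
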
Code example #36
0
File: bal.py Project: desihub/desisim
    def insert_bals(self, qsowave, qsoflux, qsoredshift, balprob=0.12,
                    seed=None, verbose=False):
        """Probabilistically inserts BALs into one or more QSO spectra.

        Args:
            qsowave (numpy.ndarray): observed-frame wavelength array [Angstrom]
            qsoflux (numpy.ndarray): array of observed frame flux values.
            qsoredshift (numpy.array or float): QSO redshift
            balprob (float, optional): Probability that a QSO is a BAL (default
                0.12).  Only used if QSO(balqso=True) at instantiation.
            seed (int, optional): input seed for the random numbers.
            verbose (bool, optional): Be verbose!

        Returns:
            bal_qsoflux (numpy.ndarray): QSO spectrum with the BAL included.
            balmeta (astropy.Table): metadata table for each BAL.

        """
        from desiutil.log import get_logger, DEBUG
        from desispec.interpolation import resample_flux

        if verbose:
            log = get_logger(DEBUG)
        else:
            log = get_logger()

        rand = np.random.RandomState(seed)

        if balprob < 0:
            log.warning('Balprob {} is negative; setting to zero.'.format(balprob))
            balprob = 0.0
        if balprob > 1:
            log.warning('Balprob {} cannot exceed unity; setting to 1.0.'.format(balprob))
            balprob = 1.0

        nqso, nwave = qsoflux.shape
        if len(qsoredshift) != nqso:
            log.fatal('Dimensions of qsoflux and qsoredshift do not agree!')
            raise ValueError
        
        if qsowave.ndim == 2: # desisim.QSO(resample=True) returns a 2D wavelength array
            w_nqso, w_nwave = qsowave.shape
            if w_nwave != nwave or w_nqso != nqso:
                log.fatal('Dimensions of qsoflux and qsowave do not agree!')
                raise ValueError
        else:
            if len(qsowave) != nwave:
                log.fatal('Dimensions of qsoflux and qsowave do not agree!')
                raise ValueError
        
        balmeta = self.empty_balmeta(qsoredshift)

        # Determine which QSO spectrum has BAL(s) and then loop on each. 
        hasbal = rand.random_sample(nqso) < balprob
        ihasbal = np.where(hasbal)[0]

        # Should probably return a BAL metadata table, too.
        if len(ihasbal) == 0:
            return qsoflux, balmeta

        balindx = rand.choice( len(self.balmeta), len(ihasbal) )
        balmeta['TEMPLATEID'][ihasbal] = balindx

        bal_qsoflux = qsoflux.copy()
        if qsowave.ndim == 2:
            for ii, indx in zip( ihasbal, balindx ):
                thisbalflux = resample_flux(qsowave[ii, :], self.balwave*(1 + qsoredshift[ii]),
                                            self.balflux[indx, :], extrapolate=True)
                bal_qsoflux[ii, :] *= thisbalflux
        else:
            for ii, indx in zip( ihasbal, balindx ):
                thisbalflux = resample_flux(qsowave, self.balwave*(1 + qsoredshift[ii]),
                                            self.balflux[indx, :], extrapolate=True)
                bal_qsoflux[ii, :] *= thisbalflux

        return bal_qsoflux, balmeta
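
Typical usage mirrors the quickquasars call above, bal.insert_bals(tmp_qso_wave, tmp_qso_flux, metadata['Z'], balprob=args.balprob, seed=seed). In the sketch below the class name and import path are assumptions, since only the method is shown here:

import numpy as np
from desisim.bal import BAL  # assumed import path for the class providing insert_bals

qsowave = np.arange(3600.0, 9800.0, 1.0)
qsoflux = np.ones((10, qsowave.size))          # toy QSO spectra
redshift = np.random.uniform(1.8, 3.5, 10)

bal = BAL()                                    # assumed: loads the BAL templates internally
flux_with_bals, balmeta = bal.insert_bals(qsowave, qsoflux, redshift, balprob=0.12, seed=42)
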
Code example #37
0
File: redmonster.py Project: timahutchinson/desispec
    def __init__(self, wave, flux, ivar, R=None, dloglam=1e-4, objtype=None,
                 zrange_galaxy=(0.0, 1.6), zrange_qso=(0.0, 3.5), zrange_star=(-0.005, 0.005),nproc=1,npoly=2):
        """Uses Redmonster to classify and find redshifts.

        See :class:`desispec.zfind.zfind.ZfindBase` class for inputs/outputs.

        optional:
            objtype : list or string of template object types to try
                [ELG, LRG, QSO, GALAXY, STAR]

        TODO: document redmonster specific output variables
        """
        from redmonster.physics.zfinder import ZFinder
        from redmonster.physics.zfitter import ZFitter
        from redmonster.physics.zpicker2 import ZPicker
        
        log=get_logger()
        

        #- RedMonster templates don't quite go far enough into the blue,
        #- so chop off some data
        ii, = np.where(wave>3965)
        wave = wave[ii]
        flux = flux[:, ii]
        ivar = ivar[:, ii]

        #- Resample inputs to a loglam grid
        start = round(np.log10(wave[0]), 4)+dloglam
        stop = round(np.log10(wave[-1]), 4)

        nwave = int((stop-start)/dloglam)
        loglam = start + np.arange(nwave)*dloglam

        nspec = flux.shape[0]
        self.flux = np.empty((nspec, nwave))
        self.ivar = np.empty((nspec, nwave))

        for i in range(nspec):
            self.flux[i], self.ivar[i] = resample_flux(10**loglam, wave, flux[i], ivar[i])

        self.dloglam = dloglam
        self.loglam = loglam
        self.wave = 10**loglam
        self.nwave = nwave
        self.nspec = nspec

        #- Standardize objtype, converting ELG,LRG -> GALAXY, make upper case
        templatetypes = set()
        if objtype is None:
            templatetypes = set(['GALAXY', 'STAR', 'QSO'])
        else:
            if isinstance(objtype, str):
                objtype = [objtype,]
                
            objtype = [x.upper() for x in objtype]
            for x in objtype:
                if x in ['ELG', 'LRG']:
                    templatetypes.add('GALAXY')
                elif x in ['QSO', 'GALAXY', 'STAR']:
                    templatetypes.add(x)
                else:
                    raise ValueError('Unknown objtype '+x)
            
        #- list of (templatename, zmin, zmax) to fix
        self.template_dir = os.getenv('REDMONSTER_TEMPLATES_DIR')
        self.templates = list()
        for x in templatetypes:
            if x == 'GALAXY':
                self.templates.append(('ndArch-ssp_em_galaxy-v000.fits', zrange_galaxy[0], zrange_galaxy[1]))
            elif x == 'STAR':
                self.templates.append(('ndArch-spEigenStar-55734.fits', zrange_star[0], zrange_star[1]))
            elif x == 'QSO':
                self.templates.append(('ndArch-QSO-V003.fits', zrange_qso[0], zrange_qso[1]))
            else:
                raise ValueError("Bad template type "+x)

        #- Find and refine best redshift per template
        self.zfinders = list()
        self.zfitters = list()
        
        for template, zmin, zmax in self.templates:
            start=time.time()
            zfind = ZFinder(os.path.join(self.template_dir, template), npoly=npoly, zmin=zmin, zmax=zmax,nproc=nproc)
            zfind.zchi2(self.flux, self.loglam, self.ivar, npixstep=2)
            stop=time.time()
            log.debug("Time to find the redshifts of %d fibers for template %s =%f sec"%(self.flux.shape[0],template,stop-start))
            start=time.time()
            zfit = ZFitter(zfind.zchi2arr, zfind.zbase)
            zfit.z_refine2()
            stop=time.time()
            log.debug("Time to refine the redshift fit of %d fibers for template %s =%f sec"%(zfit.z.shape[0],template,stop-start))
            
            for ifiber in range(zfit.z.shape[0]) :
                log.debug("(after z_refine2) fiber #%d %s chi2s=%s zs=%s"%(ifiber,template,zfit.chi2vals[ifiber],zfit.z[ifiber]))
            
            self.zfinders.append(zfind)
            self.zfitters.append(zfit)

        #- Create wrapper object needed for zpicker
        specobj = _RedMonsterSpecObj(self.wave, self.flux, self.ivar)
        flags = list()
        for i in range(len(self.zfitters)):
            flags.append(self.zfinders[i].zwarning.astype(int) | \
                         self.zfitters[i].zwarning.astype(int))

        #- Zpicker
        self.zpicker = ZPicker(specobj, self.zfinders, self.zfitters, flags)

        #- Fill in outputs
        self.spectype = np.asarray([self.zpicker.type[i][0] for i in range(nspec)])
        self.subtype = np.asarray([repr(self.zpicker.subtype[i][0]) for i in range(nspec)])
        self.z = np.array([self.zpicker.z[i][0] for i in range(nspec)])
        self.zerr = np.array([self.zpicker.z_err[i][0] for i in range(nspec)])
        self.zwarn = np.array([int(self.zpicker.zwarning[i]) for i in range(nspec)])
        self.model = self.zpicker.models[:,0]

        for ifiber in range(self.z.size):
            log.debug("(after zpicker) fiber #%d z=%s"%(ifiber,self.z[ifiber]))
Code example #38
0
File: trace_shifts.py Project: desihub/desispec
def shift_ycoef_using_external_spectrum(psf,xytraceset,image,fibers,spectrum_filename,degyy=2,width=7) :
    """
    Measure y offsets (external wavelength calibration) from a preprocessed image, a PSF + trace set, using a cross-correlation of boxcar extracted spectra
    and an external well-calibrated spectrum.
    The PSF shape is used to convolve the input spectrum. It could also be used to correct for the PSF asymmetry (disabled for now).
    A relative flux calibration of the spectra is performed internally.

    Args:
        psf : specter PSF
        xytraceset : XYTraceset object
        image : DESI preprocessed image object
        fibers : 1D np.array of fiber indices
        spectrum_filename : path to input spectral file (read with np.loadtxt; first column is wavelength in vacuum and Angstrom, second column is flux in arbitrary units)

    Optional:
        width  : int, extraction boxcar width, default is 7
        degyy  : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.

    Returns:
        ycoef  : 2D np.array of same shape as input, with modified Legendre coefficients for each fiber to convert wavelength to YCCD

    """
    log = get_logger()

    wavemin = xytraceset.wavemin
    wavemax = xytraceset.wavemax
    xcoef   = xytraceset.x_vs_wave_traceset._coeff 
    ycoef   = xytraceset.y_vs_wave_traceset._coeff
    
    tmp=np.loadtxt(spectrum_filename).T
    ref_wave=tmp[0]
    ref_spectrum=tmp[1]
    log.info("read reference spectrum in %s with %d entries"%(spectrum_filename,ref_wave.size))

    log.info("rextract spectra with boxcar")

    # boxcar extraction
    qframe = qproc_boxcar_extraction(xytraceset, image, fibers=fibers, width=7)
        
    # resampling on common finer wavelength grid

    flux, ivar, wave = resample_boxcar_frame(qframe.flux, qframe.ivar, qframe.wave, oversampling=2)
    
    
    # median flux used as internal spectral reference
    mflux=np.median(flux,axis=0)
    mivar=np.median(ivar,axis=0)*flux.shape[0]*(2./np.pi) # very approximate !


    # trim ref_spectrum
    i=(ref_wave>=wave[0])&(ref_wave<=wave[-1])
    ref_wave=ref_wave[i]
    ref_spectrum=ref_spectrum[i]

    # check wave is linear or make it linear
    if np.abs((ref_wave[1]-ref_wave[0])-(ref_wave[-1]-ref_wave[-2]))>0.0001*(ref_wave[1]-ref_wave[0]) :
        log.info("reference spectrum wavelength is not on a linear grid, resample it")
        dwave = np.min(np.gradient(ref_wave))
        tmp_wave = np.linspace(ref_wave[0],ref_wave[-1],int((ref_wave[-1]-ref_wave[0])/dwave))
        ref_spectrum = resample_flux(tmp_wave, ref_wave , ref_spectrum)
        ref_wave = tmp_wave

    i=np.argmax(ref_spectrum)
    central_wave_for_psf_evaluation  = ref_wave[i]
    fiber_for_psf_evaluation = (flux.shape[0]//2)
    try :
        # compute psf at most significant line of ref_spectrum
        dwave=ref_wave[i+1]-ref_wave[i]
        hw=int(3./dwave)+1 # 3A half width
        wave_range = ref_wave[i-hw:i+hw+1]
        x,y=psf.xy(fiber_for_psf_evaluation,wave_range)
        x=np.tile(x[hw]+np.arange(-hw,hw+1)*(y[-1]-y[0])/(2*hw+1),(y.size,1))
        y=np.tile(y,(2*hw+1,1)).T
        kernel2d=psf._value(x,y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
        kernel1d=np.sum(kernel2d,axis=1)
        log.info("convolve reference spectrum using PSF at fiber %d and wavelength %dA"%(fiber_for_psf_evaluation,central_wave_for_psf_evaluation))
        ref_spectrum=fftconvolve(ref_spectrum,kernel1d, mode='same')
    except :
        log.warning("couldn't convolve reference spectrum: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))



    # resample input spectrum
    log.info("resample convolved reference spectrum")
    ref_spectrum = resample_flux(wave, ref_wave , ref_spectrum)

    log.info("absorb difference of calibration")
    x=(wave-wave[wave.size//2])/50.
    kernel=np.exp(-x**2/2)
    f1=fftconvolve(mflux,kernel,mode='same')
    f2=fftconvolve(ref_spectrum,kernel,mode='same')
    if np.all(f2>0) :
        scale=f1/f2
        ref_spectrum *= scale

    log.info("fit shifts on wavelength bins")
    # define bins
    n_wavelength_bins = degyy+4
    y_for_dy=np.array([])
    dy=np.array([])
    ey=np.array([])
    wave_for_dy=np.array([])
    for b in range(n_wavelength_bins) :
        wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
        if b<n_wavelength_bins-1 :
            wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
        else :
            wmax=wave[-1]
        ok=(wave>=wmin)&(wave<=wmax)
        sw= np.sum(mflux[ok]*(mflux[ok]>0))
        if sw==0 :
            continue
        dwave,err = compute_dy_from_spectral_cross_correlation(mflux[ok],wave[ok],ref_spectrum[ok],ivar=mivar[ok],hw=3.)
        bin_wave  = np.sum(mflux[ok]*(mflux[ok]>0)*wave[ok])/sw
        x,y=psf.xy(fiber_for_psf_evaluation,bin_wave)
        eps=0.1
        x,yp=psf.xy(fiber_for_psf_evaluation,bin_wave+eps)
        dydw=(yp-y)/eps
        if err*dydw<1 :
            dy=np.append(dy,-dwave*dydw)
            ey=np.append(ey,err*dydw)
            wave_for_dy=np.append(wave_for_dy,bin_wave)
            y_for_dy=np.append(y_for_dy,y)
            log.info("wave = %fA , y=%d, measured dwave = %f +- %f A"%(bin_wave,y,dwave,err))

    if False : # we don't need this for now
        try :
            log.info("correcting bias due to asymmetry of PSF")

            hw=5
            oversampling=4
            xx=np.tile(np.arange(2*hw*oversampling+1)-hw*oversampling,(2*hw*oversampling+1,1))/float(oversampling)
            yy=xx.T
            x,y=psf.xy(fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
            prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
            dy_asym_central = np.sum(yy*prof)/np.sum(prof)
            for i in range(dy.size) :
                x,y=psf.xy(fiber_for_psf_evaluation,wave_for_dy[i])
                prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,wave_for_dy[i])
                dy_asym = np.sum(yy*prof)/np.sum(prof)
                log.info("y=%f, measured dy=%f , bias due to PSF asymetry = %f"%(y,dy[i],dy_asym-dy_asym_central))
                dy[i] -= (dy_asym-dy_asym_central)
        except :
            log.warning("couldn't correct for asymmetry of PSF: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))

    log.info("polynomial fit of shifts and modification of PSF ycoef")
    # pol fit
    coef = np.polyfit(wave_for_dy,dy,degyy,w=1./ey**2)
    pol  = np.poly1d(coef)

    for i in range(dy.size) :
        log.info("wave=%fA y=%f, measured dy=%f+-%f , pol(wave) = %f"%(wave_for_dy[i],y_for_dy[i],dy[i],ey[i],pol(wave_for_dy[i])))

    log.info("apply this to the PSF ycoef")
    wave = np.linspace(wavemin,wavemax,100)
    dy   = pol(wave)
    dycoef = legfit(legx(wave,wavemin,wavemax),dy,deg=ycoef.shape[1]-1)
    for fiber in range(ycoef.shape[0]) :
        ycoef[fiber] += dycoef

    return ycoef
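
The final Legendre fit works in a reduced coordinate; legx conventionally maps [wavemin, wavemax] onto [-1, 1] via x = 2*(wave - wavemin)/(wavemax - wavemin) - 1. The sketch below re-derives that mapping and the dycoef fit with numpy only; the legx definition is an assumed stand-in, not the desispec import:

import numpy as np
from numpy.polynomial.legendre import legfit, legval

def legx(wave, wavemin, wavemax):
    """Map wavelengths onto the reduced interval [-1, 1] (assumed convention)."""
    return 2.0 * (wave - wavemin) / (wavemax - wavemin) - 1.0

wavemin, wavemax = 3600.0, 9800.0
wave = np.linspace(wavemin, wavemax, 100)
dy = 0.05 + 0.01 * legx(wave, wavemin, wavemax)           # toy shift curve [CCD pixels]
dycoef = legfit(legx(wave, wavemin, wavemax), dy, deg=2)  # same call pattern as in the function above
print(np.allclose(legval(legx(wave, wavemin, wavemax), dycoef), dy))
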
Code example #39
0
File: trace_shifts.py Project: sbailey/desispec
def shift_ycoef_using_external_spectrum(psf,
                                        xytraceset,
                                        image,
                                        fibers,
                                        spectrum_filename,
                                        degyy=2,
                                        width=7):
    """
    Measure y offsets (external wavelength calibration) from a preprocessed image, a PSF + trace set, using a cross-correlation of boxcar extracted spectra
    and an external well-calibrated spectrum.
    The PSF shape is used to convolve the input spectrum. It could also be used to correct for the PSF asymmetry (disabled for now).
    A relative flux calibration of the spectra is performed internally.

    Args:
        psf : specter PSF
        xytraceset : XYTraceset object
        image : DESI preprocessed image object
        fibers : 1D np.array of fiber indices
        spectrum_filename : path to input spectral file (read with np.loadtxt; first column is wavelength in vacuum and Angstrom, second column is flux in arbitrary units)

    Optional:
        width  : int, extraction boxcar width, default is 7
        degyy  : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.

    Returns:
        ycoef  : 2D np.array of same shape as input, with modified Legendre coefficients for each fiber to convert wavelength to YCCD

    """
    log = get_logger()

    wavemin = xytraceset.wavemin
    wavemax = xytraceset.wavemax
    xcoef = xytraceset.x_vs_wave_traceset._coeff
    ycoef = xytraceset.y_vs_wave_traceset._coeff

    tmp = np.loadtxt(spectrum_filename).T
    ref_wave = tmp[0]
    ref_spectrum = tmp[1]
    log.info("read reference spectrum in %s with %d entries" %
             (spectrum_filename, ref_wave.size))

    log.info("rextract spectra with boxcar")

    # boxcar extraction
    qframe = qproc_boxcar_extraction(xytraceset, image, fibers=fibers, width=7)

    # resampling on common finer wavelength grid

    flux, ivar, wave = resample_boxcar_frame(qframe.flux,
                                             qframe.ivar,
                                             qframe.wave,
                                             oversampling=2)

    # median flux used as internal spectral reference
    mflux = np.median(flux, axis=0)
    mivar = np.median(ivar, axis=0) * flux.shape[0] * (2. / np.pi)  # very approximate!

    # trim ref_spectrum
    i = (ref_wave >= wave[0]) & (ref_wave <= wave[-1])
    ref_wave = ref_wave[i]
    ref_spectrum = ref_spectrum[i]

    # check wave is linear or make it linear
    if np.abs(
        (ref_wave[1] - ref_wave[0]) -
        (ref_wave[-1] - ref_wave[-2])) > 0.0001 * (ref_wave[1] - ref_wave[0]):
        log.info(
            "reference spectrum wavelength is not on a linear grid, resample it"
        )
        dwave = np.min(np.gradient(ref_wave))
        tmp_wave = np.linspace(ref_wave[0], ref_wave[-1],
                               int((ref_wave[-1] - ref_wave[0]) / dwave))
        ref_spectrum = resample_flux(tmp_wave, ref_wave, ref_spectrum)
        ref_wave = tmp_wave

    i = np.argmax(ref_spectrum)
    central_wave_for_psf_evaluation = ref_wave[i]
    fiber_for_psf_evaluation = (flux.shape[0] // 2)
    try:
        # compute psf at most significant line of ref_spectrum
        dwave = ref_wave[i + 1] - ref_wave[i]
        hw = int(3. / dwave) + 1  # 3A half width
        wave_range = ref_wave[i - hw:i + hw + 1]
        x, y = psf.xy(fiber_for_psf_evaluation, wave_range)
        x = np.tile(
            x[hw] + np.arange(-hw, hw + 1) * (y[-1] - y[0]) / (2 * hw + 1),
            (y.size, 1))
        y = np.tile(y, (2 * hw + 1, 1)).T
        kernel2d = psf._value(x, y, fiber_for_psf_evaluation,
                              central_wave_for_psf_evaluation)
        kernel1d = np.sum(kernel2d, axis=1)
        log.info(
            "convolve reference spectrum using PSF at fiber %d and wavelength %dA"
            % (fiber_for_psf_evaluation, central_wave_for_psf_evaluation))
        ref_spectrum = fftconvolve(ref_spectrum, kernel1d, mode='same')
    except:
        log.warning("couldn't convolve reference spectrum: %s %s" %
                    (sys.exc_info()[0], sys.exc_info()[1]))

    # resample input spectrum
    log.info("resample convolved reference spectrum")
    ref_spectrum = resample_flux(wave, ref_wave, ref_spectrum)

    log.info("absorb difference of calibration")
    x = (wave - wave[wave.size // 2]) / 50.
    kernel = np.exp(-x**2 / 2)
    f1 = fftconvolve(mflux, kernel, mode='same')
    f2 = fftconvolve(ref_spectrum, kernel, mode='same')
    if np.all(f2 > 0):
        scale = f1 / f2
        ref_spectrum *= scale

    log.info("fit shifts on wavelength bins")
    # define bins
    n_wavelength_bins = degyy + 4
    y_for_dy = np.array([])
    dy = np.array([])
    ey = np.array([])
    wave_for_dy = np.array([])
    for b in range(n_wavelength_bins):
        wmin = wave[0] + ((wave[-1] - wave[0]) / n_wavelength_bins) * b
        if b < n_wavelength_bins - 1:
            wmax = wave[0] + (
                (wave[-1] - wave[0]) / n_wavelength_bins) * (b + 1)
        else:
            wmax = wave[-1]
        ok = (wave >= wmin) & (wave <= wmax)
        sw = np.sum(mflux[ok] * (mflux[ok] > 0))
        if sw == 0:
            continue
        dwave, err = compute_dy_from_spectral_cross_correlation(
            mflux[ok], wave[ok], ref_spectrum[ok], ivar=mivar[ok], hw=10.)
        bin_wave = np.sum(mflux[ok] * (mflux[ok] > 0) * wave[ok]) / sw
        x, y = psf.xy(fiber_for_psf_evaluation, bin_wave)
        eps = 0.1
        x, yp = psf.xy(fiber_for_psf_evaluation, bin_wave + eps)
        dydw = (yp - y) / eps
        if err * dydw < 1:
            dy = np.append(dy, -dwave * dydw)
            ey = np.append(ey, err * dydw)
            wave_for_dy = np.append(wave_for_dy, bin_wave)
            y_for_dy = np.append(y_for_dy, y)
            log.info("wave = %fA , y=%d, measured dwave = %f +- %f A" %
                     (bin_wave, y, dwave, err))

    if False:  # we don't need this for now
        try:
            log.info("correcting bias due to asymmetry of PSF")

            hw = 5
            oversampling = 4
            xx = np.tile(
                np.arange(2 * hw * oversampling + 1) - hw * oversampling,
                (2 * hw * oversampling + 1, 1)) / float(oversampling)
            yy = xx.T
            x, y = psf.xy(fiber_for_psf_evaluation,
                          central_wave_for_psf_evaluation)
            prof = psf._value(xx + x, yy + y, fiber_for_psf_evaluation,
                              central_wave_for_psf_evaluation)
            dy_asym_central = np.sum(yy * prof) / np.sum(prof)
            for i in range(dy.size):
                x, y = psf.xy(fiber_for_psf_evaluation, wave_for_dy[i])
                prof = psf._value(xx + x, yy + y, fiber_for_psf_evaluation,
                                  wave_for_dy[i])
                dy_asym = np.sum(yy * prof) / np.sum(prof)
                log.info(
                    "y=%f, measured dy=%f , bias due to PSF asymetry = %f" %
                    (y, dy[i], dy_asym - dy_asym_central))
                dy[i] -= (dy_asym - dy_asym_central)
        except:
            log.warning("couldn't correct for asymmetry of PSF: %s %s" %
                        (sys.exc_info()[0], sys.exc_info()[1]))

    log.info("polynomial fit of shifts and modification of PSF ycoef")
    # pol fit
    coef = np.polyfit(wave_for_dy, dy, degyy, w=1. / ey**2)
    pol = np.poly1d(coef)

    for i in range(dy.size):
        log.info(
            "wave=%fA y=%f, measured dy=%f+-%f , pol(wave) = %f" %
            (wave_for_dy[i], y_for_dy[i], dy[i], ey[i], pol(wave_for_dy[i])))

    log.info("apply this to the PSF ycoef")
    wave = np.linspace(wavemin, wavemax, 100)
    dy = pol(wave)
    dycoef = legfit(legx(wave, wavemin, wavemax), dy, deg=ycoef.shape[1] - 1)
    for fiber in range(ycoef.shape[0]):
        ycoef[fiber] += dycoef

    return ycoef
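
A minimal, self-contained sketch of the final step above (toy numbers, not the desispec implementation): turn measured y-shifts at a few wavelengths into an additive Legendre correction on the per-fiber ycoef. It assumes the reduced coordinate produced by legx() is x = 2*(wave - wavemin)/(wavemax - wavemin) - 1.

# Hedged sketch; the reduced coordinate below is an assumption standing in for legx().
import numpy as np
from numpy.polynomial.legendre import legfit, legval

wavemin, wavemax = 3600.0, 5800.0
degyy = 2
# toy measured shifts (pixels) and errors at a few wavelength-bin centers
wave_for_dy = np.array([3800., 4300., 4800., 5300.])
dy = np.array([0.05, 0.02, -0.01, -0.04])
ey = np.array([0.01, 0.01, 0.02, 0.02])

# weighted polynomial fit of dy(wave), evaluated on a fine grid
pol = np.poly1d(np.polyfit(wave_for_dy, dy, degyy, w=1. / ey**2))
wave = np.linspace(wavemin, wavemax, 100)
x = 2. * (wave - wavemin) / (wavemax - wavemin) - 1.   # assumed legx() equivalent

# Legendre coefficients of the shift, added to every fiber's trace coefficients
ycoef = np.zeros((5, 4))                               # toy [nfiber, ncoef] array
dycoef = legfit(x, pol(wave), deg=ycoef.shape[1] - 1)
ycoef += dycoef
print(legval(x, dycoef)[:3])                           # correction back on the grid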
コード例 #40
0
ファイル: templates.py プロジェクト: akremin/desisim
    def make_templates(self, zrange=(0.6,1.6), rmagrange=(21.0,23.4),
                       oiiihbrange=(-0.5,0.1), oiidoublet_meansig=(0.73,0.05),
                       linesigma_meansig=(1.887,0.175), minoiiflux=1E-17,
                       no_colorcuts=False):
        """Build Monte Carlo set of ELG spectra/templates.

        This function chooses random subsets of the ELG continuum spectra, constructs
        an emission-line spectrum, redshifts, and then finally normalizes the spectrum
        to a specific r-band magnitude.

        TODO (@moustakas): optionally normalize to a g-band magnitude

        Args:
          zrange (float, optional): Minimum and maximum redshift range.  Defaults
            to a uniform distribution between (0.6,1.6).
          rmagrange (float, optional): Minimum and maximum DECam r-band (AB)
            magnitude range.  Defaults to a uniform distribution between (21,23.4).
          oiiihbrange (float, optional): Minimum and maximum logarithmic
            [OIII] 5007/H-beta line-ratio.  Defaults to a uniform distribution
            between (-0.5,0.1).
        
          oiidoublet_meansig (float, optional): Mean and sigma values for the (Gaussian) 
            [OII] 3726/3729 doublet ratio distribution.  Defaults to (0.73,0.05).
          linesigma_meansig (float, optional): *Logarithmic* mean and sigma values for the
            (Gaussian) emission-line velocity width distribution.  Defaults to
            log10-sigma = 1.887 +/- 0.175 km/s.

          minoiiflux (float, optional): Minimum [OII] 3727 flux [default 1E-17 erg/s/cm2].
            Set this parameter to zero to not have a minimum flux cut.
          no_colorcuts (bool, optional): Do not apply the fiducial grz color cuts
            (default False).
        
        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column

        from desisim.templates import EMSpectrum
        from desispec.interpolation import resample_flux

        # Initialize the EMSpectrum object with the same wavelength array as
        # the "base" (continuum) templates so that we don't have to resample. 
        EM = EMSpectrum(log10wave=np.log10(self.basewave),seed=self.seed)
       
        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['W1MAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIFLUX'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['EWOII'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIIHBETA'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['OIIDOUBLET'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['LINESIGMA'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['D4000'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['OIIFLUX'].unit = 'erg/s/cm2'
        meta['EWOII'].unit = 'Angstrom'
        meta['OIIIHBETA'].unit = 'dex'
        meta['LINESIGMA'].unit = 'km/s'

        comments = dict(
            TEMPLATEID = 'template ID',
            REDSHIFT = 'object redshift',
            GMAG = 'DECam g-band AB magnitude',
            RMAG = 'DECam r-band AB magnitude',
            ZMAG = 'DECam z-band AB magnitude',
            W1MAG = 'WISE W1-band AB magnitude',
            OIIFLUX = '[OII] 3727 flux',
            EWOII = 'rest-frame equivalent width of [OII] 3727',
            OIIIHBETA = 'logarithmic [OIII] 5007/H-beta ratio',
            OIIDOUBLET = '[OII] 3726/3729 doublet ratio',
            LINESIGMA = 'emission line velocity width',
            D4000 = '4000-Angstrom break'
        )

        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and r-magnitude distributions.
            redshift = self.rand.uniform(zrange[0],zrange[1],nchunk)
            rmag = self.rand.uniform(rmagrange[0],rmagrange[1],nchunk)

            # Assume the emission-line priors are uncorrelated.
            oiiihbeta = self.rand.uniform(oiiihbrange[0],oiiihbrange[1],nchunk)
            oiidoublet = self.rand.normal(oiidoublet_meansig[0],
                                          oiidoublet_meansig[1],nchunk)
            linesigma = self.rand.normal(linesigma_meansig[0],
                                         linesigma_meansig[1],nchunk)

            d4000 = self.basemeta['D4000'][chunkindx]
            ewoii = 10.0**(np.polyval([1.1074,-4.7338,5.6585],d4000)+ 
                           self.rand.normal(0.0,0.3)) # rest-frame, Angstrom

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])

                # Add the continuum and emission-line spectra with the
                # right [OII] flux [erg/s/cm2]
                oiiflux = self.basemeta['OII_CONTINUUM'][iobj]*ewoii[ii] 
                emflux, emwave, emline = EM.spectrum(linesigma=linesigma[ii],
                                                      oiidoublet=oiidoublet[ii],
                                                      oiiihbeta=oiiihbeta[ii],
                                                      oiiflux=oiiflux)
                restflux = self.baseflux[iobj,:] + emflux # [erg/s/cm2/A @10pc]
                rnorm = 10.0**(-0.4*rmag[ii])/self.rfilt.get_maggies(zwave,restflux)
                flux = restflux*rnorm # [erg/s/cm2/A, @redshift[ii]]

                # [grz]flux are in nanomaggies
                rflux = 10.0**(-0.4*(rmag[ii]-22.5))                      
                gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                w1flux = self.w1filt.get_maggies(zwave,flux)*10**(0.4*22.5) 

                zoiiflux = oiiflux*rnorm # [erg/s/cm2]
                oiimask = [zoiiflux>minoiiflux]

                if no_colorcuts:
                    grzmask = [True]
                else:
                    grzmask = [Cuts.ELG(gflux=gflux,rflux=rflux,zflux=zflux)]

                if all(grzmask) and all(oiimask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    meta['TEMPLATEID'][nobj] = nobj
                    meta['REDSHIFT'][nobj] = redshift[ii]
                    meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                    meta['RMAG'][nobj] = rmag[ii]
                    meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                    meta['W1MAG'][nobj] = -2.5*np.log10(w1flux)+22.5
                    meta['OIIFLUX'][nobj] = zoiiflux
                    meta['EWOII'][nobj] = ewoii[ii]
                    meta['OIIIHBETA'][nobj] = oiiihbeta[ii]
                    meta['OIIDOUBLET'][nobj] = oiidoublet[ii]
                    meta['LINESIGMA'][nobj] = linesigma[ii]
                    meta['D4000'][nobj] = d4000[ii]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break

        return outflux, self.wave, meta
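
An aside on the unit bookkeeping above (a sketch with made-up numbers, not part of the template code): the [grz,W1] fluxes are kept in nanomaggies, so an AB magnitude m corresponds to flux = 10**(-0.4*(m - 22.5)) and back, and the rest-frame spectrum is rescaled by 10**(-0.4*rmag) divided by its synthesized maggies (the role played by self.rfilt.get_maggies).

import numpy as np

rmag = 22.7                                   # requested AB magnitude
rflux = 10.0**(-0.4 * (rmag - 22.5))          # nanomaggies
assert np.isclose(-2.5 * np.log10(rflux) + 22.5, rmag)

# rescale an unnormalized spectrum to the requested r-band magnitude;
# maggies_unnorm is a hypothetical value standing in for rfilt.get_maggies()
maggies_unnorm = 3.2e-9
rnorm = 10.0**(-0.4 * rmag) / maggies_unnorm
print(rnorm)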
コード例 #41
0
ファイル: quickgen.py プロジェクト: desihub/desisim
def main(args):

    # Set up the logger
    if args.verbose:
        log = get_logger(DEBUG)
    else:
        log = get_logger()

    # Make sure all necessary environment variables are set
    DESI_SPECTRO_REDUX_DIR="./quickGen"

    if 'DESI_SPECTRO_REDUX' not in os.environ:

        log.info('DESI_SPECTRO_REDUX environment variable is not set.')

    else:
        DESI_SPECTRO_REDUX_DIR=os.environ['DESI_SPECTRO_REDUX']

    if os.path.exists(DESI_SPECTRO_REDUX_DIR):

        if not os.path.isdir(DESI_SPECTRO_REDUX_DIR):
            raise RuntimeError("Path %s Not a directory"%DESI_SPECTRO_REDUX_DIR)
    else:
        try:
            os.makedirs(DESI_SPECTRO_REDUX_DIR)
        except:
            raise

    SPECPROD_DIR='specprod'
    if 'SPECPROD' not in os.environ:
        log.info('SPECPROD environment variable is not set.')
    else:
        SPECPROD_DIR=os.environ['SPECPROD']
    prod_Dir=specprod_root()

    if os.path.exists(prod_Dir):

        if not os.path.isdir(prod_Dir):
            raise RuntimeError("Path %s Not a directory"%prod_Dir)
    else:
        try:
            os.makedirs(prod_Dir)
        except:
            raise

    # Initialize random number generator to use.
    np.random.seed(args.seed)
    random_state = np.random.RandomState(args.seed)

    # Derive spectrograph number from nstart if needed
    if args.spectrograph is None:
        args.spectrograph = args.nstart // 500

    # Read fibermapfile to get object type, night and expid
    if args.fibermap:
        log.info("Reading fibermap file {}".format(args.fibermap))
        fibermap=read_fibermap(args.fibermap)
        objtype = get_source_types(fibermap)
        stdindx=np.where(objtype=='STD') # match STD with STAR
        mwsindx=np.where(objtype=='MWS_STAR') # match MWS_STAR with STAR
        bgsindx=np.where(objtype=='BGS') # match BGS with LRG
        objtype[stdindx]='STAR'
        objtype[mwsindx]='STAR'
        objtype[bgsindx]='LRG'
        NIGHT=fibermap.meta['NIGHT']
        EXPID=fibermap.meta['EXPID']
    else:
        # Create a blank fake fibermap
        fibermap = empty_fibermap(args.nspec)
        targetids = random_state.randint(2**62, size=args.nspec)
        fibermap['TARGETID'] = targetids
        NIGHT = get_night()
        EXPID = 0

    log.info("Initializing SpecSim with config {}".format(args.config))
    desiparams = load_desiparams()
    qsim = get_simulator(args.config, num_fibers=1)

    if args.simspec:
        # Read the input file
        log.info('Reading input file {}'.format(args.simspec))
        simspec = desisim.io.read_simspec(args.simspec)
        nspec = simspec.nspec
        if simspec.flavor == 'arc':
            log.warning("quickgen doesn't generate flavor=arc outputs")
            return
        else:
            wavelengths = simspec.wave
            spectra = simspec.flux
        if nspec < args.nspec:
            log.info("Only {} spectra in input file".format(nspec))
            args.nspec = nspec

    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = desisim.templates.ELG(wave=wavelengths, add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(nmodel=nobj, seed=args.seed, zrange=args.zrange_elg,sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = desisim.templates.LRG(wave=wavelengths, add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(nmodel=nobj, seed=args.seed, zrange=args.zrange_lrg,sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = desisim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = desisim.templates.BGS(wave=wavelengths, add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(nmodel=nobj, seed=args.seed, zrange=args.zrange_bgs,rmagrange=args.rmagrange_bgs,sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj =='STD':
                std = desisim.templates.STD(wave=wavelengths)
                flux, tmpwave, meta1 = std.make_templates(nmodel=nobj, seed=args.seed)
            elif thisobj == 'QSO_BAD': # use STAR template no color cuts
                star = desisim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj, seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = desisim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj, seed=args.seed)
            elif thisobj == 'WD':
                wd = desisim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj, seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wavelengths>5800.0-1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec)%2 == 0).astype(np.float32)
                continuum   = (np.arange(args.nspec)%2 == 1).astype(np.float32)

                for spec in range(args.nspec) :
                    flux[spec,indx] = single_line[spec]*ref_integrated_flux/np.gradient(wavelengths)[indx] # single line
                    flux[spec] += continuum[spec]*ref_cst_flux_density # flat continuum

                meta1 = Table(dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                                   LINE=wavelengths[indx]*np.ones(args.nspec, dtype=np.float32),
                                   LINEFLUX=single_line*ref_integrated_flux,
                                   CONSTFLUXDENSITY=continuum*ref_cst_flux_density))
            else:
                log.fatal('Unknown object type {}'.format(thisobj))
                sys.exit(1)

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj,[])
        meta_new = Table()
        for k in range(args.nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    # explicitly set location on focal plane if needed to support airmass
    # variations when using specsim v0.5
    if qsim.source.focal_xy is None:
        qsim.source.focal_xy = (u.Quantity(0, 'mm'), u.Quantity(100, 'mm'))

    # Set simulation parameters from the simspec header or desiparams
    bright_objects = ['bgs','mws','bright','BGS','MWS','BRIGHT_MIX']
    gray_objects = ['gray','grey']
    if args.simspec is None:
        object_type = objtype
        flavor = None
    elif simspec.flavor == 'science':
        object_type = None
        flavor = simspec.header['PROGRAM']
    else:
        object_type = None
        flavor = simspec.flavor
        log.warning('Maybe using an outdated simspec file with flavor={}'.format(flavor))

    # Set airmass
    if args.airmass is not None:
        qsim.atmosphere.airmass = args.airmass
    elif args.simspec and 'AIRMASS' in simspec.header:
        qsim.atmosphere.airmass = simspec.header['AIRMASS']
    else:
        qsim.atmosphere.airmass =  1.25   # Science Req. Doc L3.3.2
        
    # Set exptime
    if args.exptime is not None:
        qsim.observation.exposure_time = args.exptime * u.s
    elif args.simspec and 'EXPTIME' in simspec.header:
        qsim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
    elif objtype in bright_objects:
        qsim.observation.exposure_time = desiparams['exptime_bright'] * u.s
    else:
        qsim.observation.exposure_time = desiparams['exptime_dark'] * u.s

    # Set Moon Phase
    if args.moon_phase is not None:
        qsim.atmosphere.moon.moon_phase = args.moon_phase
    elif args.simspec and 'MOONFRAC' in simspec.header:
        qsim.atmosphere.moon.moon_phase = simspec.header['MOONFRAC']
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_phase = 0.7
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_phase = 0.1
    else:
        qsim.atmosphere.moon.moon_phase = 0.5
        
    # Set Moon Zenith
    if args.moon_zenith is not None:
        qsim.atmosphere.moon.moon_zenith = args.moon_zenith * u.deg
    elif args.simspec and 'MOONALT' in simspec.header:
        qsim.atmosphere.moon.moon_zenith = simspec.header['MOONALT'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_zenith = 30 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_zenith = 80 * u.deg
    else:
        qsim.atmosphere.moon.moon_zenith = 100 * u.deg

    # Set Moon - Object Angle
    if args.moon_angle is not None:
        qsim.atmosphere.moon.separation_angle = args.moon_angle * u.deg
    elif args.simspec and 'MOONSEP' in simspec.header:
        qsim.atmosphere.moon.separation_angle = simspec.header['MOONSEP'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.separation_angle = 50 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg
    else:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg

    # Initialize per-camera output arrays that will be saved
    waves, trueflux, noisyflux, obsivar, resolution, sflux = {}, {}, {}, {}, {}, {}

    maxbin = 0
    nmax= args.nspec
    for camera in qsim.instrument.cameras:
        # Lookup this camera's resolution matrix and convert to the sparse
        # format used in desispec.
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(), [args.nspec, 1, 1])
        waves[camera.name] = (camera.output_wavelength.to(u.Angstrom).value.astype(np.float32))
        nwave = len(waves[camera.name])
        maxbin = max(maxbin, len(waves[camera.name]))
        nobj = np.zeros((nmax,3,maxbin)) # object photons
        nsky = np.zeros((nmax,3,maxbin)) # sky photons
        nivar = np.zeros((nmax,3,maxbin)) # inverse variance (object+sky)
        cframe_observedflux = np.zeros((nmax,3,maxbin))  # calibrated object flux
        cframe_ivar = np.zeros((nmax,3,maxbin)) # inverse variance of calibrated object flux
        cframe_rand_noise = np.zeros((nmax,3,maxbin)) # random Gaussian noise to calibrated flux
        sky_ivar = np.zeros((nmax,3,maxbin)) # inverse variance of sky
        sky_rand_noise = np.zeros((nmax,3,maxbin)) # random Gaussian noise to sky only
        frame_rand_noise = np.zeros((nmax,3,maxbin)) # random Gaussian noise to nobj+nsky
        trueflux[camera.name] = np.empty((args.nspec, nwave)) # calibrated flux
        noisyflux[camera.name] = np.empty((args.nspec, nwave)) # observed flux with noise
        obsivar[camera.name] = np.empty((args.nspec, nwave)) # inverse variance of flux
        if args.simspec:
            for i in range(10):
                cn = camera.name + str(i)
                if cn in simspec.cameras:
                    dw = np.gradient(simspec.cameras[cn].wave)
                    break
            else:
                raise RuntimeError('Unable to find a {} camera in input simspec'.format(camera))
        else:
            sflux = np.empty((args.nspec, npix))

    #- Check if input simspec is for a continuum flat lamp instead of science
    #- This does not convolve to per-fiber resolution
    if args.simspec:
        if simspec.flavor == 'flat':
            log.info("Simulating flat lamp exposure")
            for i,camera in enumerate(qsim.instrument.cameras):
                channel = camera.name   #- from simspec, b/r/z not b0/r1/z9
                assert camera.output_wavelength.unit == u.Angstrom
                num_pixels = len(waves[channel])

                phot = list()
                for j in range(10):
                    cn = camera.name + str(j)
                    if cn in simspec.cameras:
                        camwave = simspec.cameras[cn].wave
                        dw = np.gradient(camwave)
                        phot.append(simspec.cameras[cn].phot)

                if len(phot) == 0:
                    raise RuntimeError('Unable to find a {} camera in input simspec'.format(camera))
                else:
                    phot = np.vstack(phot)

                meanspec = resample_flux(
                    waves[channel], camwave, np.average(phot/dw, axis=0))

                fiberflat = random_state.normal(loc=1.0,
                    scale=1.0 / np.sqrt(meanspec), size=(nspec, num_pixels))
                ivar = np.tile(meanspec, [nspec, 1])
                mask = np.zeros((simspec.nspec, num_pixels), dtype=np.uint32)

                for kk in range((args.nspec+args.nstart-1)//500+1):
                    camera = channel+str(kk)
                    outfile = desispec.io.findfile('fiberflat', NIGHT, EXPID, camera)
                    start=max(500*kk,args.nstart)
                    end=min(500*(kk+1),nmax)

                    if (args.spectrograph <= kk):
                        log.info("Writing files for channel:{}, spectrograph:{}, spectra:{} to {}".format(channel,kk,start,end))

                    ff = FiberFlat(
                        waves[channel], fiberflat[start:end,:],
                        ivar[start:end,:], mask[start:end,:], meanspec,
                        header=dict(CAMERA=camera))
                    write_fiberflat(outfile, ff)
                    filePath=desispec.io.findfile("fiberflat",NIGHT,EXPID,camera)
                    log.info("Wrote file {}".format(filePath))

            sys.exit(0)

    # Repeat the simulation for all spectra
    fluxunits = 1e-17 * u.erg / (u.s * u.cm ** 2 * u.Angstrom)
    for j in range(args.nspec):

        thisobjtype = objtype[j]
        sys.stdout.flush()
        if flavor == 'arc':
            qsim.source.update_in(
                'Quickgen source {0}'.format(j), 'perfect',
                wavelengths * u.Angstrom, spectra * fluxunits)
        else:
            qsim.source.update_in(
                'Quickgen source {0}'.format(j), thisobjtype.lower(),
                wavelengths * u.Angstrom, spectra[j, :] * fluxunits)
        qsim.source.update_out()

        qsim.simulate()
        qsim.generate_random_noise(random_state)

        for i, output in enumerate(qsim.camera_output):
            assert output['observed_flux'].unit == 1e17 * fluxunits
            # Extract the simulation results needed to create our uncalibrated
            # frame output file.
            num_pixels = len(output)
            nobj[j, i, :num_pixels] = output['num_source_electrons'][:,0]
            nsky[j, i, :num_pixels] = output['num_sky_electrons'][:,0]
            nivar[j, i, :num_pixels] = 1.0 / output['variance_electrons'][:,0]

            # Get results for our flux-calibrated output file.
            cframe_observedflux[j, i, :num_pixels] = 1e17 * output['observed_flux'][:,0]
            cframe_ivar[j, i, :num_pixels] = 1e-34 * output['flux_inverse_variance'][:,0]

            # Fill brick arrays from the results.
            camera = output.meta['name']
            trueflux[camera][j][:] = 1e17 * output['observed_flux'][:,0]
            noisyflux[camera][j][:] = 1e17 * (output['observed_flux'][:,0] +
                output['flux_calibration'][:,0] * output['random_noise_electrons'][:,0])
            obsivar[camera][j][:] = 1e-34 * output['flux_inverse_variance'][:,0]

            # Use the same noise realization in the cframe and frame, without any
            # additional noise from sky subtraction for now.
            frame_rand_noise[j, i, :num_pixels] = output['random_noise_electrons'][:,0]
            cframe_rand_noise[j, i, :num_pixels] = 1e17 * (
                output['flux_calibration'][:,0] * output['random_noise_electrons'][:,0])

            # The sky output file represents a model fit to ~40 sky fibers.
            # We reduce the variance by a factor of 25 to account for this and
            # give the sky an independent (Gaussian) noise realization.
            sky_ivar[j, i, :num_pixels] = 25.0 / (
                output['variance_electrons'][:,0] - output['num_source_electrons'][:,0])
            sky_rand_noise[j, i, :num_pixels] = random_state.normal(
                scale=1.0 / np.sqrt(sky_ivar[j,i,:num_pixels]),size=num_pixels)

    armName={"b":0,"r":1,"z":2}
    for channel in 'brz':

        #Before writing, convert from counts/bin to counts/A (as in Pixsim output)
        #Quicksim Default:
        #FLUX - input spectrum resampled to this binning; no noise added [1e-17 erg/s/cm2/Ang]
        #COUNTS_OBJ - object counts in 0.5 Ang bin
        #COUNTS_SKY - sky counts in 0.5 Ang bin

        num_pixels = len(waves[channel])
        dwave=np.gradient(waves[channel])
        nobj[:,armName[channel],:num_pixels]/=dwave
        frame_rand_noise[:,armName[channel],:num_pixels]/=dwave
        nivar[:,armName[channel],:num_pixels]*=dwave**2
        nsky[:,armName[channel],:num_pixels]/=dwave
        sky_rand_noise[:,armName[channel],:num_pixels]/=dwave
        sky_ivar[:,armName[channel],:num_pixels]/=dwave**2

        # Now write the outputs in the DESI standard file system. None of the output files can have more than 500 spectra

        # Looping over spectrograph
        for ii in range((args.nspec+args.nstart-1)//500+1):

            start=max(500*ii,args.nstart) # first spectrum for a given spectrograph
            end=min(500*(ii+1),nmax) # last spectrum for the spectrograph

            if (args.spectrograph <= ii):
                camera = "{}{}".format(channel, ii)
                log.info("Writing files for channel:{}, spectrograph:{}, spectra:{} to {}".format(channel,ii,start,end))
                num_pixels = len(waves[channel])

                # Write frame file
                framefileName=desispec.io.findfile("frame",NIGHT,EXPID,camera)

                frame_flux=nobj[start:end,armName[channel],:num_pixels]+ \
                nsky[start:end,armName[channel],:num_pixels] + \
                frame_rand_noise[start:end,armName[channel],:num_pixels]
                frame_ivar=nivar[start:end,armName[channel],:num_pixels]

                sh1=frame_flux.shape[0]  # required for slicing the resolution matrix; it has shape (nspec, ndiag, nwave)
                                         # e.g. if nstart=400 and nspec=150, two spectrographs are used:
                                         # 400-499 => spectrograph 0, 500-549 => spectrograph 1
                if (args.nstart==start):
                    resol=resolution[channel][:sh1,:,:]
                else:
                    resol=resolution[channel][-sh1:,:,:]

                # must create desispec.Frame object
                frame=Frame(waves[channel], frame_flux, frame_ivar,\
                    resolution_data=resol, spectrograph=ii, \
                    fibermap=fibermap[start:end], \
                    meta=dict(CAMERA=camera, FLAVOR=simspec.flavor) )
                desispec.io.write_frame(framefileName, frame)

                framefilePath=desispec.io.findfile("frame",NIGHT,EXPID,camera)
                log.info("Wrote file {}".format(framefilePath))

                if args.frameonly or simspec.flavor == 'arc':
                    continue

                # Write cframe file
                cframeFileName=desispec.io.findfile("cframe",NIGHT,EXPID,camera)
                cframeFlux=cframe_observedflux[start:end,armName[channel],:num_pixels]+cframe_rand_noise[start:end,armName[channel],:num_pixels]
                cframeIvar=cframe_ivar[start:end,armName[channel],:num_pixels]

                # must create desispec.Frame object
                cframe = Frame(waves[channel], cframeFlux, cframeIvar, \
                    resolution_data=resol, spectrograph=ii,
                    fibermap=fibermap[start:end],
                    meta=dict(CAMERA=camera, FLAVOR=simspec.flavor) )
                desispec.io.frame.write_frame(cframeFileName,cframe)

                cframefilePath=desispec.io.findfile("cframe",NIGHT,EXPID,camera)
                log.info("Wrote file {}".format(cframefilePath))

                # Write sky file
                skyfileName=desispec.io.findfile("sky",NIGHT,EXPID,camera)
                skyflux=nsky[start:end,armName[channel],:num_pixels] + \
                sky_rand_noise[start:end,armName[channel],:num_pixels]
                skyivar=sky_ivar[start:end,armName[channel],:num_pixels]
                skymask=np.zeros(skyflux.shape, dtype=np.uint32)

                # must create desispec.Sky object
                skymodel = SkyModel(waves[channel], skyflux, skyivar, skymask,
                    header=dict(CAMERA=camera))
                desispec.io.sky.write_sky(skyfileName, skymodel)

                skyfilePath=desispec.io.findfile("sky",NIGHT,EXPID,camera)
                log.info("Wrote file {}".format(skyfilePath))

                # Write calib file
                calibVectorFile=desispec.io.findfile("calib",NIGHT,EXPID,camera)
                flux = cframe_observedflux[start:end,armName[channel],:num_pixels]
                phot = nobj[start:end,armName[channel],:num_pixels]
                calibration = np.zeros_like(phot)
                jj = (flux>0)
                calibration[jj] = phot[jj] / flux[jj]

                #- TODO: what should calibivar be?
                #- For now, model it as the noise of combining ~10 spectra
                calibivar=10/cframe_ivar[start:end,armName[channel],:num_pixels]
                #mask=(1/calibivar>0).astype(int)??
                mask=np.zeros(calibration.shape, dtype=np.uint32)

                # write flux calibration
                fluxcalib = FluxCalib(waves[channel], calibration, calibivar, mask)
                write_flux_calibration(calibVectorFile, fluxcalib)

                calibfilePath=desispec.io.findfile("calib",NIGHT,EXPID,camera)
                log.info("Wrote file {}".format(calibfilePath))
コード例 #42
0
ファイル: templates.py プロジェクト: akremin/desisim
    def make_templates(self, vrad_meansig=(0.0,200.0), rmagrange=(18.0,23.4),
                       gmagrange=(16.0,19.0)):
        """Build Monte Carlo set of spectra/templates for stars. 

        This function chooses random subsets of the continuum spectra for stars,
        adds radial velocity "jitter", then normalizes the spectrum to a
        specified r- or g-band magnitude.

        Args:
          vrad_meansig (float, optional): Mean and sigma (standard deviation) of the 
            radial velocity "jitter" (in km/s) that should be added to each
            spectrum.  Defaults to a normal distribution with a mean of zero and
            sigma of 200 km/s.
          rmagrange (float, optional): Minimum and maximum DECam r-band (AB)
            magnitude range.  Defaults to a uniform distribution between (18,23.4).
          gmagrange (float, optional): Minimum and maximum DECam g-band (AB)
            magnitude range.  Defaults to a uniform distribution between (16,19). 

        Returns:
          outflux (numpy.ndarray): Array [nmodel,npix] of observed-frame spectra [erg/s/cm2/A]. 
          meta (astropy.Table): Table of meta-data for each output spectrum [nmodel].

        Raises:

        """
        from astropy.table import Table, Column
        from desisim.io import write_templates
        from desispec.interpolation import resample_flux

        # Initialize the output flux array and metadata Table.
        outflux = np.zeros([self.nmodel,len(self.wave)]) # [erg/s/cm2/A]

        meta = Table()
        meta['TEMPLATEID'] = Column(np.zeros(self.nmodel,dtype='i4'))
        meta['REDSHIFT'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['GMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['RMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['ZMAG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['LOGG'] = Column(np.zeros(self.nmodel,dtype='f4'))
        meta['TEFF'] = Column(np.zeros(self.nmodel,dtype='f4'))

        meta['LOGG'].unit = 'm/s^2'
        meta['TEFF'].unit = 'K'

        if self.objtype=='WD':
            comments = dict(
                TEMPLATEID = 'template ID',
                REDSHIFT = 'object redshift',
                GMAG = 'DECam g-band AB magnitude',
                RMAG = 'DECam r-band AB magnitude',
                ZMAG = 'DECam z-band AB magnitude',
                LOGG = 'log10 of the effective gravity',
                TEFF = 'stellar effective temperature'
            )
        else:
            meta['FEH'] = Column(np.zeros(self.nmodel,dtype='f4'))
            comments = dict(
                TEMPLATEID = 'template ID',
                REDSHIFT = 'object redshift',
                GMAG = 'DECam g-band AB magnitude',
                RMAG = 'DECam r-band AB magnitude',
                ZMAG = 'DECam z-band AB magnitude',
                LOGG = 'log10 of the effective gravity',
                TEFF = 'stellar effective temperature',
                FEH = 'log10 iron abundance relative to solar',
            )


        nobj = 0
        nbase = len(self.basemeta)
        nchunk = min(self.nmodel,500)

        Cuts = TargetCuts()
        while nobj<=(self.nmodel-1):
            # Choose a random subset of the base templates
            chunkindx = self.rand.randint(0,nbase-1,nchunk)

            # Assign uniform redshift and r-magnitude distributions.
            if self.objtype=='WD':
                gmag = self.rand.uniform(gmagrange[0],gmagrange[1],nchunk)
            else: 
                rmag = self.rand.uniform(rmagrange[0],rmagrange[1],nchunk)
                
            vrad = self.rand.normal(vrad_meansig[0],vrad_meansig[1],nchunk)
            redshift = vrad/2.99792458E5

            # Unfortunately we have to loop here.
            for ii, iobj in enumerate(chunkindx):
                zwave = self.basewave*(1.0+redshift[ii])
                restflux = self.baseflux[iobj,:] # [erg/s/cm2/A @10pc]

                # Normalize; Note that [grz]flux are in nanomaggies
                if self.objtype=='WD':
                    gnorm = 10.0**(-0.4*gmag[ii])/self.gfilt.get_maggies(zwave,restflux)
                    flux = restflux*gnorm # [erg/s/cm2/A, @redshift[ii]]

                    gflux = 10.0**(-0.4*(gmag[ii]-22.5))                      
                    rflux = self.rfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                    zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5)
                else:
                    rnorm = 10.0**(-0.4*rmag[ii])/self.rfilt.get_maggies(zwave,restflux)
                    flux = restflux*rnorm # [erg/s/cm2/A, @redshift[ii]]

                    rflux = 10.0**(-0.4*(rmag[ii]-22.5))                      
                    gflux = self.gfilt.get_maggies(zwave,flux)*10**(0.4*22.5) 
                    zflux = self.zfilt.get_maggies(zwave,flux)*10**(0.4*22.5)

                # Color cuts only on the standard stars.
                if self.objtype=='FSTD':
                    grzmask = [Cuts.FSTD(gflux=gflux,rflux=rflux,zflux=zflux)]
                elif self.objtype=='WD':
                    grzmask = [True]
                else:
                    grzmask = [True]

                if all(grzmask):
                    if ((nobj+1)%10)==0:
                        print('Simulating {} template {}/{}'.format(self.objtype,nobj+1,self.nmodel))
                    outflux[nobj,:] = resample_flux(self.wave,zwave,flux)

                    if self.objtype=='WD':
                        meta['TEMPLATEID'][nobj] = nobj
                        meta['REDSHIFT'][nobj] = redshift[ii]
                        meta['GMAG'][nobj] = gmag[ii]
                        meta['RMAG'][nobj] = -2.5*np.log10(rflux)+22.5
                        meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                        meta['LOGG'][nobj] = self.basemeta['LOGG'][iobj]
                        meta['TEFF'][nobj] = self.basemeta['TEFF'][iobj]
                    else:
                        meta['TEMPLATEID'][nobj] = nobj
                        meta['REDSHIFT'][nobj] = redshift[ii]
                        meta['GMAG'][nobj] = -2.5*np.log10(gflux)+22.5
                        meta['RMAG'][nobj] = rmag[ii]
                        meta['ZMAG'][nobj] = -2.5*np.log10(zflux)+22.5
                        meta['LOGG'][nobj] = self.basemeta['LOGG'][iobj]
                        meta['TEFF'][nobj] = self.basemeta['TEFF'][iobj]
                        meta['FEH'][nobj] = self.basemeta['FEH'][iobj]

                    nobj = nobj+1

                # If we have enough models get out!
                if nobj>=(self.nmodel-1):
                    break
                
        return outflux, self.wave, meta
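
A toy sketch of two steps above (illustrative numbers only; toy_get_maggies is a made-up stand-in for self.rfilt.get_maggies, not the desisim filter machinery): the radial-velocity "jitter" becomes a small redshift z = v/c with v in km/s, and the spectrum is rescaled so its synthesized band flux matches the requested AB magnitude.

import numpy as np

vrad = 150.0                                 # km/s
redshift = vrad / 2.99792458e5               # z = v/c

wave = np.linspace(3000., 11000., 5000)      # [A]
restflux = 1e-16 * np.ones_like(wave)        # flat toy spectrum [erg/s/cm2/A]
zwave = wave * (1.0 + redshift)

def toy_get_maggies(wave, flux, wlo=5600., whi=6800.):
    # crude boxcar "filter"; illustration only, not a real AB synthesis
    sel = (wave > wlo) & (wave < whi)
    return np.trapz(flux[sel], wave[sel])

rmag = 19.5
rnorm = 10.0**(-0.4 * rmag) / toy_get_maggies(zwave, restflux)
flux = restflux * rnorm                      # normalized spectrum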
コード例 #43
0
def sim_source_spectra(allinfo, allzbest, infofile='source-truth.fits', debug=False):
    """Build the residual (source) spectra. No redshift-fitting.

    """
    from desispec.io import read_spectra, write_spectra
    from desispec.spectra import Spectra
    from desispec.interpolation import resample_flux
    from desispec.resolution import Resolution

    from redrock.external.desi import DistTargetsDESI
    from redrock.templates import find_templates, Template

    assert(np.all(allinfo['TARGETID'] == allzbest['TARGETID']))

    nsim = len(allinfo)

    # Select the subset of objects for which we got the correct lens (BGS)
    # redshift.
    these = np.where((allzbest['SPECTYPE'] == 'GALAXY') *
                     (np.abs(allzbest['Z'] - allinfo['LENS_Z']) < 0.003))[0]
    print('Selecting {}/{} lenses with the correct redshift'.format(len(these), nsim))
    if len(these) == 0:
        raise ValueError('No spectra passed the cuts!')

    allinfo = allinfo[these]
    allzbest = allzbest[these]

    print('Writing {}'.format(infofile))
    allinfo.write(infofile, overwrite=True)

    tempfile = find_templates()[0]
    rrtemp = Template(tempfile)
    
    # loop on each chunk of lens+source spectra
    nchunk = len(set(allinfo['CHUNK']))
    for ichunk in set(allinfo['CHUNK']):

        I = np.where(allinfo['CHUNK'] == ichunk)[0]
        info = allinfo[I]
        zbest = allzbest[I]
        
        specfile = 'lenssource-spectra-chunk{:03d}.fits'.format(ichunk)
        sourcefile = 'source-spectra-chunk{:03d}.fits'.format(ichunk)
        spectra = read_spectra(specfile).select(targets=info['TARGETID'])
        for igal, zz in enumerate(zbest):
            zwave = rrtemp.wave * (1 + zz['Z'])
            zflux = rrtemp.flux.T.dot(zz['COEFF']).T #/ (1 + zz['Z'])
            if debug:
                fig, ax = plt.subplots()
            for band in spectra.bands:
                R = Resolution(spectra.resolution_data[band][igal])
                # use fastspecfit here
                modelflux = R.dot(resample_flux(spectra.wave[band], zwave, zflux))
                if debug:
                    ax.plot(spectra.wave[band], spectra.flux[band][igal, :])
                    ax.plot(spectra.wave[band], modelflux)
                    ax.set_ylim(np.median(spectra.flux['r'][igal, :]) + np.std(spectra.flux['r'][igal, :]) * np.array([-1.5, 3]))
                    #ax.set_xlim(4500, 5500)
                spectra.flux[band][igal, :] -= modelflux # subtract
            if debug:
                qafile = 'source-spectra-chunk{:03d}-{}.png'.format(ichunk, igal)
                fig.savefig(qafile)
                plt.close()

        print('Writing {} spectra to {}'.format(len(zbest), sourcefile))
        write_spectra(outfile=sourcefile, spec=spectra)

    return allinfo    
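
A minimal sketch of the per-band subtraction done in the loop above, with np.interp as a crude stand-in for resample_flux and a Gaussian smoothing (assumed kernel width) as a rough stand-in for applying the Resolution matrix R; not the desispec implementation.

import numpy as np

def subtract_model(obswave, obsflux, zwave, zflux, sigma_pix=2.0):
    """Resample a redshifted template onto the observed grid, smooth it to a
    rough instrumental resolution, and return the residual (source) flux."""
    model = np.interp(obswave, zwave, zflux)
    hw = int(5 * sigma_pix)
    x = np.arange(-hw, hw + 1)
    kernel = np.exp(-0.5 * (x / sigma_pix)**2)
    kernel /= kernel.sum()
    model = np.convolve(model, kernel, mode='same')
    return obsflux - model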
コード例 #44
0
def main(args, comm=None):

    log = get_logger()

    if args.npoly < 0:
        log.warning("Need npoly>=0, changing this %d -> 1" % args.npoly)
        args.npoly = 0
    if args.nproc < 1:
        log.warning("Need nproc>=1, changing this %d -> 1" % args.nproc)
        args.nproc = 1

    if comm is not None:
        if args.nproc != 1:
            if comm.rank == 0:
                log.warning("Using MPI, forcing multiprocessing nproc -> 1")
            args.nproc = 1

    if args.objtype is not None:
        args.objtype = args.objtype.split(',')

    #- Read brick files for each channel
    if (comm is None) or (comm.rank == 0):
        log.info("Reading bricks")
    brick = dict()
    if args.brick is not None:
        if len(args.brickfiles) != 0:
            raise RuntimeError(
                'Give -b/--brick or input brickfiles but not both')
        for channel in ('b', 'r', 'z'):
            filename = None
            if (comm is None) or (comm.rank == 0):
                filename = io.findfile('brick',
                                       band=channel,
                                       brickname=args.brick,
                                       specprod_dir=args.specprod_dir)
            if comm is not None:
                filename = comm.bcast(filename, root=0)
            brick[channel] = io.Brick(filename)
    else:
        for filename in args.brickfiles:
            bx = io.Brick(filename)
            if bx.channel not in brick:
                brick[bx.channel] = bx
            else:
                if (comm is None) or (comm.rank == 0):
                    log.error('Channel {} in multiple input files'.format(
                        bx.channel))
                sys.exit(2)

    filters = brick.keys()
    for fil in filters:
        if (comm is None) or (comm.rank == 0):
            log.info("Filter found: " + fil)

    #- Assume all channels have the same number of targets
    #- TODO: generalize this to allow missing channels
    #if args.nspec is None:
    #    args.nspec = brick['b'].get_num_targets()
    #    log.info("Fitting {} targets".format(args.nspec))
    #else:
    #    log.info("Fitting {} of {} targets".format(args.nspec, brick['b'].get_num_targets()))

    #- Coadd individual exposures and combine channels
    #- Full coadd code is a bit slow, so try something quick and dirty for
    #- now to get something going for redshifting
    if (comm is None) or (comm.rank == 0):
        log.info("Combining individual channels and exposures")
    wave = []
    for fil in filters:
        wave = np.concatenate([wave, brick[fil].get_wavelength_grid()])
    np.ndarray.sort(wave)
    nwave = len(wave)

    #- flux and ivar arrays to fill for all targets
    #flux = np.zeros((nspec, nwave))
    #ivar = np.zeros((nspec, nwave))
    flux = []
    ivar = []
    good_targetids = []
    targetids = brick['b'].get_target_ids()

    fpinfo = None
    if args.print_info is not None:
        if (comm is None) or (comm.rank == 0):
            fpinfo = open(args.print_info, "w")

    for i, targetid in enumerate(targetids):
        #- wave, flux, and ivar for this target; concatenate
        xwave = list()
        xflux = list()
        xivar = list()

        good = True
        for channel in filters:
            exp_flux, exp_ivar, resolution, info = brick[channel].get_target(
                targetid)
            weights = np.sum(exp_ivar, axis=0)
            ii, = np.where(weights > 0)
            if len(ii) == 0:
                good = False
                break
            xwave.extend(brick[channel].get_wavelength_grid()[ii])
            #- Average multiple exposures on the same wavelength grid for each channel
            xflux.extend(
                np.average(exp_flux[:, ii], weights=exp_ivar[:, ii], axis=0))
            xivar.extend(weights[ii])

        if not good:
            continue

        xwave = np.array(xwave)
        xivar = np.array(xivar)
        xflux = np.array(xflux)

        ii = np.argsort(xwave)
        #flux[i], ivar[i] = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
        fl, iv = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
        flux.append(fl)
        ivar.append(iv)
        good_targetids.append(targetid)
        if args.print_info is not None:
            s2n = np.median(fl[:-1] * np.sqrt(iv[:-1]) /
                            np.sqrt(wave[1:] - wave[:-1]))
            if (comm is None) or (comm.rank == 0):
                print(targetid, s2n)
                fpinfo.write(str(targetid) + " " + str(s2n) + "\n")

    if args.print_info is not None:
        if (comm is None) or (comm.rank == 0):
            fpinfo.close()
        sys.exit()

    good_targetids = good_targetids[args.first_spec:]
    flux = np.array(flux[args.first_spec:])
    ivar = np.array(ivar[args.first_spec:])
    nspec = len(good_targetids)
    if (comm is None) or (comm.rank == 0):
        log.info("number of good targets = %d" % nspec)
    if (args.nspec is not None) and (args.nspec < nspec):
        if (comm is None) or (comm.rank == 0):
            log.info("Fitting {} of {} targets".format(args.nspec, nspec))
        nspec = args.nspec
        good_targetids = good_targetids[:nspec]
        flux = flux[:nspec]
        ivar = ivar[:nspec]
    else:
        if (comm is None) or (comm.rank == 0):
            log.info("Fitting {} targets".format(nspec))

    if (comm is None) or (comm.rank == 0):
        log.debug("flux.shape={}".format(flux.shape))

    zf = None
    if comm is None:
        # Use multiprocessing built in to RedMonster.

        zf = RedMonsterZfind(wave=wave,
                             flux=flux,
                             ivar=ivar,
                             objtype=args.objtype,
                             zrange_galaxy=args.zrange_galaxy,
                             zrange_qso=args.zrange_qso,
                             zrange_star=args.zrange_star,
                             nproc=args.nproc,
                             npoly=args.npoly)

    else:
        # Use MPI

        # distribute the spectra among processes
        my_firstspec, my_nspec = dist_uniform(nspec, comm.size, comm.rank)
        my_specs = slice(my_firstspec, my_firstspec + my_nspec)
        for p in range(comm.size):
            if p == comm.rank:
                if my_nspec > 0:
                    log.info("process {} fitting spectra {} - {}".format(
                        p, my_firstspec, my_firstspec + my_nspec - 1))
                else:
                    log.info("process {} idle".format(p))
                sys.stdout.flush()
            comm.barrier()

        # do redshift fitting on each process
        myzf = None
        if my_nspec > 0:
            savelevel = os.environ["DESI_LOGLEVEL"]
            os.environ["DESI_LOGLEVEL"] = "WARNING"
            myzf = RedMonsterZfind(wave=wave,
                                   flux=flux[my_specs, :],
                                   ivar=ivar[my_specs, :],
                                   objtype=args.objtype,
                                   zrange_galaxy=args.zrange_galaxy,
                                   zrange_qso=args.zrange_qso,
                                   zrange_star=args.zrange_star,
                                   nproc=args.nproc,
                                   npoly=args.npoly)
            os.environ["DESI_LOGLEVEL"] = savelevel

        # Combine results into a single ZFindBase object on the root process.
        # We could do this with a gather, but we are using a small number of
        # processes, and point-to-point communication is easier for people to
        # understand.

        if comm.rank == 0:
            zf = ZfindBase(myzf.wave,
                           np.zeros((nspec, myzf.nwave)),
                           np.zeros((nspec, myzf.nwave)),
                           R=None,
                           results=None)

        for p in range(comm.size):
            if comm.rank == 0:
                if p == 0:
                    # root process copies its own data into output
                    zf.flux[my_specs] = myzf.flux
                    zf.ivar[my_specs] = myzf.ivar
                    zf.model[my_specs] = myzf.model
                    zf.z[my_specs] = myzf.z
                    zf.zerr[my_specs] = myzf.zerr
                    zf.zwarn[my_specs] = myzf.zwarn
                    zf.spectype[my_specs] = myzf.spectype
                    zf.subtype[my_specs] = myzf.subtype
                else:
                    # root process receives from process p and copies
                    # it into the output.
                    p_nspec = comm.recv(source=p, tag=0)
                    # only proceed if the sending process actually
                    # has some spectra assigned to it.
                    if p_nspec > 0:
                        p_firstspec = comm.recv(source=p, tag=1)
                        p_slice = slice(p_firstspec, p_firstspec + p_nspec)

                        p_flux = comm.recv(source=p, tag=2)
                        zf.flux[p_slice] = p_flux

                        p_ivar = comm.recv(source=p, tag=3)
                        zf.ivar[p_slice] = p_ivar

                        p_model = comm.recv(source=p, tag=4)
                        zf.model[p_slice] = p_model

                        p_z = comm.recv(source=p, tag=5)
                        zf.z[p_slice] = p_z

                        p_zerr = comm.recv(source=p, tag=6)
                        zf.zerr[p_slice] = p_zerr

                        p_zwarn = comm.recv(source=p, tag=7)
                        zf.zwarn[p_slice] = p_zwarn

                        p_type = comm.recv(source=p, tag=8)
                        zf.spectype[p_slice] = p_type

                        p_subtype = comm.recv(source=p, tag=9)
                        zf.subtype[p_slice] = p_subtype
            else:
                if p == comm.rank:
                    # process p sends to root
                    comm.send(my_nspec, dest=0, tag=0)
                    if my_nspec > 0:
                        comm.send(my_firstspec, dest=0, tag=1)
                        comm.send(myzf.flux, dest=0, tag=2)
                        comm.send(myzf.ivar, dest=0, tag=3)
                        comm.send(myzf.model, dest=0, tag=4)
                        comm.send(myzf.z, dest=0, tag=5)
                        comm.send(myzf.zerr, dest=0, tag=6)
                        comm.send(myzf.zwarn, dest=0, tag=7)
                        comm.send(myzf.spectype, dest=0, tag=8)
                        comm.send(myzf.subtype, dest=0, tag=9)
            comm.barrier()

    if (comm is None) or (comm.rank == 0):
        # The full results exist only on the rank zero process.

        # reformat results
        dtype = list()

        dtype = [
            ('Z', zf.z.dtype),
            ('ZERR', zf.zerr.dtype),
            ('ZWARN', zf.zwarn.dtype),
            ('SPECTYPE', zf.spectype.dtype),
            ('SUBTYPE', zf.subtype.dtype),
        ]

        formatted_data = np.empty(nspec, dtype=dtype)
        formatted_data['Z'] = zf.z
        formatted_data['ZERR'] = zf.zerr
        formatted_data['ZWARN'] = zf.zwarn
        formatted_data['SPECTYPE'] = zf.spectype
        formatted_data['SUBTYPE'] = zf.subtype

        # Create a ZfindBase object with formatted results
        zfi = ZfindBase(None, None, None, results=formatted_data)
        zfi.nspec = nspec

        # QA
        if (args.qafile is not None) or (args.qafig is not None):
            log.info("performing skysub QA")
            # Load
            qabrick = load_qa_brick(args.qafile)
            # Run
            qabrick.run_qa('ZBEST', (zfi, brick))
            # Write
            if args.qafile is not None:
                write_qa_brick(args.qafile, qabrick)
                log.info("successfully wrote {:s}".format(args.qafile))
            # Figure(s)
            if args.qafig is not None:
                raise IOError("Not yet implemented")
                qa_plots.brick_zbest(args.qafig, zfi, qabrick)

        #- Write some output
        if args.outfile is None:
            args.outfile = io.findfile('zbest', brickname=args.brick)

        log.info("Writing " + args.outfile)
        #io.write_zbest(args.outfile, args.brick, targetids, zfi, zspec=args.zspec)
        io.write_zbest(args.outfile,
                       args.brick,
                       good_targetids,
                       zfi,
                       zspec=args.zspec)

    return
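
The MPI pattern above (distribute spectra uniformly over ranks, fit locally, gather the slices back to rank 0 with point-to-point messages) can be sketched on its own; dist_uniform_simple below is a stand-in for desispec's dist_uniform, and the random array stands in for the per-rank fit results.

import numpy as np
from mpi4py import MPI

def dist_uniform_simple(n, size, rank):
    # first index and count of the slice assigned to this rank
    counts = [n // size + (1 if r < n % size else 0) for r in range(size)]
    return sum(counts[:rank]), counts[rank]

comm = MPI.COMM_WORLD
nspec = 20
first, nmine = dist_uniform_simple(nspec, comm.size, comm.rank)
myz = np.random.uniform(0.5, 1.5, nmine)     # stand-in for local fit results

if comm.rank == 0:
    z = np.zeros(nspec)
    z[first:first + nmine] = myz
    for p in range(1, comm.size):
        p_first = comm.recv(source=p, tag=1)
        p_n = comm.recv(source=p, tag=2)
        if p_n > 0:
            z[p_first:p_first + p_n] = comm.recv(source=p, tag=3)
else:
    comm.send(first, dest=0, tag=1)
    comm.send(nmine, dest=0, tag=2)
    if nmine > 0:
        comm.send(myz, dest=0, tag=3)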
コード例 #45
0
def new_exposure(flavor, nspec=5000, night=None, expid=None, tileid=None, \
    airmass=1.0, exptime=None):
    """
    Create a new exposure and write the simulation input files.
    Does not generate pixel-level simulations or noisy spectra.

    Args:
        flavor: exposure flavor ('arc', 'flat', or 'science')
        nspec (optional): integer number of spectra to simulate
        night (optional): YEARMMDD string
        expid (optional): positive integer exposure ID
        tileid (optional): tile ID
        airmass (optional): airmass, default 1.0
        exptime (optional): exposure time [sec]; defaults to the desimodel
            'exptime' parameter for science exposures
    
    Writes:
        $DESI_SPECTRO_SIM/$PIXPROD/{night}/fibermap-{expid}.fits
        $DESI_SPECTRO_SIM/$PIXPROD/{night}/simspec-{expid}.fits
        
    Returns:
        fibermap numpy structured array
        truth dictionary
    """
    if expid is None:
        expid = get_next_expid()

    if tileid is None:
        tileid = get_next_tileid()

    if night is None:
        #- simulation obs time = now, even if sun is up
        dateobs = time.gmtime()
        night = get_night(utc=dateobs)
    else:
        #- 10pm on night YEARMMDD
        dateobs = time.strptime(night + ':22', '%Y%m%d:%H')

    params = desimodel.io.load_desiparams()
    if flavor == 'arc':
        infile = os.getenv(
            'DESI_ROOT'
        ) + '/spectro/templates/calib/v0.2/arc-lines-average.fits'
        d = fits.getdata(infile, 1)
        wave = d['AIRWAVE']
        phot = d['ELECTRONS']

        truth = dict(WAVE=wave)
        meta = None
        fibermap = desispec.io.fibermap.empty_fibermap(nspec)
        for channel in ('B', 'R', 'Z'):
            thru = desimodel.io.load_throughput(channel)
            ii = np.where((thru.wavemin <= wave) & (wave <= thru.wavemax))[0]
            truth['WAVE_' + channel] = wave[ii]
            truth['PHOT_' + channel] = np.tile(phot[ii],
                                               nspec).reshape(nspec, len(ii))

    elif flavor == 'flat':
        infile = os.getenv(
            'DESI_ROOT'
        ) + '/spectro/templates/calib/v0.2/flat-3100K-quartz-iodine.fits'
        flux = fits.getdata(infile, 0)
        hdr = fits.getheader(infile, 0)
        wave = desispec.io.util.header2wave(hdr)

        #- resample to 0.2 A grid
        dw = 0.2
        ww = np.arange(wave[0], wave[-1] + dw / 2, dw)
        flux = resample_flux(ww, wave, flux)
        wave = ww

        #- Convert to 2D for projection
        flux = np.tile(flux, nspec).reshape(nspec, len(wave))

        truth = dict(WAVE=wave, FLUX=flux)
        meta = None
        fibermap = desispec.io.fibermap.empty_fibermap(nspec)
        for channel in ('B', 'R', 'Z'):
            thru = desimodel.io.load_throughput(channel)
            ii = (thru.wavemin <= wave) & (wave <= thru.wavemax)
            phot = thru.photons(wave[ii],
                                flux[:, ii],
                                units=hdr['BUNIT'],
                                objtype='CALIB',
                                exptime=10)

            truth['WAVE_' + channel] = wave[ii]
            truth['PHOT_' + channel] = phot

    elif flavor == 'science':
        fibermap, truth = get_targets(nspec, tileid=tileid)

        flux = truth['FLUX']
        wave = truth['WAVE']
        nwave = len(wave)

        if exptime is None:
            exptime = params['exptime']

        #- Load sky [Magic knowledge of units 1e-17 erg/s/cm2/A/arcsec2]
        skyfile = os.getenv('DESIMODEL') + '/data/spectra/spec-sky.dat'
        skywave, skyflux = np.loadtxt(skyfile, unpack=True)
        skyflux = np.interp(wave, skywave, skyflux)
        truth['SKYFLUX'] = skyflux

        for channel in ('B', 'R', 'Z'):
            thru = desimodel.io.load_throughput(channel)

            ii = np.where((thru.wavemin <= wave) & (wave <= thru.wavemax))[0]

            #- Project flux to photons
            phot = thru.photons(wave[ii],
                                flux[:, ii],
                                units=truth['UNITS'],
                                objtype=truth['OBJTYPE'],
                                exptime=exptime,
                                airmass=airmass)

            truth['PHOT_' + channel] = phot
            truth['WAVE_' + channel] = wave[ii]

            #- Project sky flux to photons
            skyphot = thru.photons(wave[ii],
                                   skyflux[ii] * airmass,
                                   units='1e-17 erg/s/cm2/A/arcsec2',
                                   objtype='SKY',
                                   exptime=exptime,
                                   airmass=airmass)

            #- 2D version
            ### truth['SKYPHOT_'+channel] = np.tile(skyphot, nspec).reshape((nspec, len(ii)))
            #- 1D version
            truth['SKYPHOT_' + channel] = skyphot.astype(np.float32)

        #- NOTE: someday skyflux and skyphot may be 2D instead of 1D

        #- Extract the metadata part of the truth dictionary into a table
        columns = (
            'OBJTYPE',
            'REDSHIFT',
            'TEMPLATEID',
            'D4000',
            'OIIFLUX',
            'VDISP',
        )
        meta = {key: truth[key] for key in columns}

    #- (end indentation for arc/flat/science flavors)

    #- Override $DESI_SPECTRO_DATA in order to write to simulation area
    datadir_orig = os.getenv('DESI_SPECTRO_DATA')
    simbase = os.path.join(os.getenv('DESI_SPECTRO_SIM'), os.getenv('PIXPROD'))
    os.environ['DESI_SPECTRO_DATA'] = simbase

    #- Write fibermap
    telera, teledec = io.get_tile_radec(tileid)
    hdr = dict(
        NIGHT=(night, 'Night of observation YEARMMDD'),
        EXPID=(expid, 'DESI exposure ID'),
        TILEID=(tileid, 'DESI tile ID'),
        FLAVOR=(flavor, 'Flavor [arc, flat, science, ...]'),
        TELRA=(telera, 'Telescope pointing RA [degrees]'),
        TELDEC=(teledec, 'Telescope pointing dec [degrees]'),
    )
    #- ISO 8601 DATE-OBS year-mm-ddThh:mm:ss
    fiberfile = desispec.io.findfile('fibermap', night, expid)
    desispec.io.write_fibermap(fiberfile, fibermap, header=hdr)
    print(fiberfile)

    #- Write simspec; expand fibermap header
    hdr['AIRMASS'] = (airmass, 'Airmass at middle of exposure')
    hdr['EXPTIME'] = (exptime, 'Exposure time [sec]')
    hdr['DATE-OBS'] = (time.strftime('%FT%T', dateobs), 'Start of exposure')

    simfile = io.write_simspec(meta, truth, expid, night, header=hdr)
    print(simfile)

    #- Update obslog that we succeeded with this exposure
    update_obslog(flavor, expid, dateobs, tileid)

    #- Restore $DESI_SPECTRO_DATA
    if datadir_orig is not None:
        os.environ['DESI_SPECTRO_DATA'] = datadir_orig
    else:
        del os.environ['DESI_SPECTRO_DATA']

    return fibermap, truth
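
A minimal usage sketch for the function above (the import path desisim.obs is an assumption, and the $DESI_SPECTRO_SIM, $PIXPROD, $DESI_ROOT and $DESIMODEL variables referenced in the code must point at valid data):

# Hypothetical usage sketch -- assumes the environment described above is set up.
from desisim.obs import new_exposure   # import path is an assumption

fibermap, truth = new_exposure('science', nspec=100, night='20200101',
                               expid=1, airmass=1.2)
print(sorted(truth.keys()))   # e.g. FLUX, WAVE, WAVE_B/R/Z, PHOT_B/R/Z, SKYFLUX, ...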
Code example #46
File: obs.py  Project: forero/desisim
def new_exposure(flavor, nspec=5000, night=None, expid=None, tileid=None, airmass=1.0, \
    exptime=None):
    """
    Create a new exposure and output input simulation files.
    Does not generate pixel-level simulations or noisy spectra.
    
    Args:
        flavor: exposure flavor ('arc', 'flat', or 'science')
        nspec (optional): integer number of spectra to simulate
        night (optional): YEARMMDD string
        expid (optional): positive integer exposure ID
        tileid (optional): tile ID
        airmass (optional): airmass, default 1.0
        exptime (optional): exposure time [sec]
    
    Writes:
        $DESI_SPECTRO_SIM/$PIXPROD/{night}/fibermap-{expid}.fits
        $DESI_SPECTRO_SIM/$PIXPROD/{night}/simspec-{expid}.fits
        
    Returns:
        fibermap numpy structured array
        truth dictionary
    """
    if expid is None:
        expid = get_next_expid()
    
    if tileid is None:
        tileid = get_next_tileid()

    if night is None:
        #- simulation obs time = now, even if sun is up
        dateobs = time.gmtime()
        night = get_night(utc=dateobs)
    else:
        #- 10pm on night YEARMMDD
        dateobs = time.strptime(night+':22', '%Y%m%d:%H')
    
    params = desimodel.io.load_desiparams()    
    if flavor == 'arc':
        infile = os.getenv('DESI_ROOT')+'/spectro/templates/calib/v0.1/arc-lines-average.fits'
        d = fits.getdata(infile, 1)
        wave = d['AIRWAVE']
        phot = d['ELECTRONS']
        
        truth = dict(WAVE=wave)
        meta = None
        fibermap = desispec.io.fibermap.empty_fibermap(nspec)
        for channel in ('B', 'R', 'Z'):
            thru = desimodel.io.load_throughput(channel)        
            ii = np.where( (thru.wavemin <= wave) & (wave <= thru.wavemax) )[0]
            truth['WAVE_'+channel] = wave[ii]
            truth['PHOT_'+channel] = np.tile(phot[ii], nspec).reshape(nspec, len(ii))

    elif flavor == 'flat':
        infile = os.getenv('DESI_ROOT')+'/spectro/templates/calib/v0.1/flat-3100K-quartz-iodine.fits'
        flux = fits.getdata(infile, 0)
        hdr = fits.getheader(infile, 0)
        wave = desispec.io.util.header2wave(hdr)

        #- resample to 0.2 A grid
        dw = 0.2
        ww = np.arange(wave[0], wave[-1]+dw/2, dw)
        flux = resample_flux(ww, wave, flux)
        wave = ww

        #- Convert to 2D for projection
        flux = np.tile(flux, nspec).reshape(nspec, len(wave))

        truth = dict(WAVE=wave, FLUX=flux)
        meta = None
        fibermap = desispec.io.fibermap.empty_fibermap(nspec)
        for channel in ('B', 'R', 'Z'):
            psf = desimodel.io.load_psf(channel)
            thru = desimodel.io.load_throughput(channel)
            ii = (psf.wmin <= wave) & (wave <= psf.wmax)
            phot = thru.photons(wave[ii], flux[:,ii], units=hdr['BUNIT'], objtype='CALIB')
        
            truth['WAVE_'+channel] = wave[ii]
            truth['PHOT_'+channel] = phot
        
    elif flavor == 'science':
        fibermap, truth = get_targets(nspec, tileid=tileid)
            
        flux = truth['FLUX']
        wave = truth['WAVE']
        nwave = len(wave)
    
        if exptime is None:
            exptime = params['exptime']
    
        #- Load sky [Magic knowledge of units 1e-17 erg/s/cm2/A/arcsec2]
        skyfile = os.getenv('DESIMODEL')+'/data/spectra/spec-sky.dat'
        skywave, skyflux = np.loadtxt(skyfile, unpack=True)
        skyflux = np.interp(wave, skywave, skyflux)
        truth['SKYFLUX'] = skyflux

        for channel in ('B', 'R', 'Z'):
            thru = desimodel.io.load_throughput(channel)
        
            ii = np.where( (thru.wavemin <= wave) & (wave <= thru.wavemax) )[0]
        
            #- Project flux to photons
            phot = thru.photons(wave[ii], flux[:,ii], units='1e-17 erg/s/cm2/A',
                    objtype=truth['OBJTYPE'], exptime=exptime,
                    airmass=airmass)
                
            truth['PHOT_'+channel] = phot
            truth['WAVE_'+channel] = wave[ii]
    
            #- Project sky flux to photons
            skyphot = thru.photons(wave[ii], skyflux[ii]*airmass,
                units='1e-17 erg/s/cm2/A/arcsec2',
                objtype='SKY', exptime=exptime, airmass=airmass)
    
            #- 2D version
            ### truth['SKYPHOT_'+channel] = np.tile(skyphot, nspec).reshape((nspec, len(ii)))
            #- 1D version
            truth['SKYPHOT_'+channel] = skyphot.astype(np.float32)
        
        #- NOTE: someday skyflux and skyphot may be 2D instead of 1D
        
        #- Extract the metadata part of the truth dictionary into a table
        columns = (
            'OBJTYPE',
            'REDSHIFT',
            'TEMPLATEID',
            'O2FLUX',
        )
        meta = _dict2ndarray(truth, columns)
        
    #- (end indentation for arc/flat/science flavors)
        
    #- Write fibermap
    telera, teledec = io.get_tile_radec(tileid)
    hdr = dict(
        NIGHT = (night, 'Night of observation YEARMMDD'),
        EXPID = (expid, 'DESI exposure ID'),
        TILEID = (tileid, 'DESI tile ID'),
        FLAVOR = (flavor, 'Flavor [arc, flat, science, ...]'),
        TELERA = (telera, 'Telescope pointing RA [degrees]'),
        TELEDEC = (teledec, 'Telescope pointing dec [degrees]'),
        )
    fiberfile = desispec.io.findfile('fibermap', night, expid)
    desispec.io.write_fibermap(fiberfile, fibermap, header=hdr)
    print(fiberfile)
    
    #- Write simfile
    hdr = dict(
        AIRMASS=(airmass, 'Airmass at middle of exposure'),
        EXPTIME=(exptime, 'Exposure time [sec]'),
        FLAVOR=(flavor, 'exposure flavor [arc, flat, science]'),
        )
    simfile = io.write_simspec(meta, truth, expid, night, header=hdr)
    print(simfile)

    #- Update obslog that we succeeded with this exposure
    update_obslog(flavor, expid, dateobs, tileid)
    
    return fibermap, truth
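
The thru.photons(...) calls above convert a calibrated flux density into expected photon counts per wavelength bin. A rough back-of-the-envelope sketch of that conversion (illustrative numbers only; the real desimodel throughput object also handles fiber losses, atmosphere, and unit parsing):

import numpy as np

# N_phot ~ F_lambda * area * exptime * dlambda * throughput / (h*c/lambda)
h = 6.626e-27                                   # erg s
c = 2.998e18                                    # Angstrom / s
area = 8.66e4                                   # cm^2, illustrative collecting area
exptime = 1000.0                                # s
wave = np.linspace(3600.0, 5800.0, 2201)        # Angstrom
flam = 1e-17 * np.ones_like(wave)               # erg / s / cm^2 / A
throughput = 0.3 * np.ones_like(wave)           # dimensionless, illustrative
dwave = np.gradient(wave)                       # A per bin
nphot = flam * area * exptime * dwave * throughput * wave / (h * c)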
Code example #47
File: zfind.py  Project: rstaten/desispec
def main(args, comm=None) :

    log = get_logger()

    if args.npoly < 0 :
        log.warning("Need npoly>=0, changing this %d -> 1"%args.npoly)
        args.npoly=0
    if args.nproc < 1 :
        log.warning("Need nproc>=1, changing this %d -> 1"%args.nproc)
        args.nproc=1
    
    if comm is not None:
        if args.nproc != 1:
            if comm.rank == 0:
                log.warning("Using MPI, forcing multiprocessing nproc -> 1")
            args.nproc = 1

    if args.objtype is not None:
        args.objtype = args.objtype.split(',')

    #- Read brick files for each channel
    if (comm is None) or (comm.rank == 0):
        log.info("Reading bricks")
    brick = dict()
    if args.brick is not None:
        if len(args.brickfiles) != 0:
            raise RuntimeError('Give -b/--brick or input brickfiles but not both')
        for channel in ('b', 'r', 'z'):
            filename = None
            if (comm is None) or (comm.rank == 0):
                filename = io.findfile('brick', band=channel, brickname=args.brick,
                                        specprod_dir=args.specprod_dir)
            if comm is not None:
                filename = comm.bcast(filename, root=0)
            brick[channel] = io.Brick(filename)
    else:
        for filename in args.brickfiles:
            bx = io.Brick(filename)
            if bx.channel not in brick:
                brick[bx.channel] = bx
            else:
                if (comm is None) or (comm.rank == 0):
                    log.error('Channel {} in multiple input files'.format(bx.channel))
                sys.exit(2)

    filters=brick.keys()
    for fil in filters:
        if (comm is None) or (comm.rank == 0):
            log.info("Filter found: "+fil)

    #- Assume all channels have the same number of targets
    #- TODO: generalize this to allow missing channels
    #if args.nspec is None:
    #    args.nspec = brick['b'].get_num_targets()
    #    log.info("Fitting {} targets".format(args.nspec))
    #else:
    #    log.info("Fitting {} of {} targets".format(args.nspec, brick['b'].get_num_targets()))
    
    #- Coadd individual exposures and combine channels
    #- Full coadd code is a bit slow, so try something quick and dirty for
    #- now to get something going for redshifting
    if (comm is None) or (comm.rank == 0):
        log.info("Combining individual channels and exposures")
    wave=[]
    for fil in filters:
        wave=np.concatenate([wave,brick[fil].get_wavelength_grid()])
    np.ndarray.sort(wave)
    nwave = len(wave)

    #- flux and ivar arrays to fill for all targets
    #flux = np.zeros((nspec, nwave))
    #ivar = np.zeros((nspec, nwave))
    flux = []
    ivar = []
    good_targetids=[]
    targetids = brick['b'].get_target_ids()

    fpinfo = None
    if args.print_info is not None:
        if (comm is None) or (comm.rank == 0):
            fpinfo = open(args.print_info,"w")

    for i, targetid in enumerate(targetids):
        #- wave, flux, and ivar for this target; concatenate
        xwave = list()
        xflux = list()
        xivar = list()

        good=True
        for channel in filters:
            exp_flux, exp_ivar, resolution, info = brick[channel].get_target(targetid)
            weights = np.sum(exp_ivar, axis=0)
            ii, = np.where(weights > 0)
            if len(ii)==0:
                good=False
                break
            xwave.extend(brick[channel].get_wavelength_grid()[ii])
            #- Average multiple exposures on the same wavelength grid for each channel
            xflux.extend(np.average(exp_flux[:,ii], weights=exp_ivar[:,ii], axis=0))
            xivar.extend(weights[ii])

        if not good:
            continue

        xwave = np.array(xwave)
        xivar = np.array(xivar)
        xflux = np.array(xflux)

        ii = np.argsort(xwave)
        #flux[i], ivar[i] = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
        fl, iv = resample_flux(wave, xwave[ii], xflux[ii], xivar[ii])
        flux.append(fl)
        ivar.append(iv)
        good_targetids.append(targetid)
        if args.print_info is not None:
            s2n = np.median(fl[:-1]*np.sqrt(iv[:-1])/np.sqrt(wave[1:]-wave[:-1]))
            if (comm is None) or (comm.rank == 0):
                print(targetid, s2n)
                fpinfo.write(str(targetid)+" "+str(s2n)+"\n")

    if args.print_info is not None:
        if (comm is None) or (comm.rank == 0):
            fpinfo.close()
        sys.exit()

    good_targetids=good_targetids[args.first_spec:]
    flux=np.array(flux[args.first_spec:])
    ivar=np.array(ivar[args.first_spec:])
    nspec=len(good_targetids)
    if (comm is None) or (comm.rank == 0):
        log.info("number of good targets = %d"%nspec)
    if (args.nspec is not None) and (args.nspec < nspec):
        if (comm is None) or (comm.rank == 0):
            log.info("Fitting {} of {} targets".format(args.nspec, nspec))
        nspec=args.nspec
        good_targetids=good_targetids[:nspec]
        flux=flux[:nspec]
        ivar=ivar[:nspec]
    else :
        if (comm is None) or (comm.rank == 0):
            log.info("Fitting {} targets".format(nspec))
    
    if (comm is None) or (comm.rank == 0):
        log.debug("flux.shape={}".format(flux.shape))
    
    zf = None
    if comm is None:
        # Use multiprocessing built in to RedMonster.

        zf = RedMonsterZfind(wave= wave,flux= flux,ivar=ivar,
                             objtype=args.objtype,zrange_galaxy= args.zrange_galaxy,
                             zrange_qso=args.zrange_qso,zrange_star=args.zrange_star,
                             nproc=args.nproc,npoly=args.npoly)
    
    else:
        # Use MPI

        # distribute the spectra among processes
        my_firstspec, my_nspec = dist_uniform(nspec, comm.size, comm.rank)
        my_specs = slice(my_firstspec, my_firstspec + my_nspec)
        for p in range(comm.size):
            if p == comm.rank:
                if my_nspec > 0:
                    log.info("process {} fitting spectra {} - {}".format(p, my_firstspec, my_firstspec+my_nspec-1))
                else:
                    log.info("process {} idle".format(p))
                sys.stdout.flush()
            comm.barrier()

        # do redshift fitting on each process
        myzf = None
        if my_nspec > 0:
            savelevel = os.environ["DESI_LOGLEVEL"]
            os.environ["DESI_LOGLEVEL"] = "WARNING"
            myzf = RedMonsterZfind(wave=wave, flux=flux[my_specs,:], ivar=ivar[my_specs,:],
                             objtype=args.objtype,zrange_galaxy= args.zrange_galaxy,
                             zrange_qso=args.zrange_qso,zrange_star=args.zrange_star,
                             nproc=args.nproc,npoly=args.npoly)
            os.environ["DESI_LOGLEVEL"] = savelevel

        # Combine results into a single ZFindBase object on the root process.
        # We could do this with a gather, but we are using a small number of
        # processes, and point-to-point communication is easier for people to
        # understand.

        if comm.rank == 0:
            zf = ZfindBase(myzf.wave, np.zeros((nspec, myzf.nwave)), np.zeros((nspec, myzf.nwave)), R=None, results=None)
        
        for p in range(comm.size):
            if comm.rank == 0:
                if p == 0:
                    # root process copies its own data into output
                    zf.flux[my_specs] = myzf.flux
                    zf.ivar[my_specs] = myzf.ivar
                    zf.model[my_specs] = myzf.model
                    zf.z[my_specs] = myzf.z
                    zf.zerr[my_specs] = myzf.zerr
                    zf.zwarn[my_specs] = myzf.zwarn
                    zf.spectype[my_specs] = myzf.spectype
                    zf.subtype[my_specs] = myzf.subtype
                else:
                    # root process receives from process p and copies
                    # it into the output.
                    p_nspec = comm.recv(source=p, tag=0)
                    # only proceed if the sending process actually
                    # has some spectra assigned to it.
                    if p_nspec > 0:
                        p_firstspec = comm.recv(source=p, tag=1)
                        p_slice = slice(p_firstspec, p_firstspec+p_nspec)

                        p_flux = comm.recv(source=p, tag=2)
                        zf.flux[p_slice] = p_flux

                        p_ivar = comm.recv(source=p, tag=3)
                        zf.ivar[p_slice] = p_ivar

                        p_model = comm.recv(source=p, tag=4)
                        zf.model[p_slice] = p_model

                        p_z = comm.recv(source=p, tag=5)
                        zf.z[p_slice] = p_z

                        p_zerr = comm.recv(source=p, tag=6)
                        zf.zerr[p_slice] = p_zerr

                        p_zwarn = comm.recv(source=p, tag=7)
                        zf.zwarn[p_slice] = p_zwarn
                        
                        p_type = comm.recv(source=p, tag=8)
                        zf.spectype[p_slice] = p_type
                        
                        p_subtype = comm.recv(source=p, tag=9)
                        zf.subtype[p_slice] = p_subtype
            else:
                if p == comm.rank:
                    # process p sends to root
                    comm.send(my_nspec, dest=0, tag=0)
                    if my_nspec > 0:
                        comm.send(my_firstspec, dest=0, tag=1)
                        comm.send(myzf.flux, dest=0, tag=2)
                        comm.send(myzf.ivar, dest=0, tag=3)
                        comm.send(myzf.model, dest=0, tag=4)
                        comm.send(myzf.z, dest=0, tag=5)
                        comm.send(myzf.zerr, dest=0, tag=6)
                        comm.send(myzf.zwarn, dest=0, tag=7)
                        comm.send(myzf.spectype, dest=0, tag=8)
                        comm.send(myzf.subtype, dest=0, tag=9)
            comm.barrier()

    if (comm is None) or (comm.rank == 0):
        # The full results exist only on the rank zero process.

        # reformat results
        dtype = [
            ('Z',         zf.z.dtype),
            ('ZERR',      zf.zerr.dtype),
            ('ZWARN',     zf.zwarn.dtype),
            ('SPECTYPE',  zf.spectype.dtype),
            ('SUBTYPE',   zf.subtype.dtype),    
        ]

        formatted_data  = np.empty(nspec, dtype=dtype)
        formatted_data['Z']        = zf.z
        formatted_data['ZERR']     = zf.zerr
        formatted_data['ZWARN']    = zf.zwarn
        formatted_data['SPECTYPE'] = zf.spectype
        formatted_data['SUBTYPE']  = zf.subtype
        
        # Create a ZfindBase object with formatted results
        zfi = ZfindBase(None, None, None, results=formatted_data)
        zfi.nspec = nspec

        # QA
        if (args.qafile is not None) or (args.qafig is not None):
            log.info("performing skysub QA")
            # Load
            qabrick = load_qa_brick(args.qafile)
            # Run
            qabrick.run_qa('ZBEST', (zfi,brick))
            # Write
            if args.qafile is not None:
                write_qa_brick(args.qafile, qabrick)
                log.info("successfully wrote {:s}".format(args.qafile))
            # Figure(s)
            if args.qafig is not None:
                raise IOError("Not yet implemented")
                qa_plots.brick_zbest(args.qafig, zfi, qabrick)

        #- Write some output
        if args.outfile is None:
            args.outfile = io.findfile('zbest', brickname=args.brick)

        log.info("Writing "+args.outfile)
        #io.write_zbest(args.outfile, args.brick, targetids, zfi, zspec=args.zspec)
        io.write_zbest(args.outfile, args.brick, good_targetids, zfi, zspec=args.zspec)

    return
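
As noted in the comments above, the point-to-point combine could also be written with a collective gather. A rough sketch of that alternative (not the implementation used here), assuming the same comm, myzf, my_firstspec and my_nspec variables:

# Each rank contributes (first, count, partial results); rank 0 stitches them together.
chunks = comm.gather((my_firstspec, my_nspec, myzf), root=0)
if comm.rank == 0:
    for first, count, part in chunks:
        if count > 0:
            sl = slice(first, first + count)
            zf.flux[sl] = part.flux
            zf.ivar[sl] = part.ivar
            zf.model[sl] = part.model
            zf.z[sl] = part.z
            zf.zerr[sl] = part.zerr
            zf.zwarn[sl] = part.zwarn
            zf.spectype[sl] = part.spectype
            zf.subtype[sl] = part.subtype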
Code example #48
File: quickgen.py  Project: michaelJwilson/desisim
def main(args):

    # Set up the logger
    if args.verbose:
        log = get_logger(DEBUG)
    else:
        log = get_logger()

    # Make sure all necessary environment variables are set
    DESI_SPECTRO_REDUX_DIR = "./quickGen"

    if 'DESI_SPECTRO_REDUX' not in os.environ:

        log.info('DESI_SPECTRO_REDUX environment is not set.')

    else:
        DESI_SPECTRO_REDUX_DIR = os.environ['DESI_SPECTRO_REDUX']

    if os.path.exists(DESI_SPECTRO_REDUX_DIR):

        if not os.path.isdir(DESI_SPECTRO_REDUX_DIR):
            raise RuntimeError("Path %s Not a directory" %
                               DESI_SPECTRO_REDUX_DIR)
    else:
        try:
            os.makedirs(DESI_SPECTRO_REDUX_DIR)
        except:
            raise

    SPECPROD_DIR = 'specprod'
    if 'SPECPROD' not in os.environ:
        log.info('SPECPROD environment is not set.')
    else:
        SPECPROD_DIR = os.environ['SPECPROD']
    prod_Dir = specprod_root()

    if os.path.exists(prod_Dir):

        if not os.path.isdir(prod_Dir):
            raise RuntimeError("Path %s Not a directory" % prod_Dir)
    else:
        try:
            os.makedirs(prod_Dir)
        except:
            raise

    # Initialize random number generator to use.
    np.random.seed(args.seed)
    random_state = np.random.RandomState(args.seed)

    # Derive spectrograph number from nstart if needed
    if args.spectrograph is None:
        args.spectrograph = args.nstart // 500

    # Read fibermapfile to get object type, night and expid
    if args.fibermap:
        log.info("Reading fibermap file {}".format(args.fibermap))
        fibermap = read_fibermap(args.fibermap)
        objtype = get_source_types(fibermap)
        stdindx = np.where(objtype == 'STD')  # match STD with STAR
        mwsindx = np.where(objtype == 'MWS_STAR')  # match MWS_STAR with STAR
        bgsindx = np.where(objtype == 'BGS')  # match BGS with LRG
        objtype[stdindx] = 'STAR'
        objtype[mwsindx] = 'STAR'
        objtype[bgsindx] = 'LRG'
        NIGHT = fibermap.meta['NIGHT']
        EXPID = fibermap.meta['EXPID']
    else:
        # Create a blank fake fibermap
        fibermap = empty_fibermap(args.nspec)
        targetids = random_state.randint(2**62, size=args.nspec)
        fibermap['TARGETID'] = targetids
        NIGHT = get_night()
        EXPID = 0  # match the names used below when writing output files

    log.info("Initializing SpecSim with config {}".format(args.config))
    desiparams = load_desiparams()
    qsim = get_simulator(args.config, num_fibers=1)

    if args.simspec:
        # Read the input file
        log.info('Reading input file {}'.format(args.simspec))
        simspec = desisim.io.read_simspec(args.simspec)
        nspec = simspec.nspec
        if simspec.flavor == 'arc':
            log.warning("quickgen doesn't generate flavor=arc outputs")
            return
        else:
            wavelengths = simspec.wave
            spectra = simspec.flux
        if nspec < args.nspec:
            log.info("Only {} spectra in input file".format(nspec))
            args.nspec = nspec

    else:
        # Initialize the output truth table.
        spectra = []
        wavelengths = qsim.source.wavelength_out.to(u.Angstrom).value
        npix = len(wavelengths)
        truth = dict()
        meta = Table()
        truth['OBJTYPE'] = np.zeros(args.nspec, dtype=(str, 10))
        truth['FLUX'] = np.zeros((args.nspec, npix))
        truth['WAVE'] = wavelengths
        jj = list()

        for thisobj in set(true_objtype):
            ii = np.where(true_objtype == thisobj)[0]
            nobj = len(ii)
            truth['OBJTYPE'][ii] = thisobj
            log.info('Generating {} template'.format(thisobj))

            # Generate the templates
            if thisobj == 'ELG':
                elg = desisim.templates.ELG(wave=wavelengths,
                                            add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = elg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_elg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'LRG':
                lrg = desisim.templates.LRG(wave=wavelengths,
                                            add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = lrg.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_lrg,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'QSO':
                qso = desisim.templates.QSO(wave=wavelengths)
                flux, tmpwave, meta1 = qso.make_templates(
                    nmodel=nobj, seed=args.seed, zrange=args.zrange_qso)
            elif thisobj == 'BGS':
                bgs = desisim.templates.BGS(wave=wavelengths,
                                            add_SNeIa=args.add_SNeIa)
                flux, tmpwave, meta1 = bgs.make_templates(
                    nmodel=nobj,
                    seed=args.seed,
                    zrange=args.zrange_bgs,
                    rmagrange=args.rmagrange_bgs,
                    sne_rfluxratiorange=args.sne_rfluxratiorange)
            elif thisobj == 'STD':
                std = desisim.templates.STD(wave=wavelengths)
                flux, tmpwave, meta1 = std.make_templates(nmodel=nobj,
                                                          seed=args.seed)
            elif thisobj == 'QSO_BAD':  # use STAR template no color cuts
                star = desisim.templates.STAR(wave=wavelengths)
                flux, tmpwave, meta1 = star.make_templates(nmodel=nobj,
                                                           seed=args.seed)
            elif thisobj == 'MWS_STAR' or thisobj == 'MWS':
                mwsstar = desisim.templates.MWS_STAR(wave=wavelengths)
                flux, tmpwave, meta1 = mwsstar.make_templates(nmodel=nobj,
                                                              seed=args.seed)
            elif thisobj == 'WD':
                wd = desisim.templates.WD(wave=wavelengths)
                flux, tmpwave, meta1 = wd.make_templates(nmodel=nobj,
                                                         seed=args.seed)
            elif thisobj == 'SKY':
                flux = np.zeros((nobj, npix))
                meta1 = Table(dict(REDSHIFT=np.zeros(nobj, dtype=np.float32)))
            elif thisobj == 'TEST':
                flux = np.zeros((args.nspec, npix))
                indx = np.where(wavelengths > 5800.0 - 1E-6)[0][0]
                ref_integrated_flux = 1E-10
                ref_cst_flux_density = 1E-17
                single_line = (np.arange(args.nspec) % 2 == 0).astype(
                    np.float32)
                continuum = (np.arange(args.nspec) % 2 == 1).astype(np.float32)

                for spec in range(args.nspec):
                    flux[spec, indx] = single_line[
                        spec] * ref_integrated_flux / np.gradient(wavelengths)[
                            indx]  # single line
                    flux[spec] += continuum[
                        spec] * ref_cst_flux_density  # flat continuum

                meta1 = Table(
                    dict(REDSHIFT=np.zeros(args.nspec, dtype=np.float32),
                         LINE=wavelengths[indx] *
                         np.ones(args.nspec, dtype=np.float32),
                         LINEFLUX=single_line * ref_integrated_flux,
                         CONSTFLUXDENSITY=continuum * ref_cst_flux_density))
            else:
                log.fatal('Unknown object type {}'.format(thisobj))
                sys.exit(1)

            # Pack it in.
            truth['FLUX'][ii] = flux
            meta = vstack([meta, meta1])
            jj.append(ii.tolist())

            # Sanity check on units; templates currently return ergs, not 1e-17 ergs...
            # assert (thisobj == 'SKY') or (np.max(truth['FLUX']) < 1e-6)

        # Sort the metadata table.
        jj = sum(jj, [])
        meta_new = Table()
        for k in range(args.nspec):
            index = int(np.where(np.array(jj) == k)[0])
            meta_new = vstack([meta_new, meta[index]])
        meta = meta_new

        # Add TARGETID and the true OBJTYPE to the metadata table.
        meta.add_column(
            Column(true_objtype, dtype=(str, 10), name='TRUE_OBJTYPE'))
        meta.add_column(Column(targetids, name='TARGETID'))

        # Rename REDSHIFT -> TRUEZ anticipating later table joins with zbest.Z
        meta.rename_column('REDSHIFT', 'TRUEZ')

    # explicitly set location on focal plane if needed to support airmass
    # variations when using specsim v0.5
    if qsim.source.focal_xy is None:
        qsim.source.focal_xy = (u.Quantity(0, 'mm'), u.Quantity(100, 'mm'))

    # Set simulation parameters from the simspec header or desiparams
    bright_objects = ['bgs', 'mws', 'bright', 'BGS', 'MWS', 'BRIGHT_MIX']
    gray_objects = ['gray', 'grey']
    if args.simspec is None:
        object_type = objtype
        flavor = None
    elif simspec.flavor == 'science':
        object_type = None
        flavor = simspec.header['PROGRAM']
    else:
        object_type = None
        flavor = simspec.flavor
        log.warning(
            'Maybe using an outdated simspec file with flavor={}'.format(
                flavor))

    # Set airmass
    if args.airmass is not None:
        qsim.atmosphere.airmass = args.airmass
    elif args.simspec and 'AIRMASS' in simspec.header:
        qsim.atmosphere.airmass = simspec.header['AIRMASS']
    else:
        qsim.atmosphere.airmass = 1.25  # Science Req. Doc L3.3.2

    # Set exptime
    if args.exptime is not None:
        qsim.observation.exposure_time = args.exptime * u.s
    elif args.simspec and 'EXPTIME' in simspec.header:
        qsim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
    elif objtype in bright_objects:
        qsim.observation.exposure_time = desiparams['exptime_bright'] * u.s
    else:
        qsim.observation.exposure_time = desiparams['exptime_dark'] * u.s

    # Set Moon Phase
    if args.moon_phase is not None:
        qsim.atmosphere.moon.moon_phase = args.moon_phase
    elif args.simspec and 'MOONFRAC' in simspec.header:
        qsim.atmosphere.moon.moon_phase = simspec.header['MOONFRAC']
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_phase = 0.7
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_phase = 0.1
    else:
        qsim.atmosphere.moon.moon_phase = 0.5

    # Set Moon Zenith
    if args.moon_zenith is not None:
        qsim.atmosphere.moon.moon_zenith = args.moon_zenith * u.deg
    elif args.simspec and 'MOONALT' in simspec.header:
        qsim.atmosphere.moon.moon_zenith = simspec.header['MOONALT'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.moon_zenith = 30 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.moon_zenith = 80 * u.deg
    else:
        qsim.atmosphere.moon.moon_zenith = 100 * u.deg

    # Set Moon - Object Angle
    if args.moon_angle is not None:
        qsim.atmosphere.moon.separation_angle = args.moon_angle * u.deg
    elif args.simspec and 'MOONSEP' in simspec.header:
        qsim.atmosphere.moon.separation_angle = simspec.header[
            'MOONSEP'] * u.deg
    elif flavor in bright_objects or object_type in bright_objects:
        qsim.atmosphere.moon.separation_angle = 50 * u.deg
    elif flavor in gray_objects:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg
    else:
        qsim.atmosphere.moon.separation_angle = 60 * u.deg

    # Initialize per-camera output arrays that will be saved
    waves, trueflux, noisyflux, obsivar, resolution, sflux = {}, {}, {}, {}, {}, {}

    maxbin = 0
    nmax = args.nspec
    for camera in qsim.instrument.cameras:
        # Lookup this camera's resolution matrix and convert to the sparse
        # format used in desispec.
        R = Resolution(camera.get_output_resolution_matrix())
        resolution[camera.name] = np.tile(R.to_fits_array(),
                                          [args.nspec, 1, 1])
        waves[camera.name] = (camera.output_wavelength.to(
            u.Angstrom).value.astype(np.float32))
        nwave = len(waves[camera.name])
        maxbin = max(maxbin, len(waves[camera.name]))
        nobj = np.zeros((nmax, 3, maxbin))  # object photons
        nsky = np.zeros((nmax, 3, maxbin))  # sky photons
        nivar = np.zeros((nmax, 3, maxbin))  # inverse variance (object+sky)
        cframe_observedflux = np.zeros(
            (nmax, 3, maxbin))  # calibrated object flux
        cframe_ivar = np.zeros(
            (nmax, 3, maxbin))  # inverse variance of calibrated object flux
        cframe_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to calibrated flux
        sky_ivar = np.zeros((nmax, 3, maxbin))  # inverse variance of sky
        sky_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to sky only
        frame_rand_noise = np.zeros(
            (nmax, 3, maxbin))  # random Gaussian noise to nobj+nsky
        trueflux[camera.name] = np.empty(
            (args.nspec, nwave))  # calibrated flux
        noisyflux[camera.name] = np.empty(
            (args.nspec, nwave))  # observed flux with noise
        obsivar[camera.name] = np.empty(
            (args.nspec, nwave))  # inverse variance of flux
        if args.simspec:
            for i in range(10):
                cn = camera.name + str(i)
                if cn in simspec.cameras:
                    dw = np.gradient(simspec.cameras[cn].wave)
                    break
            else:
                raise RuntimeError(
                    'Unable to find a {} camera in input simspec'.format(
                        camera))
        else:
            sflux = np.empty((args.nspec, npix))

    #- Check if input simspec is for a continuum flat lamp instead of science
    #- This does not convolve to per-fiber resolution
    if args.simspec:
        if simspec.flavor == 'flat':
            log.info("Simulating flat lamp exposure")
            for i, camera in enumerate(qsim.instrument.cameras):
                channel = camera.name  #- from simspec, b/r/z not b0/r1/z9
                assert camera.output_wavelength.unit == u.Angstrom
                num_pixels = len(waves[channel])

                phot = list()
                for j in range(10):
                    cn = camera.name + str(j)
                    if cn in simspec.cameras:
                        camwave = simspec.cameras[cn].wave
                        dw = np.gradient(camwave)
                        phot.append(simspec.cameras[cn].phot)

                if len(phot) == 0:
                    raise RuntimeError(
                        'Unable to find a {} camera in input simspec'.format(
                            camera))
                else:
                    phot = np.vstack(phot)

                meanspec = resample_flux(waves[channel], camwave,
                                         np.average(phot / dw, axis=0))

                fiberflat = random_state.normal(loc=1.0,
                                                scale=1.0 / np.sqrt(meanspec),
                                                size=(nspec, num_pixels))
                ivar = np.tile(meanspec, [nspec, 1])
                mask = np.zeros((simspec.nspec, num_pixels), dtype=np.uint32)

                for kk in range((args.nspec + args.nstart - 1) // 500 + 1):
                    camera = channel + str(kk)
                    outfile = desispec.io.findfile('fiberflat', NIGHT, EXPID,
                                                   camera)
                    start = max(500 * kk, args.nstart)
                    end = min(500 * (kk + 1), nmax)

                    if (args.spectrograph <= kk):
                        log.info(
                            "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                            .format(channel, kk, start, end))

                    ff = FiberFlat(waves[channel],
                                   fiberflat[start:end, :],
                                   ivar[start:end, :],
                                   mask[start:end, :],
                                   meanspec,
                                   header=dict(CAMERA=camera))
                    write_fiberflat(outfile, ff)
                    filePath = desispec.io.findfile("fiberflat", NIGHT, EXPID,
                                                    camera)
                    log.info("Wrote file {}".format(filePath))

            sys.exit(0)

    # Repeat the simulation for all spectra
    fluxunits = 1e-17 * u.erg / (u.s * u.cm**2 * u.Angstrom)
    for j in range(args.nspec):

        thisobjtype = objtype[j]
        sys.stdout.flush()
        if flavor == 'arc':
            qsim.source.update_in('Quickgen source {0}'.format(j), 'perfect',
                                  wavelengths * u.Angstrom,
                                  spectra * fluxunits)
        else:
            qsim.source.update_in('Quickgen source {0}'.format(j),
                                  thisobjtype.lower(),
                                  wavelengths * u.Angstrom,
                                  spectra[j, :] * fluxunits)
        qsim.source.update_out()

        qsim.simulate()
        qsim.generate_random_noise(random_state)

        for i, output in enumerate(qsim.camera_output):
            assert output['observed_flux'].unit == 1e17 * fluxunits
            # Extract the simulation results needed to create our uncalibrated
            # frame output file.
            num_pixels = len(output)
            nobj[j, i, :num_pixels] = output['num_source_electrons'][:, 0]
            nsky[j, i, :num_pixels] = output['num_sky_electrons'][:, 0]
            nivar[j, i, :num_pixels] = 1.0 / output['variance_electrons'][:, 0]

            # Get results for our flux-calibrated output file.
            cframe_observedflux[
                j, i, :num_pixels] = 1e17 * output['observed_flux'][:, 0]
            cframe_ivar[
                j,
                i, :num_pixels] = 1e-34 * output['flux_inverse_variance'][:, 0]

            # Fill brick arrays from the results.
            camera = output.meta['name']
            trueflux[camera][j][:] = 1e17 * output['observed_flux'][:, 0]
            noisyflux[camera][j][:] = 1e17 * (
                output['observed_flux'][:, 0] +
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])
            obsivar[camera][j][:] = 1e-34 * output['flux_inverse_variance'][:,
                                                                            0]

            # Use the same noise realization in the cframe and frame, without any
            # additional noise from sky subtraction for now.
            frame_rand_noise[
                j, i, :num_pixels] = output['random_noise_electrons'][:, 0]
            cframe_rand_noise[j, i, :num_pixels] = 1e17 * (
                output['flux_calibration'][:, 0] *
                output['random_noise_electrons'][:, 0])

            # The sky output file represents a model fit to ~40 sky fibers.
            # We reduce the variance by a factor of 25 to account for this and
            # give the sky an independent (Gaussian) noise realization.
            sky_ivar[
                j,
                i, :num_pixels] = 25.0 / (output['variance_electrons'][:, 0] -
                                          output['num_source_electrons'][:, 0])
            sky_rand_noise[j, i, :num_pixels] = random_state.normal(
                scale=1.0 / np.sqrt(sky_ivar[j, i, :num_pixels]),
                size=num_pixels)

    armName = {"b": 0, "r": 1, "z": 2}
    for channel in 'brz':

        #Before writing, convert from counts/bin to counts/A (as in Pixsim output)
        #Quicksim Default:
        #FLUX - input spectrum resampled to this binning; no noise added [1e-17 erg/s/cm2/Ang]
        #COUNTS_OBJ - object counts in 0.5 Ang bin
        #COUNTS_SKY - sky counts in 0.5 Ang bin

        num_pixels = len(waves[channel])
        dwave = np.gradient(waves[channel])
        nobj[:, armName[channel], :num_pixels] /= dwave
        frame_rand_noise[:, armName[channel], :num_pixels] /= dwave
        nivar[:, armName[channel], :num_pixels] *= dwave**2
        nsky[:, armName[channel], :num_pixels] /= dwave
        sky_rand_noise[:, armName[channel], :num_pixels] /= dwave
        sky_ivar[:, armName[channel], :num_pixels] /= dwave**2

        # Now write the outputs in DESI standard file system. None of the output file can have more than 500 spectra

        # Looping over spectrograph
        for ii in range((args.nspec + args.nstart - 1) // 500 + 1):

            start = max(500 * ii,
                        args.nstart)  # first spectrum for a given spectrograph
            end = min(500 * (ii + 1),
                      nmax)  # last spectrum for the spectrograph

            if (args.spectrograph <= ii):
                camera = "{}{}".format(channel, ii)
                log.info(
                    "Writing files for channel:{}, spectrograph:{}, spectra:{} to {}"
                    .format(channel, ii, start, end))
                num_pixels = len(waves[channel])

                # Write frame file
                framefileName = desispec.io.findfile("frame", NIGHT, EXPID,
                                                     camera)

                frame_flux=nobj[start:end,armName[channel],:num_pixels]+ \
                nsky[start:end,armName[channel],:num_pixels] + \
                frame_rand_noise[start:end,armName[channel],:num_pixels]
                frame_ivar = nivar[start:end, armName[channel], :num_pixels]

                sh1 = frame_flux.shape[0]  # needed for slicing the resolution matrix, which has shape (nspec, ndiag, nwave)
                # for example if nstart =400, nspec=150: two spectrographs:
                # 400-499=> 0 spectrograph, 500-549 => 1
                if (args.nstart == start):
                    resol = resolution[channel][:sh1, :, :]
                else:
                    resol = resolution[channel][-sh1:, :, :]

                # must create desispec.Frame object
                frame=Frame(waves[channel], frame_flux, frame_ivar,\
                    resolution_data=resol, spectrograph=ii, \
                    fibermap=fibermap[start:end], \
                    meta=dict(CAMERA=camera, FLAVOR=simspec.flavor) )
                desispec.io.write_frame(framefileName, frame)

                framefilePath = desispec.io.findfile("frame", NIGHT, EXPID,
                                                     camera)
                log.info("Wrote file {}".format(framefilePath))

                if args.frameonly or simspec.flavor == 'arc':
                    continue

                # Write cframe file
                cframeFileName = desispec.io.findfile("cframe", NIGHT, EXPID,
                                                      camera)
                cframeFlux = cframe_observedflux[
                    start:end,
                    armName[channel], :num_pixels] + cframe_rand_noise[
                        start:end, armName[channel], :num_pixels]
                cframeIvar = cframe_ivar[start:end,
                                         armName[channel], :num_pixels]

                # must create desispec.Frame object
                cframe = Frame(waves[channel], cframeFlux, cframeIvar, \
                    resolution_data=resol, spectrograph=ii,
                    fibermap=fibermap[start:end],
                    meta=dict(CAMERA=camera, FLAVOR=simspec.flavor) )
                desispec.io.frame.write_frame(cframeFileName, cframe)

                cframefilePath = desispec.io.findfile("cframe", NIGHT, EXPID,
                                                      camera)
                log.info("Wrote file {}".format(cframefilePath))

                # Write sky file
                skyfileName = desispec.io.findfile("sky", NIGHT, EXPID, camera)
                skyflux=nsky[start:end,armName[channel],:num_pixels] + \
                sky_rand_noise[start:end,armName[channel],:num_pixels]
                skyivar = sky_ivar[start:end, armName[channel], :num_pixels]
                skymask = np.zeros(skyflux.shape, dtype=np.uint32)

                # must create desispec.Sky object
                skymodel = SkyModel(waves[channel],
                                    skyflux,
                                    skyivar,
                                    skymask,
                                    header=dict(CAMERA=camera))
                desispec.io.sky.write_sky(skyfileName, skymodel)

                skyfilePath = desispec.io.findfile("sky", NIGHT, EXPID, camera)
                log.info("Wrote file {}".format(skyfilePath))

                # Write calib file
                calibVectorFile = desispec.io.findfile("calib", NIGHT, EXPID,
                                                       camera)
                flux = cframe_observedflux[start:end,
                                           armName[channel], :num_pixels]
                phot = nobj[start:end, armName[channel], :num_pixels]
                calibration = np.zeros_like(phot)
                jj = (flux > 0)
                calibration[jj] = phot[jj] / flux[jj]

                #- TODO: what should calibivar be?
                #- For now, model it as the noise of combining ~10 spectra
                calibivar = 10 / cframe_ivar[start:end,
                                             armName[channel], :num_pixels]
                #mask=(1/calibivar>0).astype(int)??
                mask = np.zeros(calibration.shape, dtype=np.uint32)

                # write flux calibration
                fluxcalib = FluxCalib(waves[channel], calibration, calibivar,
                                      mask)
                write_flux_calibration(calibVectorFile, fluxcalib)

                calibfilePath = desispec.io.findfile("calib", NIGHT, EXPID,
                                                     camera)
                log.info("Wrote file {}".format(calibfilePath))
Code example #49
File: desi_qso_templ.py  Project: desihub/desisim
def desi_qso_templates(z_wind=0.2, zmnx=(0.4,4.), outfil=None, N_perz=500,
                       boss_pca_fil=None, wvmnx=(3500., 10000.),
                       rebin_wave=None, rstate=None,
                       sdss_pca_fil=None, no_write=False, redshift=None,
                       seed=None, old_read=False, ipad=40, cosmo=None):
    """ Generate QSO templates for DESI

    Rebins to input wavelength array (or log10 in wvmnx)

    Parameters
    ----------
    z_wind : float, optional
      Window for sampling PCAs
    zmnx : tuple, optional
      Min/max for generation
    N_perz : int, optional
      Number of draws per redshift window
    old_read : bool, optional
      Read the files the old way
    seed : int, optional
      Seed for the random number state
    rebin_wave : ndarray, optional
      Input wavelengths for rebinning
    wvmnx : tuple, optional
      Wavelength limits for rebinning (not used with rebin_wave)
    redshift : ndarray, optional
      Redshifts desired for the templates
    ipad : int, optional
      Padding for enabling enough models
    cosmo: astropy.cosmology.core, optional
       Cosmology instantiation from astropy.cosmology.core
    Returns
    -------
    wave : ndarray
      Wavelengths that the spectra were rebinned to
    flux : ndarray (2D; flux vs. model)
    z : ndarray
      Redshifts
    """


    # Cosmology
    if cosmo is None:
        from astropy import cosmology
        cosmo = cosmology.FlatLambdaCDM(70., 0.3)

    if old_read:
        # PCA values
        if boss_pca_fil is None:
            boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
        hdu = fits.open(boss_pca_fil)
        boss_pca_coeff = hdu[1].data

        if sdss_pca_fil is None:
            sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'
        hdu2 = fits.open(sdss_pca_fil)
        sdss_pca_coeff = hdu2[1].data

        # Open the BOSS catalog file
        boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
        bcat_hdu = fits.open(boss_cat_fil)
        t_boss = bcat_hdu[1].data
        boss_zQSO = t_boss['z_pipe']

        # Open the SDSS catalog file
        sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
        scat_hdu = fits.open(sdss_cat_fil)
        t_sdss = scat_hdu[1].data
        sdss_zQSO = t_sdss['z']
        if len(sdss_pca_coeff) != len(sdss_zQSO):
            print('Need to finish running the SDSS models!')
            sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]
        # Eigenvectors
        eigen, eigen_wave = fbq.read_qso_eigen()
    else:
        infile = desisim.io.find_basis_template('qso')
        with fits.open(infile) as hdus:
            hdu_names = [hdus[ii].name for ii in range(len(hdus))]
            boss_pca_coeff = hdus[hdu_names.index('BOSS_PCA')].data
            sdss_pca_coeff = hdus[hdu_names.index('SDSS_PCA')].data
            boss_zQSO = hdus[hdu_names.index('BOSS_Z')].data
            sdss_zQSO = hdus[hdu_names.index('SDSS_Z')].data
            eigen = hdus[hdu_names.index('SDSS_EIGEN')].data
            eigen_wave = hdus[hdu_names.index('SDSS_EIGEN_WAVE')].data

    # Fiddle with the eigen-vectors
    npix = len(eigen_wave)
    chkpix = np.where((eigen_wave > 900.) & (eigen_wave < 5000.) )[0]
    lambda_912 = 911.76
    pix912 = np.argmin( np.abs(eigen_wave-lambda_912) )

    # Loop on redshift windows (or on the input redshift array)
    if redshift is None:
        z0 = np.arange(zmnx[0],zmnx[1],z_wind)
        z1 = z0 + z_wind
    else:
        if np.isscalar(redshift):
            z0 = np.array([redshift])
        else:
            z0 = redshift.copy()
        z1 = z0.copy() #+ z_wind


    pca_list = ['PCA0', 'PCA1', 'PCA2', 'PCA3']
    PCA_mean = np.zeros(4)
    PCA_sig = np.zeros(4)
    PCA_rand = np.zeros((4,N_perz*ipad))

    final_spec = np.zeros((npix, N_perz * len(z0)))
    final_wave = np.zeros((npix, N_perz * len(z0)))
    final_z = np.zeros(N_perz * len(z0))

    # Random state
    if rstate is None:
        rstate = np.random.RandomState(seed)

    for ii in range(len(z0)):

        # BOSS or SDSS?
        if z0[ii] > 2.15:
            zQSO = boss_zQSO
            pca_coeff = boss_pca_coeff
        else:
            zQSO = sdss_zQSO
            pca_coeff = sdss_pca_coeff

        # Random z values and wavelengths
        zrand = rstate.uniform( z0[ii], z1[ii], N_perz*ipad)
        wave = np.outer(eigen_wave, 1+zrand)

        # MFP (Worseck+14)
        mfp = 37. * ( (1+zrand)/5. )**(-5.4) # Physical Mpc

        # Grab PCA mean + sigma
        if redshift is None:
            idx = np.where( (zQSO >= z0[ii]) & (zQSO < z1[ii]) )[0]
        else:
            # Hack by @moustakas: add a little jitter to get the set of QSOs
            # that are *nearest* in redshift to the desired output redshift.
            idx = np.where( (zQSO >= z0[ii]-0.01) & (zQSO < z1[ii]+0.01) )[0]
            if len(idx) == 0:
                idx = np.array([(np.abs(zQSO-zrand[0])).argmin()])
                #pdb.set_trace()
        log.debug('Making z=({:g},{:g}) with {:d} input quasars'.format(z0[ii],z1[ii],len(idx)))

        # Get PCA stats and random values
        for jj,ipca in enumerate(pca_list):
            if jj == 0:  # Use bounds for PCA0 [avoids negative values]
                xmnx = perc(pca_coeff[ipca][idx], per=95)
                PCA_rand[jj, :] = rstate.uniform(xmnx[0], xmnx[1], N_perz*ipad)
            else:
                PCA_mean[jj] = np.mean(pca_coeff[ipca][idx])
                PCA_sig[jj] = np.std(pca_coeff[ipca][idx])
                # Draws
                PCA_rand[jj, :] = rstate.uniform( PCA_mean[jj] - 2*PCA_sig[jj],
                                        PCA_mean[jj] + 2*PCA_sig[jj], N_perz*ipad)

        # Generate the templates (ipad*N_perz)
        spec = np.dot(eigen.T, PCA_rand)

        # Take first good N_perz

        # Truncate, MFP, Fill
        ngd = 0
        nbad = 0
        for kk in range(ipad*N_perz):
            # Any zero values?
            mn = np.min(spec[chkpix, kk])
            if mn < 0.:
                nbad += 1
                continue

            # MFP
            if z0[ii] > 2.39:
                z912 = wave[0:pix912,kk]/lambda_912 - 1.
                phys_dist = np.fabs( cosmo.lookback_distance(z912) -
                                cosmo.lookback_distance(zrand[kk]) ) # Mpc
                spec[0:pix912, kk] = spec[0:pix912,kk] * np.exp(-phys_dist.value/mfp[kk])

            # Write
            final_spec[:, ii*N_perz+ngd] = spec[:,kk]
            final_wave[:, ii*N_perz+ngd] = wave[:,kk]
            final_z[ii*N_perz+ngd] = zrand[kk]
            ngd += 1
            if ngd == N_perz:
                break
        if ngd != N_perz:
            print('Did not make enough!')
            #pdb.set_trace()
            log.warning('Did not make enough qso templates. ngd = {}, N_perz = {}'.format(ngd,N_perz))

    # Rebin
    if rebin_wave is None:
        light = C_LIGHT        # [km/s]
        velpixsize = 10.            # [km/s]
        pixsize = velpixsize/light/np.log(10) # [pixel size in log-10 A]
        minwave = np.log10(wvmnx[0])          # minimum wavelength [log10-A]
        maxwave = np.log10(wvmnx[1])          # maximum wavelength [log10-A]
        r_npix = int(np.round((maxwave-minwave)/pixsize+1))

        log_wave = minwave+np.arange(r_npix)*pixsize # constant log-10 spacing
    else:
        log_wave = np.log10(rebin_wave)
        r_npix = len(log_wave)

    totN = N_perz * len(z0)
    rebin_spec = np.zeros((r_npix, totN))


    for ii in range(totN):
        # Interpolate (in log space)
        rebin_spec[:, ii] = resample_flux(log_wave, np.log10(final_wave[:, ii]), final_spec[:, ii])

    if outfil is None:
        return 10.**log_wave, rebin_spec, final_z

    # Transpose for consistency
    out_spec = np.array(rebin_spec.T, dtype='float32')

    # Write
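    # NOTE: the wavelength keywords below (CRVAL1, CDELT1, VELSCALE) use
    # minwave, pixsize and velpixsize, which are only defined when
    # rebin_wave is None (the default log-10 grid).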
    hdu = fits.PrimaryHDU(out_spec)
    hdu.header.set('PROJECT', 'DESI QSO TEMPLATES')
    hdu.header.set('VERSION', '1.1')
    hdu.header.set('OBJTYPE', 'QSO')
    hdu.header.set('DISPAXIS',  1, 'dispersion axis')
    hdu.header.set('CRPIX1',  1, 'reference pixel number')
    hdu.header.set('CRVAL1',  minwave, 'reference log10(Ang)')
    hdu.header.set('CDELT1',  pixsize, 'delta log10(Ang)')
    hdu.header.set('LOGLAM',  1, 'log10 spaced wavelengths?')
    hdu.header.set('AIRORVAC', 'vac', ' wavelengths in vacuum (vac) or air')
    hdu.header.set('VELSCALE', velpixsize, ' pixel size in km/s')
    hdu.header.set('WAVEUNIT', 'Angstrom', ' wavelength units')
    hdu.header.set('BUNIT', '1e-17 erg/s/cm2/A', ' flux unit')

    idval = list(range(totN))
    col0 = fits.Column(name='TEMPLATEID', format='J', array=idval)
    col1 = fits.Column(name='Z', format='E', array=final_z)
    cols = fits.ColDefs([col0, col1])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.header.set('EXTNAME','METADATA')

    hdulist = fits.HDUList([hdu, tbhdu])
    hdulist.writeto(outfil, overwrite=True)

    return final_wave, final_spec, final_z
Code example #50
def get_rr_model(coadd_fn, index, redrock_fn=None, ith_bestfit=1, use_targetid=False, coadd_cameras=False, restframe=False, z=None, return_z=False):
    '''
    Return the redrock model spectrum for one target.

    Args:
       coadd_fn: str, path of coadd FITS file
       index: int, row index in the coadd file if use_targetid=False, or TARGETID if use_targetid=True

    Options:
       redrock_fn: str, path of redrock FITS file (default: inferred from coadd_fn)
       ith_bestfit: int, 1 (default) uses the best fit from the REDSHIFTS HDU;
           other values select the i-th entry of the zfit table in the rrdetails HDF5 file
       use_targetid: bool, if True, index is a TARGETID
       coadd_cameras: bool, if True, the BRZ cameras are coadded together
       restframe: bool, if True, return the restframe spectrum on the template wavelength grid;
           if False, return the spectrum of the three cameras in the observed frame
       z: float or None, if None, use the redrock best-fit redshift
       return_z: bool, if True, also return the redshift
    '''
    # If use_targetid=False, index is the row index in the coadd file; if True, index is a TARGETID.

    from desispec.interpolation import resample_flux
    from desispec.io import read_spectra
    from astropy.table import Table
    import fitsio
    import numpy as np
    import redrock.templates

    templates = dict()
    for filename in redrock.templates.find_templates():
        tx = redrock.templates.Template(filename)
        templates[(tx.template_type, tx.sub_type)] = tx

    spec = read_spectra(coadd_fn)

    if redrock_fn is None:
        redrock_fn = coadd_fn.replace('/coadd-', '/redrock-')
    redshifts = Table(fitsio.read(redrock_fn, ext='REDSHIFTS'))

    if use_targetid:
        tid = index
        coadd_index = np.where(redshifts['TARGETID']==index)[0][0]
    else:
        tid = redshifts['TARGETID'][index]
        coadd_index = index

    if ith_bestfit==1:
        spectype, subtype = redshifts['SPECTYPE'][coadd_index], redshifts['SUBTYPE'][coadd_index]
        tx = templates[(spectype, subtype)]
        coeff = redshifts['COEFF'][coadd_index][0:tx.nbasis]
        if z is None:
            z = redshifts['Z'][coadd_index]
    else:
        import h5py
        rrdetails_fn = redrock_fn.replace('/redrock-', '/rrdetails-').replace('.fits', '.h5')
        f = h5py.File(rrdetails_fn, 'r')
        entry = f['zfit'][str(tid)]["zfit"]
        spectype, subtype = entry['spectype'][ith_bestfit].decode("utf-8"), entry['subtype'][ith_bestfit].decode("utf-8")
        tx = templates[(spectype, subtype)]
        coeff = entry['coeff'][ith_bestfit][0:tx.nbasis]
        if z is None:
            z = entry['z'][ith_bestfit]

    if not restframe:
        wave = dict()
        model_flux = dict()
        if coadd_cameras:
            cameras = ['BRZ']
        else:
            cameras = ['B', 'R', 'Z']
        model = tx.flux.T.dot(coeff).T
        for camera in cameras:
            wave[camera] = spec.wave[camera.lower()]
            mx = resample_flux(wave[camera], tx.wave*(1+z), model)
            model_flux[camera] = spec.R[camera.lower()][coadd_index].dot(mx)
    else:
        wave = tx.wave
        model_flux = tx.flux.T.dot(coeff).T

    if return_z:
        return wave, model_flux, z
    else:
        return wave, model_flux
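
A minimal usage sketch for get_rr_model (not from the original file); the coadd file name below is hypothetical and only illustrates the expected call pattern:

coadd_fn = 'coadd-0-1234-thru20210101.fits'   # hypothetical path
wave, model_flux, z = get_rr_model(coadd_fn, index=0, return_z=True)
# With the default options, wave and model_flux are dicts keyed by camera
# ('B', 'R', 'Z'); with restframe=True they are arrays on the template grid.
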
Code example #51
File: io.py Project: akremin/desisim
def _resample_flux(args):
    return resample_flux(*args)
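
This one-argument wrapper presumably exists so that resample_flux can be called through multiprocessing.Pool.map, which passes a single object to each worker. A minimal sketch under that assumption (the grid and input arrays are hypothetical):

import multiprocessing
import numpy as np

outwave = np.linspace(3600., 9800., 6000)                        # hypothetical common grid
arglist = [(outwave, wave[i], flux[i]) for i in range(nspec)]    # hypothetical per-spectrum inputs
with multiprocessing.Pool(8) as pool:
    resampled = pool.map(_resample_flux, arglist)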