def womreddening(hop):
    """redden or deredden with various reddening laws"""
    import matplotlib.pyplot as plt
    import extinction
    from tmath.wombat.inputter_single import inputter_single
    from tmath.wombat.inputter import inputter
    from tmath.wombat.yesno import yesno
    r_v=3.1
    print('Redden or deredden a spectrum')
    plt.cla()
    plt.plot(hop[0].wave,hop[0].flux,drawstyle='steps-mid',color='k')
    plt.xlabel('Wavelength')
    plt.ylabel('Flux')
    plt.title(hop[0].obname)
    flux=hop[0].flux.copy()
    action=inputter_single('(r)edden or (d)eredden the spectrum? (r/d) ', 'rd')
    print(' ')
    type=inputter_single('Do you want to enter the (c)olor excess, or (v)isual extinction? ','cv')
    print(' ')
    if (type == 'v'):
        av=inputter('Enter A_V in magnitudes: ','float',False)
    else:
        ebv=inputter('Enter E(B-V) in magnitudes: ','float',False)
        av=r_v*ebv
    print(' ')
    print('Do you want to use: ')
    print('(c)ardelli, Clayton, Mathis 1989')
    print("(o)'donnell 1994")
    print('(f)itzpatrick 1999\n')
    
    method=inputter_single('(c/o/f) ','cof')
    if (action == 'r'):
        if (method == 'c'):
            newflux=extinction.apply(extinction.ccm89(hop[0].wave,av,r_v),flux)
        elif (method == 'o'):
            newflux=extinction.apply(extinction.odonnell94(hop[0].wave,av,r_v),flux)
        else:
            newflux=extinction.apply(extinction.fitzpatrick99(hop[0].wave,av,r_v),flux)
    else:
        if (method == 'c'):
            ext=extinction.ccm89(hop[0].wave,av,r_v)
        elif (method == 'o'):
            ext=extinction.odonnell94(hop[0].wave,av,r_v)
        else:
            ext=extinction.fitzpatrick99(hop[0].wave,av,r_v)
        newflux=flux*10**(0.4*ext)
    plt.plot(hop[0].wave,newflux,drawstyle='steps-mid',color='r')
    print('\nOriginal spectrum in black, red/dered in red\n')
    print('Is this OK?\n')
    answer=yesno('y')
    if (answer == 'y'):
        hop[0].flux=newflux.copy()
        print('\nActive spectrum now changed')
    else:
        print('\nSorry to disappoint you, active spectrum unchanged')
    return hop
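A minimal standalone sketch (assuming only numpy and the extinction package; the wavelength grid and flat spectrum are placeholders) of the two flux factors this routine applies: extinction.apply multiplies by 10**(-0.4*A_lambda) to redden, and dereddening multiplies by 10**(0.4*A_lambda), which is what extinction.remove does.

import numpy as np
import extinction

wave = np.linspace(3500.0, 9000.0, 100)            # Angstroms, placeholder grid
flux = np.ones_like(wave)                          # flat test spectrum
a_lambda = extinction.ccm89(wave, 3.1 * 0.1, 3.1)  # A_V = R_V * E(B-V)

reddened = extinction.apply(a_lambda, flux)        # flux * 10**(-0.4 * a_lambda)
dereddened = flux * 10**(0.4 * a_lambda)           # same factor womreddening uses
assert np.allclose(extinction.remove(a_lambda, flux), dereddened)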
Example #2
    def apply_dust(self):

        import extinction

        for ii in range(self.n_zz):
            if self.dust['flag'] == 'calzetti':
                self.lum_em[:, ii] = extinction.apply(
                    extinction.calzetti00(self.wav_em, self.dust['Av'], 4.05),
                    self.lum_em[:, ii])
            elif self.dust['flag'] == 'cardelli':
                self.lum_em[:, ii] = extinction.apply(
                    extinction.ccm89(self.wav_em, self.dust['Av'], 4.05),
                    self.lum_em[:, ii])
            elif self.dust['flag'] == 'odonnell':
                self.lum_em[:, ii] = extinction.apply(
                    extinction.odonnell94(self.wav_em, self.dust['Av'], 4.05),
                    self.lum_em[:, ii])
            elif self.dust['flag'] == 'fitzpatrick':
                self.lum_em[:, ii] = extinction.apply(
                    extinction.fitzpatrick99(self.wav_em, self.dust['Av'],
                                             3.1), self.lum_em[:, ii])
            elif self.dust['flag'] == 'fitzpatrick07':
                self.lum_em[:, ii] = extinction.apply(
                    extinction.fm07(self.wav_em, self.dust['Av']),
                    self.lum_em[:, ii])
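A hedged standalone sketch of the same flag-to-law dispatch, reduced to a single 1-D luminosity array; the wavelength grid, A_V, and flag below are illustrative.

import numpy as np
import extinction

wav_em = np.linspace(1500.0, 20000.0, 200)   # Angstroms, illustrative grid
lum_em = np.ones_like(wav_em)
dust = {'flag': 'calzetti', 'Av': 0.5}       # same keys as self.dust above

laws = {
    'calzetti':      lambda w, av: extinction.calzetti00(w, av, 4.05),
    'cardelli':      lambda w, av: extinction.ccm89(w, av, 4.05),
    'odonnell':      lambda w, av: extinction.odonnell94(w, av, 4.05),
    'fitzpatrick':   lambda w, av: extinction.fitzpatrick99(w, av, 3.1),
    'fitzpatrick07': lambda w, av: extinction.fm07(w, av),
}
lum_dusty = extinction.apply(laws[dust['flag']](wav_em, dust['Av']), lum_em)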
Example #3
    def _model2_av(self, lam, p):
        '''
        Return units: erg s-1 A-1, since the per-area blackbody flux is
        multiplied by the area of the emitting source (in cm**2).
        '''
        
        T1 = p[0] * u.K
        R1 = (p[1] * u.Rsun).to(u.cm)
        a_v = p[2]
        T2 = p[3] * u.K
        R2 = (p[4] * u.Rsun).to(u.cm)

        #Compute the effect of reddening as a flux factor
        flux_red =  10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
        lam = lam * u.Angstrom
        

        area1 = np.pi * (4 * np.pi * R1**2)
        area2 = np.pi * (4 * np.pi * R2**2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            flam1 =  area1 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
            (np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T1))-1)
            flam2 =  area2 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
            (np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T2))-1)
            
        flam = flam1 + flam2
        #Apply the reddening
        flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
        
                
        return flam
Example #4
    def _model2_av_r(self, lam, p):
        '''
        Return units: erg s-1 A-1
        '''

    
        T1 = p[0] #In K
        R1 = p[1]*69570000000.0 #From Rsun to cm
        a_v = p[2]
        T2 = p[3]
        R2 = p[4]*69570000000.0 #From Rsun to cm
        
        lam = lam * 1e-8 #To cm
    
        if a_v < 0:
            return lam * np.inf
            
        #We need an extra pi as it is integrated across all steradians
        #The second factor is the surface of the black body
        #The third one is the Planck law
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            flam1 =  np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
            flam2 =  np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
    
        #Compute the effect of reddening as a flux factor
        flux_red =  10**(-0.4 * extinction.fitzpatrick99(lam*1e8, a_v, unit='aa'))
        
        #Apply the reddening and convert from per-cm to per-Angstrom
        flam = (flam1 + flam2) * flux_red * 1e-8  # erg / s / A

        return flam
Example #5
def test_fitzpatrick99_func():
    """Check passing R_V."""

    wave = np.array([2000., 30000.])
    for r_v in (3.1, 4.0):
        assert np.all(extinction.Fitzpatrick99(r_v)(wave, 1.0) ==
                      extinction.fitzpatrick99(wave, 1.0, r_v))
Example #6
def add_ext(x, z_ext=0, Av=0, kind='SMC'):
    """
    Calculate the extinction correction at a given redshift.
    Parameters:
        - x         : the wavelength grid (Angstroms)
        - z_ext     : redshift at which the extinction is applied
        - Av        : V-band extinction in magnitudes
        - kind      : type of extinction curve; 'SMC', 'LMC', 'MW', or 'fitzpatrick99'

    Return:
        - corr      : multiplicative correction array at each x.
    """
    if kind in ['SMC', 'LMC']:
        et = {'SMC': 2, 'LMC': 6}
        data = np.genfromtxt('data/extinction.dat',
                             skip_header=3,
                             usecols=[0, et[kind]],
                             unpack=True)
        inter = interp1d(data[0] * 1e4, data[1], fill_value='extrapolate')
        return np.exp(-0.92 * Av * inter(x / (1 + z_ext)))

    elif kind in ['MW', 'fitzpatrick99']:
        return 10**(
            -0.4 *
            fitzpatrick99(np.asarray(x, dtype=np.float64) / (1 + z_ext), Av))
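A hedged usage sketch of the 'MW' branch above: the returned array is the multiplicative transmission factor 10**(-0.4 * A_lambda), with the Fitzpatrick99 curve evaluated at the extinction-frame wavelengths x / (1 + z_ext). The grid and parameter values are illustrative.

import numpy as np
from extinction import fitzpatrick99

x = np.linspace(4000.0, 9000.0, 50)        # observed-frame wavelengths, Angstroms
z_ext, Av = 0.5, 0.3                       # illustrative values
corr = 10**(-0.4 * fitzpatrick99(np.asarray(x, dtype=np.float64) / (1 + z_ext), Av))
attenuated_flux = np.ones_like(x) * corr   # multiply a model flux by the correction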
Example #8
def generate_resampled_data(HDULIST, z, a_v, params, survey_name):
    """Generate the wavelengths, flux, and flux error used in STARLIGHT input
    files.
    
    This function reads in wavelength, flux, and flux error data from the fits
    file. The wavelengths are shifted into rest frame and resampled uniformly
    with a step of 1 angstrom. The flux and flux error data is shifted into
    rest frame, dereddened, and resampled uniformly. The resampled arrays will
    appear in the STARLIGHT input files in the form of columns.
    
    Args:
        HDULIST     : the opened fits file from which data is read in
        z           : the redshift parameter
        a_v         : extinction parameter specific to each data set
        params      : survey parameters (wavelength grid, flux indices, flux
                      factor, and the Milky Way r_v, fixed at 3.1)
        survey_name : survey identifier, either "MANGA" or "PMAS"
        
    Returns:
        resampled_wavelength  : array of wavelengths resampled
        resampled_flux        : array of flux values resampled
        resampled_eflux       : array of flux error values resampled
    """
    
    last_wavelength = params.initial_wavelength + (params.step * params.wavelength_axis)
    num_of_elements = int(round((last_wavelength - params.initial_wavelength) / params.step))
    wavelength = np.linspace(params.initial_wavelength, last_wavelength, num_of_elements, endpoint=True)
    
    restframed_wavelength = wavelength / (1 + z)
    number_of_wavelengths = np.floor(max(restframed_wavelength)) - np.ceil(min(restframed_wavelength))
    resampled_wavelength = np.arange(number_of_wavelengths) + np.ceil(restframed_wavelength[0])

    
    flux = HDULIST[params.flux_index].data * params.factor
    eflux = np.sqrt(1./ HDULIST[params.eflux_index].data) * params.factor
    
    if survey_name == "MANGA":
        a_lambda = extinction.fitzpatrick99(wavelength, a_v, params.r_v)
        correction_factor = 10**(0.4*a_lambda)
        
    elif survey_name == "PMAS":
        correction_factor = 1
        
    resampled_flux = np.empty((len(resampled_wavelength), flux.shape[1], flux.shape[2]))
    resampled_eflux = np.empty((len(resampled_wavelength), flux.shape[1], flux.shape[2]))
    
    for i in range(flux.shape[1]):
        for j in range(flux.shape[2]):
            unreddened_flux = flux[:, i, j] * (1 + z) * correction_factor
            unreddened_eflux = eflux[:, i, j] * (1 + z) * correction_factor
            
            resampled_flux[:, i, j] = np.interp(resampled_wavelength, 
                                                restframed_wavelength, 
                                                unreddened_flux)
            resampled_eflux[:, i, j] = np.interp(resampled_wavelength, 
                                                restframed_wavelength,
                                                unreddened_eflux)
            
    return resampled_wavelength, resampled_flux, resampled_eflux
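A minimal single-spectrum sketch of the same deredden-and-resample step (variable values are illustrative; the full function above applies this spaxel by spaxel): the extinction curve is evaluated at the observed wavelengths, the flux is scaled by (1 + z) and the correction factor, and the result is interpolated onto a 1-Angstrom rest-frame grid.

import numpy as np
import extinction

z, a_v, r_v = 0.03, 0.1, 3.1
wavelength = np.arange(4000.0, 9000.0, 1.25)     # observed-frame grid, Angstroms
flux = np.ones_like(wavelength)                  # placeholder spectrum

restframed_wavelength = wavelength / (1 + z)
correction_factor = 10**(0.4 * extinction.fitzpatrick99(wavelength, a_v, r_v))
resampled_wavelength = np.arange(np.ceil(restframed_wavelength[0]),
                                 np.floor(restframed_wavelength[-1]) + 1.0)
resampled_flux = np.interp(resampled_wavelength, restframed_wavelength,
                           flux * (1 + z) * correction_factor)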
Example #9
    def _model_accretion_disk(self, lam, p):
        '''
        Equation 1 from Kenyon, Hartmann, Hewett 1988.
        '''    
        Mstar = p[0]
        Rstar = p[1]
        Macc = p[2]
        scale = p[3]
        a_v = p[4]
        
        R = np.linspace(1,20,100)
        R = R[R>Rstar]
        if len(R)==0 or Mstar<0 or Macc<0 or Rstar<0 or a_v<0 or scale<0:
            return np.ones(len(lam))*np.inf
            
        F_r = (3 * cnt.G * Mstar * u.Msun * Macc * u.Msun/u.year / 8 / np.pi / (u.Rsun*Rstar)**3) * (Rstar/R)**3 * (1 - (Rstar/R)**0.5)
        F_r = F_r.to(u.erg/u.cm**2/u.s)
        T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
        
        T_max = 13000 * u.K *(Mstar)**0.25 * (Macc / 1e-5)**0.25 * (Rstar)**-0.75
        
        #Create the disk model
        #For each differential radius, we compute the black body spectrum corresponding
        # to the temperature at that radius, and scale it by the flux expected at that
        # radius.
        
        disk_model = []
        for i, ri in enumerate(R):
            if ri >= 1 and ri<=1.5:
                sp = ps.BlackBody(T_max.value)
                #sp = ps.BlackBody(T_r[i].value)

            else:
                sp = ps.BlackBody(T_r[i].value)
            sp.convert('flam')
            tot_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
            scaled_flux = sp.flux / tot_flux * F_r[i].value
            disk_model.append(scaled_flux)
            
        disk = np.array(disk_model)
        disk = np.nansum(disk, axis=0)
        
        sp = ps.ArraySpectrum(sp.wave, disk)
    
        #int_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
        int_flux = np.max(sp.flux)


        #Normalize the integral flux to 1
        flux_norm= sp.flux /int_flux
        #sp_norm = ps.ArraySpectrum(sp.wave, flux_norm)
                
        flux_norm =  np.interp(lam, sp.wave, flux_norm)
        flux_red =  10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))

        return scale *flux_norm * flux_red
Example #10
def Spec_mags(Models, pbs, av=0, Rv=3.1, Conversion=1.029):
    """
    Generate synthetic magnitudes from the models and passbands added.
    Conversion converts between E(B-V) and E(g-r); the Green et al. value is 0.981,
    but the best-fit value was found to be 1.029.
    """
    #a_v = 3.1*(Conversion * ex ) # ex = extinction from Bayestar19 = Egr
    keys = list(pbs.keys())
    mags = {}
    for key in keys:
        mags[key] = []

        pb, zp = pbs[key]

        # construct mags
        ind = []
        red = {}
        for model in Models:
            if av > 0:
                model = S.ArraySpectrum(model.wave,
                                        apply(
                                            fitzpatrick99(model.wave, av, Rv),
                                            model.flux),
                                        waveunits=model.waveunits,
                                        fluxunits=model.fluxunits)
            if av < 0:
                model = S.ArraySpectrum(model.wave,
                                        remove(
                                            fitzpatrick99(model.wave, -av, Rv),
                                            model.flux),
                                        waveunits=model.waveunits,
                                        fluxunits=model.fluxunits)
            mags[key] += [source_synphot.passband.synphot(model, pb, zp)]

    for key in keys:
        mags[key] = np.array(mags[key])

    #good = np.ones(len(mags[key])) > 0
    #for key in keys:
    #    good = good *np.isfinite(mags[key])
    #for key in keys:
    #    mags[key] = mags[key][good]
    return mags
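A small standalone check of the sign convention relied on above (wavelengths and A_V are placeholders): extinction.apply dims a spectrum by 10**(-0.4 * A_lambda), and extinction.remove with the same Fitzpatrick99 curve undoes it exactly, which is what the av < 0 branch exploits.

import numpy as np
from extinction import fitzpatrick99, apply, remove

wave = np.linspace(4000.0, 9000.0, 20)
flux = np.ones_like(wave)
a_lam = fitzpatrick99(wave, 0.2, 3.1)
assert np.allclose(remove(a_lam, apply(a_lam, flux)), flux)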
Example #11
def test_fitzpatrick99_knots():
    """Test that knots match values in Fitzpatrick (1999) Table 3 for
    fitzpatrick99 function (with R_V = 3.1)"""

    wave = np.array([np.inf, 26500., 12200., 6000., 5470., 4670., 4110.,
                     2700., 2600.])
    x = np.array([0.0, 0.377, 0.820, 1.667, 1.828, 2.141, 2.433, 3.704,
                  3.846])
    
    # A(lambda) values for E(B-V) = 1 or A_V = 3.1
    ref_values = np.array([0.0, 0.265, 0.829, 2.688, 3.055, 3.806, 4.315, 6.265,
                           6.591])

    assert_allclose(extinction.fitzpatrick99(wave, 3.1, unit='aa'), ref_values,
                    rtol=0., atol=0.001)

    # atol = 0.002 because the input values are less precise (e.g., 0.377
    # rather than 1.e4 / 26500.)
    assert_allclose(extinction.fitzpatrick99(x, 3.1, unit='invum'), ref_values,
                    rtol=0., atol=0.002)
Example #12
    def _remove_flux_extinction(self):
        """
        Remove extinction for light curve assuming Fitzpatrick '99 reddening
        law, given some value of E(B-V)
        """
        self.fluxUnred = self.flux.copy()
        self.fluxErrUnred = self.fluxErr.copy()
        self.fluxRenorm = self.flux.copy()
        self.fluxErrRenorm = self.fluxErr.copy()

        # Using negative a_v so that extinction.apply works in reverse and removes the extinction
        if self.mwebv:
            extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \
                                                   a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')

        for i, pb in enumerate(self._good_filters):
            mask = (self.passband == pb)

            flux_pb = self.flux[mask]
            fluxerr_pb = self.fluxErr[mask]
            npbobs = len(flux_pb)

            if npbobs < 1:
                return

            if self.mwebv:
                flux_out = extinction.apply(extinctions[i],
                                            flux_pb,
                                            inplace=False)
                fluxerr_out = extinction.apply(extinctions[i],
                                               fluxerr_pb,
                                               inplace=False)
            else:
                flux_out = flux_pb
                fluxerr_out = fluxerr_pb
            # store the de-extincted fluxes for this passband in both cases
            self.fluxUnred[mask] = flux_out
            self.fluxErrUnred[mask] = fluxerr_out

            if npbobs > 1:
                # there's at least enough observations to find minimum and maximum
                minfluxpb = flux_out.min()
                maxfluxpb = flux_out.max()
                norm = maxfluxpb - minfluxpb
                self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm
                self.fluxErrRenorm[mask] = fluxerr_out / norm
            elif npbobs == 1:
                # deal with the case with one observation in this passband by setting renorm = 0.5
                norm = self.fluxUnred[mask] / 0.5
                self.fluxRenorm[mask] /= norm
                self.fluxErrRenorm[mask] /= norm

        self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \
                              'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']
        return
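A hedged sketch of the negative-a_v trick noted in the comment above (wavelengths, fluxes, and E(B-V) are made up): because the Fitzpatrick99 curve scales linearly with a_v, applying a curve computed with -A_V restores a spectrum that was dimmed with +A_V.

import numpy as np
import extinction

wave = np.array([4770.0, 6231.0, 7625.0])   # hypothetical effective wavelengths
flux = np.array([1.0, 2.0, 3.0])
mwebv = 0.05
dimmed = extinction.apply(extinction.fitzpatrick99(wave, 3.1 * mwebv, 3.1), flux)
restored = extinction.apply(extinction.fitzpatrick99(wave, -3.1 * mwebv, 3.1), dimmed)
assert np.allclose(restored, flux)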
Example #14
def Spec_mags(Models, pbs, ex=0, Conversion=1.029):
    """
    Generate synthetic magnitudes from the models and passbands added.
    Conversion converts between E(B-V) and E(g-r); the Green et al. value is 0.981,
    but the best-fit value was found to be 1.029.
    """
    a_v = 3.1 * (Conversion * ex)  # ex = extinction from Bayestar19 = Egr
    pbg, zpg = pbs['ps1g']
    pbr, zpr = pbs['ps1r']
    pbi, zpi = pbs['ps1i']
    pbz, zpz = pbs['ps1z']
    pbk, zpk = pbs['Kep']

    mg = []
    mr = []
    mi = []
    mz = []
    mk = []
    # construct mags
    ind = []
    red = {}
    for modelname in Models:
        model = Models[modelname]
        model = S.ArraySpectrum(model.wave,
                                apply(
                                    fitzpatrick99(model.wave.astype('double'),
                                                  a_v, 3.1), model.flux),
                                name=modelname)
        mg += [source_synphot.passband.synphot(model, pbg, zpg)]
        mr += [source_synphot.passband.synphot(model, pbr, zpr)]
        mi += [source_synphot.passband.synphot(model, pbi, zpi)]
        mz += [source_synphot.passband.synphot(model, pbz, zpz)]
        mk += [source_synphot.passband.synphot(model, pbk, zpk)]

    mg = np.array(mg)
    mr = np.array(mr)
    mi = np.array(mi)
    mz = np.array(mz)
    mk = np.array(mk)
    good = np.isfinite(mg) & np.isfinite(mr) & np.isfinite(mi) & np.isfinite(
        mz) & np.isfinite(mk)
    d = {
        'g': mg[good],
        'r': mr[good],
        'i': mi[good],
        'z': mz[good],
        'k': mk[good]
    }
    return d
Example #15
    def _band2flux(self):
        '''
        Will transform the magnitude measurement into a flux measurement. 
        '''

        wls = np.array([])        
        fluxes = np.array([])
        fluxerr = np.array([])
        
        #Create a black body spectrum with an arbitrary value
        sp = ps.BlackBody(10000)
        sp.convert('flam')

        
        for b, m, me, psys in zip(self.bands, self.mags, self.magerrs, self.photsys):
            
            #Create the observation bandpass
            try:
                band = ps.ObsBandpass(b)
            except ValueError:
                #The band is not in the standard list
                #We need to go to the dictionary to retrieve the transmission function.
                band = ps.FileBandpass(self.banddic[b])
                #band.waveunits = ps.units.Angstrom
            
            #Obtain the effective (average) wavelength
            effwave = band.avgwave()
            wls = np.append(wls, effwave)
            
            #Correct for Milky Way extinction
            m = m - extinction.fitzpatrick99(np.array([effwave]), a_v=self.av_mw, unit='aa')[0]
            
            #Normalize the spectrum to the magnitude of the observation
            sp_norm = sp.renorm(m, psys, band)
            #Observe with the band
            obs = ps.Observation(sp_norm, band)
            #Get the flux
            flux = obs.effstim('flam')
            fluxes = np.append(fluxes, flux) 
            
            #Compute the error bars
            flux_high = flux * 10**(0.4*me)
            flux_low = flux * 10**(-0.4*me)
            
            fluxerr = np.append(fluxerr, np.average([flux - flux_low, flux_high-flux]))

        return wls, fluxes, fluxerr
Example #16
def w_comparison(ax):
    """ Compare spec with W feature """
    files, epochs, tels = get_files(7, 8)
    f = files[0]
    tel = tels[0]
    dt = epochs[0]
    wl, flux, ivar = load_spec(f, tel)
    wl, flux = fluxcal(wl, flux, dt)
    wl, flux = clip_lines(wl, flux, z, tel, dt)
    wl, flux = clip_tellurics(wl, flux)
    scale = flux[wl > 4100][0]
    plot_spec(ax, wl / (1 + z), flux / scale, tel, dt)
    smoothed = plot_smoothed_spec(ax,
                                  wl / (1 + z),
                                  flux / scale,
                                  ivar,
                                  tel,
                                  dt,
                                  lw=1.0,
                                  text=False,
                                  label='18gep, +4.2d, $T=20$\,kK',
                                  c='#e55c30')
    dat = np.loadtxt(SPEC_DIR + "/2008d.txt", delimiter=',')
    x = dat[:, 0]
    y = dat[:, 1]
    ext = fitzpatrick99(x + 100, 0.63)
    ax.plot(x - 100,
            y / 0.1 + ext - 2.0,
            lw=0.5,
            c='#84206b',
            label="SN2008D, +1.4d, $T=11$\,kK")
    dat = np.loadtxt(SPEC_DIR + "/ptf12dam.txt", delimiter=',')
    x = dat[:, 0]
    y = dat[:, 1]
    ax.plot(x - 750,
            y / 2 + 0.2,
            lw=1,
            c='#f6d746',
            ls='-',
            label="12dam, -25d, $T=15$--20\,kK")
    ax.legend(fontsize=12, loc='upper right')
    ax.set_ylim(-0.4, 2.5)
    ax.set_ylabel(r"Scaled $F_{\lambda}$ + cst.", fontsize=16)
Example #17
def GetEBVhost(fitcat_name, Rv) :
    """
    fitcat_name = 'SN2019ein-PolyFitRes.dat'
    Rv=3.1
    Read in the results of the polynomial fit.
    ==== Host galaxy extinction (Phillips+99) ====
    E(B-V)host = (Bmax - Vmax)c - (Bmax - Vmax)0
    (Bmax - Vmax)c = (Bmax - Vmax)obs - E(B-V)gal - K_BmaxVmax
    (Bmax - Vmax)0 = -0.07(+/-0.012) + 0.114(+/-0.037)*(dB15 - 1.1), sigma = 0.03
    BVRIJHK = 01234567
    """
    import extinction
    fitcat  = ascii.read(fitcat_name)
    MAGmax  = fitcat['MAGmax']; MAGmaxErr = fitcat['MmaxErr']
    Bmax    = MAGmax[0]       ; Vmax      = MAGmax[1]
    BmaxErr = MAGmaxErr[0]    ; VmaxErr   = MAGmaxErr[1]
    dM15    = fitcat['dM15']  ; dM15Err   = fitcat['dM15Err']
    dB15    = dM15[0]         ; dB15Err  = dM15Err[0]
    BVmaxObs      = Bmax - Vmax # Galactic extinction corrected
    # BVmaxObs here is BVmaxobs - (A_B-A_V) in the previous code.
    BVmaxObsErr   = np.sqrt(BmaxErr**2 + VmaxErr**2)
    K_BmaxVmax    = 0. # Assumption for a local object.
    BVmax0        = -0.07 + 0.114*(dB15 - 1.1)
    BVmax0_Sigma  = 0.03
    BVmax0Err     = np.sqrt(BVmax0_Sigma**2 + dB15Err**2)
    BVmaxc        = BVmaxObs - K_BmaxVmax
    EBVhost       = BVmaxc - BVmax0
    EBVhostErr    = np.sqrt(BVmaxObsErr**2 + BVmax0Err**2)
    Rv_gal    = Rv; Rv_HV = 1.55 # +/-0.06 (Wang+09)
    Ahost_V   = EBVhost * Rv_gal
    FilterSet = np.array([4400., 5400, 6500.])#, 8797., 12483.0, 16313.0, 22010.0])
    Ahost     = extinction.fitzpatrick99(FilterSet, Ahost_V, Rv_gal)
    print('*** Host Galaxy Extinction (Phillips+99) Results ***')
    print('E(B-V)host = (Bmax - Vmax)c - (Bmax - Vmax)0')
    print('(Bmax - Vmax)c = (Bmax -Vmax)obs - E(B-V)gal - K_BmaxVmax')
    print('(Bmax - Vmax)0 = -0.07(+/-0.012) +0.114(+/-0.037)*(dB15-1.1), sigma = 0.03')
    print('(Bmax - Vmax)obs = {}+/-{} (MW corrected)'.format(round(BVmaxObs, 3), round(BVmaxObsErr, 3)))
    print('(Bmax - Vmax)c = {} (K-correction = {})'.format(round(BVmaxc, 3), K_BmaxVmax))
    print('E(B-V)host = {}+/-{}'.format(round(EBVhost,3), round(EBVhostErr, 3)))
    return {'EBVhost': EBVhost, 'EBVhostErr': EBVhostErr, 'Ahost': Ahost}
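A hedged numeric sketch of the relations quoted in the docstring, with made-up peak magnitudes (illustrative only, not taken from any fit file):

Bmax, Vmax, dB15 = 14.00, 13.95, 1.00   # hypothetical MW-corrected peak mags and dB15
BVmax0 = -0.07 + 0.114 * (dB15 - 1.1)   # intrinsic peak colour
EBVhost = (Bmax - Vmax) - BVmax0        # K_BmaxVmax assumed 0, as above
Ahost_V = 3.1 * EBVhost                 # host A_V for R_V = 3.1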
Example #18
def get_ext():
    """ Load the extinction """
    bands = ['UVW2', 'UVM2', 'UVW1', 'U', 'u', 'B', 'g', 'V', 'r', 'i', 'z']
    wl = {}
    wl['UVW2'] = 1928
    wl['UVM2'] = 2246
    wl['UVW1'] = 2600
    wl['U'] = 3465
    wl['u'] = 3543
    wl['B'] = 4392
    wl['g'] = 4770
    wl['V'] = 5468
    wl['r'] = 6231
    wl['i'] = 7625
    wl['z'] = 9134

    ext = {}
    for band in bands:
        ext[band] = extinction.fitzpatrick99(np.array([wl[band]]), 0.029,
                                             3.1)[0]

    return ext
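A hedged usage sketch (assuming get_ext above is defined in the same script, with numpy and extinction importable): the returned dictionary gives A_lambda in magnitudes at each band's effective wavelength for A_V = 0.029, which can be subtracted from observed magnitudes.

import numpy as np
import extinction

ext = get_ext()
observed_mag = {'g': 18.40, 'r': 18.10}       # hypothetical photometry
corrected_mag = {b: observed_mag[b] - ext[b] for b in observed_mag}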
Example #19
    def _model_powerlaw(self, lam, p):
        '''
        Return units: erg s-1 A-1, since the model flux is multiplied
        by the area of the emitting source (in cm**2).
        '''
        
        lam = lam * u.Angstrom
        
        w0 = 4000 #p[0] #Reference wavelength
        alpha = p[0]
        R1 = p[1]
        a_v = p[2]
            
        f = ps.PowerLaw(w0, alpha)
        f.convert('flam')
        
        flam = np.interp(lam, f.wave, f.flux)
        
        flux_red =  10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
        area = 10**R1

        return area * flam * flux_red #.to(u.erg/u.s/u.Angstrom).value
Example #20
def deredden_spectra(wave, coords):
    """
    Apply S&F 2011 Extinction from SFD Map
    https://iopscience.iop.org/article/10.1088/0004-637X/737/2/103#apj398709t6

    Parameters
    ----------
    wave      array
        wavelength to apply correction
    coords    SkyCoord object
        sky coordinates
    """
    Rv = 3.1
    corr_SF2011 = 2.742  # Landolt V
    
    sfd = SFDQuery()
    ebv = sfd(coords)
    Av = corr_SF2011 * ebv
    ext = extinction.fitzpatrick99(np.array(wave, dtype=np.double), Av, Rv)

    deredden = 10**(0.4*np.array(ext))

    return deredden
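A hedged usage sketch (assumes the dustmaps package with its SFD data files fetched, astropy available, and deredden_spectra defined in the same script): multiply an observed flux by the returned factor to deredden it.

import numpy as np
import extinction
from astropy.coordinates import SkyCoord
from dustmaps.sfd import SFDQuery

wave = np.linspace(3800.0, 9200.0, 100)            # Angstroms
coords = SkyCoord(ra=150.0, dec=2.2, unit='deg')   # hypothetical target position
flux_observed = np.ones_like(wave)                 # placeholder spectrum
flux_dereddened = flux_observed * deredden_spectra(wave, coords)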
Example #21
    def _model_av(self, lam, p):
        '''
        Return units: erg s-1 A-1, since the per-area blackbody flux is
        multiplied by the area of the emitting source (in cm**2).
        '''
        
        T = p[0] * u.K
        R = (p[1] * u.Rsun).to(u.cm)
        a_v = p[2]

        #Compute the effect of reddening as a flux factor
        flux_red =  10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
        lam = lam * u.Angstrom
        

        area = np.pi * (4 * np.pi * R**2)
        flam =  area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
            (np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
        
        #Apply the reddening
        flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
        
                
        return flam
Example #22
    def get_table(subsurvey):
        t_survey = Table()
        bit_value = bitlist[subsurvey]
        sel = (target_bit & 2**bit_value != 0)

        def create_column(col_name):
            return hd_all[1].data[col_name][sel_all][sel]

        ra_array = create_column('ra')
        dec_array = create_column('dec')
        N_obj = len(ra_array)
        if N_obj > 0:
            N1 = n.arange(N_obj)
            id_list = HEALPIX_id * 1e8 + N1
            NAME = n.array([str(int(el)).zfill(11) for el in id_list])
            t_survey.add_column(Column(name='NAME', data=NAME, unit=''))
            t_survey.add_column(Column(name='RA', data=ra_array, unit='deg'))
            t_survey.add_column(Column(name='DEC', data=dec_array, unit='deg'))
            PMRA = n.zeros(N_obj)
            t_survey.add_column(Column(name='PMRA', data=PMRA, unit='mas/yr'))
            PMDEC = n.zeros(N_obj)
            t_survey.add_column(Column(name='PMDEC', data=PMDEC,
                                       unit='mas/yr'))
            EPOCH = n.ones(N_obj) * 2000.
            t_survey.add_column(Column(name='EPOCH', data=EPOCH, unit='yr'))
            # 'RESOLUTION':n.int16, 1I
            RESOLUTION = n.ones(N_obj).astype('int')
            t_survey.add_column(
                Column(name='RESOLUTION', data=RESOLUTION, unit=''))

            SUBSURVEY = n.ones(N_obj).astype('str')
            SUBSURVEY[:] = subsurvey
            t_survey.add_column(
                Column(name='SUBSURVEY', data=SUBSURVEY, unit=''))
            # 'PRIORITY':n.int16, 1I
            PRIORITY = n.zeros(N_obj).astype(
                'int') + priority_values[subsurvey]
            t_survey.add_column(Column(name='PRIORITY', data=PRIORITY,
                                       unit=''))

            galactic_ebv_array = create_column('galactic_ebv')
            t_survey.add_column(
                Column(name='REDDENING', data=galactic_ebv_array, unit='mag'))

            # REDDENING for templates
            ebv_1000 = (t_survey['REDDENING'] * 1000).astype('int')
            #print('EBV', n.min(ebv_1000), n.max(ebv_1000))
            ebv_1_0 = (ebv_1000 > 1000)
            ebv_0_5 = (ebv_1000 > 500) & (ebv_1000 <= 1000)
            ebv_0_4 = (ebv_1000 > 400) & (ebv_1000 <= 500)
            ebv_0_3 = (ebv_1000 > 300) & (ebv_1000 <= 400)
            ebv_0_2 = (ebv_1000 > 200) & (ebv_1000 <= 300)
            ebv_0_1 = (ebv_1000 > 100) & (ebv_1000 <= 200)
            ebv_0_0 = (ebv_1000 <= 100)
            z_name = lambda z0, z1: "_zmin_" + str(int(10 * z0)).zfill(
                2) + "_zmax_" + str(int(10 * z1)).zfill(2)
            # templates
            template_names = n.zeros(N_obj).astype('U100')
            #
            ruleset_array = n.zeros(N_obj).astype('str')
            #
            if subsurvey == "AGN_WIDE" or subsurvey == "AGN_DEEP" or subsurvey == "AGN_IR":
                ruleset_array[:] = "AGN_ALL_3PC"
            if subsurvey == "QSO" or subsurvey == "LyA":
                ruleset_array[:] = "COSMO_AGN"

            AGN_type_array = create_column('AGN_type')
            AGN_random_number_array = create_column('AGN_random_number')
            z_array = create_column('redshift_R')

            QSO = (AGN_type_array == 11) | (AGN_type_array == 12)
            T2 = (AGN_type_array == 22) | (AGN_type_array == 21)
            ELL = (T2) & (AGN_random_number_array < 0.2)

            for z0, z1 in zip(zmins, zmaxs):
                zsel = (z_array >= z0) & (z_array < z1)
                template_names[(zsel)] = "4most_" + 'qso_BL' + z_name(
                    z0, z1) + '_EBV_0_01.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_0)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_01.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_1)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_1.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_2)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_2.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_3)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_3.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_4)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_4.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_0_5)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_0_5.fits'
                template_names[(QSO) & (zsel) &
                               (ebv_1_0)] = "4most_" + 'qso_BL' + z_name(
                                   z0, z1) + '_EBV_1_0.fits'
                if z1 < 2.2:
                    template_names[(T2) & (
                        zsel) & (ebv_0_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_01.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_1)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_1.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_2)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_2.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_3)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_3.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_4)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_4.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_5)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_5.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_1_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_1_0.fits'

                    template_names[(ELL) & (zsel) &
                                   (ebv_0_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_01.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_0_1)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_1.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_0_2)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_2.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_0_3)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_3.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_0_4)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_4.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_0_5)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_5.fits'
                    template_names[(ELL) & (zsel) &
                                   (ebv_1_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_1_0.fits'
                if z1 >= 2.2 and z1 < 6.:
                    template_names[(T2) & (
                        zsel) & (ebv_0_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_01.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_1)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_1.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_2)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_2.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_3)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_3.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_4)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_4.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_0_5)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_5.fits'
                    template_names[(T2) & (
                        zsel) & (ebv_1_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_1_0.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_01.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_1)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_1.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_2)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_2.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_3)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_3.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_4)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_4.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_0_5)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_0_5.fits'
                    template_names[(ELL) & (
                        zsel) & (ebv_1_0)] = "4most_" + 'AGN_type2' + z_name(
                            z0, z1) + '_EBV_1_0.fits'

            # 'TEMPLATE':str, max 256 char
            t_survey.add_column(
                Column(name='TEMPLATE', data=template_names, unit=''))
            # 'RULESET':str, max 256 char
            t_survey.add_column(
                Column(name='RULESET', data=ruleset_array, unit=''))
            # 'REDSHIFT_ESTIMATE':n.float32, 1E
            # 'REDSHIFT_ERROR':n.float32, 1E
            t_survey.add_column(
                Column(name='REDSHIFT_ESTIMATE', data=z_array, unit=''))
            t_survey.add_column(
                Column(name='REDSHIFT_ERROR', data=n.ones(N_obj), unit=''))
            # 'EXTENT_FLAG': 1I
            # =1
            # 'EXTENT_PARAMETER': 1E
            # =0
            # 'EXTENT_INDEX': 1E
            # =0
            t_survey.add_column(
                Column(name='EXTENT_FLAG',
                       data=n.zeros(N_obj).astype('int'),
                       unit=''))
            t_survey.add_column(
                Column(name='EXTENT_PARAMETER', data=n.zeros(N_obj), unit=''))
            t_survey.add_column(
                Column(name='EXTENT_INDEX', data=n.zeros(N_obj), unit=''))
            # 'MAG':n.float32,
            # 'MAG_ERR':n.float32
            # 'MAG_TYPE': str max 256 char
            HSC_RMAG_array = create_column('HSC-r')
            HSC_RMAGERR_array = create_column('HSC-r_err')
            #
            r_v = 3.1
            a_v = galactic_ebv_array * r_v
            delta_mag = n.hstack((n.array([
                extinction.fitzpatrick99(n.array([6500.]),
                                         el,
                                         r_v=3.1,
                                         unit='aa') for el in a_v
            ])))
            #rv = av/ebv
            #av = rv x ebv
            extincted_mag = HSC_RMAG_array + delta_mag
            t_survey.add_column(
                Column(name='MAG', data=extincted_mag, unit='mag'))
            t_survey.add_column(
                Column(name='MAG_ERR', data=HSC_RMAGERR_array, unit='mag'))
            MAG_TYPE = n.ones(N_obj).astype('str')
            MAG_TYPE[:] = 'DECam_r_AB'
            t_survey.add_column(Column(name='MAG_TYPE', data=MAG_TYPE,
                                       unit=''))
            # 'REDDENING':n.float32, 1E
            # 'DATE_EARLIEST':n.float64, JulianDate decimal days # 01-Nov-2022
            # 'DATE_LATEST':n.float64, JulianDate decimal days # 02-Feb-2033
            t_survey.add_column(
                Column(name='DATE_EARLIEST',
                       data=22305 * n.ones(N_obj),
                       unit='d'))
            t_survey.add_column(
                Column(name='DATE_LATEST',
                       data=33033 * n.ones(N_obj),
                       unit='d'))

            t_survey.add_column(
                Column(name='target_bit', data=target_bit[sel], unit=''))

            t_survey.add_column(
                Column(name='dL_cm', data=create_column('dL_cm'), unit='cm'))
            t_survey.add_column(
                Column(name='galactic_NH',
                       data=create_column('galactic_NH'),
                       unit=''))
            t_survey.add_column(
                Column(name='galaxy_stellar_mass',
                       data=create_column('galaxy_stellar_mass'),
                       unit=''))
            t_survey.add_column(
                Column(name='HALO_Mvir',
                       data=create_column('HALO_Mvir'),
                       unit=''))
            t_survey.add_column(
                Column(name='AGN_LX_soft',
                       data=create_column('AGN_LX_soft'),
                       unit=''))
            t_survey.add_column(
                Column(name='AGN_FX_soft',
                       data=create_column('AGN_FX_soft'),
                       unit=''))
            t_survey.add_column(
                Column(name='AGN_LX_hard',
                       data=create_column('AGN_LX_hard'),
                       unit=''))
            t_survey.add_column(
                Column(name='AGN_FX_hard',
                       data=create_column('AGN_FX_hard'),
                       unit=''))
            t_survey.add_column(
                Column(name='AGN_nH', data=create_column('AGN_nH'), unit=''))
            t_survey.add_column(
                Column(name='WISE-W1', data=create_column('WISE-W1'), unit=''))
            t_survey.add_column(
                Column(name='AGN_SDSS_r_magnitude',
                       data=create_column('AGN_SDSS_r_magnitude'),
                       unit=''))
            t_survey.add_column(
                Column(name='g_lat', data=create_column('g_lat'), unit='deg'))
            t_survey.add_column(
                Column(name='g_lon', data=create_column('g_lon'), unit='deg'))
            t_survey.add_column(
                Column(name='ecl_lat',
                       data=create_column('ecl_lat'),
                       unit='deg'))
            #t_survey.add_column(Column(name=''  ,data=create_column(''), unit=''))
            return t_survey
        else:
            return 0.
Example #23
    def __init__(self,
                 lam_model,
                 spec_model,
                 meta_model,
                 row,
                 drp_base,
                 dap_base,
                 plateifu_base,
                 model_ix,
                 Kspec_obs=None,
                 sky=None):
        '''
        create mocks of DRP LOGCUBE and DAP MAPS

        1. characterize SNR of observed data
        2. redshift model to observed-frame
        3. attenuate according to MW reddening law
        4. blur according to instrumental dispersion
        5. resample onto rectified (observed) wavelength grid
        6. add noise from full covariance prescription OR from SNR
        7. scale according to r-band surface brightness
        8. mask where there's no flux
        '''

        self.model_ix = model_ix

        flux_obs = drp_base['FLUX'].data
        ivar_obs = drp_base['IVAR'].data
        lam_obs = drp_base['WAVE'].data
        specres_obs = drp_base['SPECRES'].data

        cubeshape = drp_base['FLUX'].data.shape
        nl_obs, *mapshape = cubeshape
        mapshape = tuple(mapshape)
        '''STEP 1'''
        # find SNR of each pixel in cube (used to scale noise later)
        snrcube_obs, rmsmap_obs = compute_snrcube(flux=flux_obs,
                                                  ivar=ivar_obs,
                                                  filtersize_l=15,
                                                  return_rms_map=True)
        '''STEP 2'''
        # compute the redshift map
        z_cosm = row['nsa_zdist']
        z_pec = (dap_base['STELLAR_VEL'].data * u.Unit('km/s') /
                 c.c).to('').value
        z_obs = (1. + z_cosm) * (1. + z_pec) - 1.

        # create a placeholder model cube since flexible broadcasting is hard
        if spec_model.ndim == 3:
            spec_model_cube = spec_model
        else:
            spec_model_cube = np.tile(spec_model[:, None, None],
                                      (1, ) + mapshape)
        ivar_model_cube = np.ones_like(spec_model_cube)
        lam_model_z, spec_model_z, ivar_model_z = ut.redshift(
            l=lam_model,
            f=spec_model_cube,
            ivar=ivar_model_cube,
            z_in=0.,
            z_out=z_obs)
        '''STEP 3'''
        # figure out attenuation
        # there are issues with extinction library's handling of multidim arrays
        # so we'll interpolate
        atten_l = np.linspace(3000., 20000., 10000)
        r_v = 3.1
        ext_mag_interp = interp1d(x=atten_l,
                                  y=extinction.fitzpatrick99(
                                      atten_l,
                                      r_v=r_v,
                                      a_v=drp_base[0].header['EBVGAL'] * r_v),
                                  fill_value='extrapolate',
                                  bounds_error=False)
        ext_mag = ext_mag_interp(lam_model_z)
        atten_factor = 2.5**-ext_mag
        spec_model_mwred = spec_model_z * atten_factor
        ivar_model_mwred = ivar_model_z / atten_factor**2.
        '''STEP 4'''
        # specres of observed cube at model wavelengths
        spec_model_instblur = ut.blur_cube_to_psf(
            l_ref=drp_base['WAVE'].data,
            specres_ref=drp_base['SPECRES'].data,
            l_eval=lam_model_z,
            spec_unblurred=spec_model_mwred)

        ivar_model_instblur = ivar_model_mwred
        '''STEP 5'''
        # create placeholder arrays for ivar and flux
        final_fluxcube = np.empty(cubeshape)

        # wavelength grid for final cube
        l_grid = drp_base['WAVE'].data

        # populate flam and ivar pixel-by-pixel
        for ind in np.ndindex(mapshape):
            final_fluxcube[:, ind[0],
                           ind[1]] = np.interp(xp=lam_model_z[:, ind[0],
                                                              ind[1]],
                                               fp=spec_model_instblur[:,
                                                                      ind[0],
                                                                      ind[1]],
                                               x=l_grid)
        # normalize each spectrum to mean 1
        final_fluxcube /= np.mean(final_fluxcube, axis=0, keepdims=True)
        '''STEP 6'''
        # spectrophotometric noise
        cov_noise = noisify_cov(Kspec_obs, mapshape=mapshape)
        # random noise: signal * (gauss / snr)
        random_noise = np.random.randn(*cubeshape) / (snrcube_obs + 1.0e-6)
        fluxscaled_random_noise = random_noise * final_fluxcube

        final_fluxcube += (cov_noise + fluxscaled_random_noise)
        '''STEP 7'''
        # normalize everything to have the same observed-frame r-band flux
        u_flam = 1.0e-17 * (u.erg / (u.s * u.cm**2 * u.AA))
        rband_drp = Spec2Phot(
            lam=drp_base['WAVE'].data,
            flam=drp_base['FLUX'].data * u_flam).ABmags['sdss2010-r'] * u.ABmag
        rband_drp[~np.isfinite(rband_drp)] = rband_drp[np.isfinite(
            rband_drp)].max()
        rband_model = Spec2Phot(
            lam=drp_base['WAVE'].data,
            flam=final_fluxcube * u_flam).ABmags['sdss2010-r'] * u.ABmag
        rband_model[~np.isfinite(rband_model)] = rband_model[np.isfinite(
            rband_model)].max()
        # flux ratio map
        r = (rband_drp.to(m.Mgy) / rband_model.to(m.Mgy)).value

        final_fluxcube *= r[None, ...]
        # initialize the ivar cube according to the SNR cube
        # of base observations
        # this is because while we think we know the actual spectral covariance,
        # that is not necessarily reflected in the quoted ivars!!!
        final_ivarcube = (snrcube_obs / final_fluxcube)**2.

        # add sky spectrum
        if sky:
            skyfluxs, skyivars = sky.make_skycube(mapshape)
            final_fluxcube = final_fluxcube + skyfluxs
            #final_ivarcube = 1. / (1. / final_ivarcube + 1. / skyivars)
        '''STEP 8'''
        # mask where the native datacube has no signal
        rimg = drp_base['RIMG'].data
        nosignal = (rimg == 0.)[None, ...]
        nosignal_cube = np.broadcast_to(nosignal, final_fluxcube.shape)
        final_fluxcube[nosignal_cube] = 0.
        final_ivarcube[final_fluxcube == 0.] = 0.

        # mask where there's bad velocity info
        badvel = m.mask_from_maskbits(dap_base['STELLAR_VEL_MASK'].data,
                                      [30])[None, ...]
        final_ivarcube[np.tile(badvel, (nl_obs, 1, 1))] = 0.

        # replace infinite flux elements with median-filtered
        flux_is_inf = ~np.isfinite(final_fluxcube)
        final_fluxcube[flux_is_inf] = medfilt(np.nan_to_num(final_fluxcube),
                                              [11, 1, 1])[flux_is_inf]

        self.dap_base = dap_base
        self.drp_base = drp_base
        self.fluxcube = final_fluxcube
        self.fluxcube_ivar = final_ivarcube
        self.row = row
        self.metadata = meta_model

        self.plateifu_base = plateifu_base
Example #24
    # ===============================
    # x ray extinction from our Galaxy
    NH_DATA = n.loadtxt(path_2_NH_attenuation, unpack=True)
    nh_law = interp1d(
        n.hstack((-10.**25, 10**n.hstack((10., NH_DATA[0], 25)))),
        n.hstack((1., 1., 1. / NH_DATA[1], 0.00001)))

    attenuation = nh_law(galactic_NH[z_sel])
    agn_rxay_flux_05_20_observed = fx_05_20 * attenuation
    #print('agn_rxay_flux_05_20_observed',agn_rxay_flux_05_20_observed,time.time() - t0)

    # optical extinction, Fitzpatrick 99
    ebv_values = n.hstack((n.arange(0., 5., 0.01), 10**n.arange(1, 4, 0.1)))
    ext_values = n.array([
        extinction.fitzpatrick99(n.array([6231.]),
                                 3.1 * EBV,
                                 r_v=3.1,
                                 unit='aa')[0] for EBV in ebv_values
    ])
    ext_interp = interp1d(ebv_values, ext_values)
    agn_rmag_observed = empirical_mag_r + ext_interp(galactic_ebv[z_sel])
    #print('agn_rmag_observed', agn_rmag_observed, time.time() - t0)

    # ===============================
    # Writing results
    # ===============================
    f1['LX_hard'][z_sel] = lx
    f1['LX_soft'][z_sel] = lx_05_20
    f1['FX_soft'][z_sel] = fx_05_20
    f1['FX_soft_attenuated'][z_sel] = agn_rxay_flux_05_20_observed
    f1['FX_hard'][z_sel] = fx_2_10
    f1['logNH'][z_sel] = logNH
Example #25
def create_4most_catalogue_sersic(t_survey, subsurvey):
    """
    Creates a 4MOST subsurvey catalogue for S5.

    Writes Sersic profile parameters to the Table.

    t_survey : (astropy Table) simulated catalogue
    subsurvey : (string) sub-survey name
    """
    N_targets = len(t_survey)
    N_obj = len(t_survey)
    #  limit size of the string columns to the size of the longer string in the corresponding columns.
    # 'NAME':str, max 256 char
    N1 = n.arange(len(t_survey['galactic_ebv']))
    id_list = N_subsurvey[subsurvey] * 1e8 + N1
    NAME = n.array([str(int(el)).zfill(11) for el in id_list])
    t_survey.add_column(Column(name='NAME', data=NAME, unit=''))
    # 'RA':n.float64, 1D
    # 'DEC':n.float64, 1D
    # 'PMRA':n.float32, 1E
    # 'PMDEC':n.float32, 1E
    # 'EPOCH':n.float32, 1E
    PMRA = n.zeros(N_obj)
    t_survey.add_column(Column(name='RA', data=t_survey['ra'], unit='deg'))
    t_survey.add_column(Column(name='DEC', data=t_survey['dec'], unit='deg'))
    t_survey.add_column(Column(name='PMRA', data=PMRA, unit='mas/yr'))
    PMDEC = n.zeros(N_obj)
    t_survey.add_column(Column(name='PMDEC', data=PMDEC, unit='mas/yr'))
    EPOCH = n.ones(N_obj) * 2000.
    t_survey.add_column(Column(name='EPOCH', data=EPOCH, unit='yr'))
    # 'RESOLUTION':n.int16, 1I
    RESOLUTION = n.ones(N_obj).astype('int')
    t_survey.add_column(Column(name='RESOLUTION', data=RESOLUTION, unit=''))
    # 'SUBSURVEY':str, max 256 char
    SUBSURVEY = n.ones(N_obj).astype('str')
    SUBSURVEY[:] = subsurvey
    t_survey.add_column(Column(name='SUBSURVEY', data=SUBSURVEY, unit=''))
    # 'PRIORITY':n.int16, 1I
    PRIORITY = n.zeros(N_obj).astype('int') + priority_values[subsurvey]
    t_survey.add_column(Column(name='PRIORITY', data=PRIORITY, unit=''))

    # EBV for templates
    ebv_1000 = (t_survey['galactic_ebv'] * 1000).astype('int')
    ebv_1_0 = (ebv_1000 > 1000)
    ebv_0_5 = (ebv_1000 > 500) & (ebv_1000 <= 1000)
    ebv_0_4 = (ebv_1000 > 400) & (ebv_1000 <= 500)
    ebv_0_3 = (ebv_1000 > 300) & (ebv_1000 <= 400)
    ebv_0_2 = (ebv_1000 > 200) & (ebv_1000 <= 300)
    ebv_0_1 = (ebv_1000 > 100) & (ebv_1000 <= 200)
    ebv_0_0 = (ebv_1000 <= 100)
    z_name = lambda z0, z1: "_zmin_" + str(int(10 * z0)).zfill(
        2) + "_zmax_" + str(int(10 * z1)).zfill(2)
    # templates
    template_names = n.zeros(N_obj).astype('U100')
    ruleset_array = n.zeros(N_obj).astype('str')
    # S8 BG or LRG

    if subsurvey == 'cluster_BCG':
        ruleset_array[:] = "ClusBCG"

    if subsurvey == 'cluster_redGAL':
        ruleset_array[:] = "RedGAL"

    for z0, z1 in zip(zmins, zmaxs):
        zsel = (t_survey['redshift_R'] >= z0) & (t_survey['redshift_R'] < z1)
        if len(zsel.nonzero()[0]) > 0:
            #ruleset_array[zsel] = "COSMO_RedGAL"
            template_names[(
                zsel)] = "4most_" + 'LRG' + z_name(z0, z1) + '_EBV_0_01.fits'
            template_names[(zsel) & (ebv_0_0)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_01.fits'
            template_names[(zsel) & (ebv_0_1)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_1.fits'
            template_names[(zsel) & (ebv_0_2)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_2.fits'
            template_names[(zsel) & (ebv_0_3)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_3.fits'
            template_names[(zsel) & (ebv_0_4)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_4.fits'
            template_names[(zsel) & (ebv_0_5)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_0_5.fits'
            template_names[(zsel) & (ebv_1_0)] = "4most_" + 'LRG' + z_name(
                z0, z1) + '_EBV_1_0.fits'

    print(n.unique(template_names))
    # 'TEMPLATE':str, max 256 char
    t_survey.add_column(Column(name='TEMPLATE', data=template_names, unit=''))
    # 'RULESET':str, max 256 char
    t_survey.add_column(Column(name='RULESET', data=ruleset_array, unit=''))
    # 'REDSHIFT_ESTIMATE':n.float32, 1E
    # 'REDSHIFT_ERROR':n.float32, 1E
    t_survey.add_column(
        Column(name='REDSHIFT_ESTIMATE', data=t_survey['redshift_R'], unit=''))
    t_survey.add_column(
        Column(name='REDSHIFT_ERROR', data=n.ones(N_obj), unit=''))

    r_v = 3.1
    a_v = t_survey['galactic_ebv'] * r_v
    delta_mag = n.hstack((n.array([
        extinction.fitzpatrick99(n.array([6500.]), el, r_v=3.1, unit='aa')
        for el in a_v
    ])))
    #rv = av/ebv
    #av = rv x ebv
    extincted_mag = t_survey['galaxy_mag_r'] + delta_mag

    # fibermag

    def re_dev(M_star):
        return 0.16 * (M_star)**(0.1) * (1 + M_star /
                                         (2.42 * 10**(10)))**(0.76 - 0.1)

    def re_exp(M_star):
        return 0.08 * (M_star)**(0.16) * (1 + M_star /
                                          (17.1 * 10**(10)))**(0.81 - 0.16)

    radius_kpc = re_dev(10**t_survey['galaxy_stellar_mass'])
    radius_arcsec = cosmo.arcsec_per_kpc_proper(
        t_survey['redshift_R']).value * radius_kpc

    # surface brightness profiles
    # http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
    b4 = 7.669
    b1 = 1.678

    def f_14_dev(r12):
        return gammainc(8, b4 * (0.7 / r12)**(1. / 6.))

    def f_14_exp(r12):
        return gammainc(2, b1 * (0.7 / r12)**(1. / 2.))  # From Raichoor 2017

    def f_14_test(r12, nn):
        return gammainc(2, b1 * (0.7 / r12)**(1. / nn))

    frac = f_14_dev(radius_arcsec)
    flux_fiber = frac * 10**((extincted_mag + 48.6) / -2.5)
    magnitude_4fs2 = -2.5 * n.log10(flux_fiber) - 48.6

    # 'EXTENT_FLAG': 1I
    # =1
    # 'EXTENT_PARAMETER': 1E
    # =0
    # 'EXTENT_INDEX': 1E
    # =0
    t_survey.add_column(
        Column(name='EXTENT_FLAG',
               data=2 * n.ones(N_obj).astype('int'),
               unit=''))
    t_survey.add_column(
        Column(name='EXTENT_PARAMETER', data=radius_arcsec, unit='arcsec'))
    t_survey.add_column(
        Column(name='EXTENT_INDEX', data=6 * n.ones(N_obj), unit=''))
    # 'MAG':n.float32,
    # 'MAG_ERR':n.float32
    # 'MAG_TYPE': str max 256 char

    t_survey.add_column(Column(name='MAG', data=extincted_mag, unit='mag'))
    t_survey.add_column(
        Column(name='FIBERMAG_johan', data=magnitude_4fs2, unit='mag'))
    t_survey.add_column(
        Column(name='MAG_ERR', data=0.01 * n.ones(N_obj), unit='mag'))
    MAG_TYPE = n.ones(N_obj).astype('str')
    MAG_TYPE[:] = 'DECam_r_AB'
    t_survey.add_column(Column(name='MAG_TYPE', data=MAG_TYPE, unit=''))
    # 'REDDENING':n.float32, 1E
    t_survey.add_column(
        Column(name='REDDENING', data=t_survey['galactic_ebv'], unit='mag'))
    # 'DATE_EARLIEST':n.float64, JulianDate decimal days # 01-Nov-2022
    # 'DATE_LATEST':n.float64, JulianDate decimal days # 02-Feb-2033
    t_survey.add_column(
        Column(name='DATE_EARLIEST', data=22305 * n.ones(N_obj), unit='d'))
    t_survey.add_column(
        Column(name='DATE_LATEST', data=33033 * n.ones(N_obj), unit='d'))
    return t_survey
    def add_4most_columns(subsurvey='BG', t_survey=t_bg):
        #subsurvey = 'BG'
        #t_survey = t_bg
        #subsurvey = 'LRG'
        #t_survey = t_lrg
        #subsurvey = 'ELG'
        #t_survey = t_elg
        #subsurvey = 'QSO'
        #t_survey = t_qso
        #subsurvey = 'Lya'
        #t_survey = t_lya
        #subsurvey = 'filament_GAL'
        #t_survey = t_bgS5
        N_obj = len(t_survey)
        # limit the size of the string columns to the size of the longest string in the corresponding columns.
        # 'NAME':str, max 256 char
        N1 = n.arange(len(t_survey['EBV']))
        id_list = HEALPIX_id * 1e8 + N_subsurvey[subsurvey] * 1e6 + N1
        NAME = n.array([str(int(el)).zfill(11) for el in id_list])
        t_survey.add_column(Column(name='NAME', data=NAME, unit=''))
        # 'RA':n.float64, 1D
        # 'DEC':n.float64, 1D
        # 'PMRA':n.float32, 1E
        # 'PMDEC':n.float32, 1E
        # 'EPOCH':n.float32, 1E
        PMRA = n.zeros(N_obj)
        t_survey.add_column(Column(name='PMRA', data=PMRA, unit='mas/yr'))
        PMDEC = n.zeros(N_obj)
        t_survey.add_column(Column(name='PMDEC', data=PMDEC, unit='mas/yr'))
        EPOCH = n.ones(N_obj) * 2000.
        t_survey.add_column(Column(name='EPOCH', data=EPOCH, unit='yr'))
        # 'RESOLUTION':n.int16, 1I
        RESOLUTION = n.ones(N_obj).astype('int')
        t_survey.add_column(Column(name='RESOLUTION', data=RESOLUTION,
                                   unit=''))
        # 'SUBSURVEY':str, max 256 char
        SUBSURVEY = n.ones(N_obj).astype('str')
        SUBSURVEY[:] = subsurvey
        t_survey.add_column(Column(name='SUBSURVEY', data=SUBSURVEY, unit=''))
        # 'PRIORITY':n.int16, 1I
        PRIORITY = n.zeros(N_obj).astype('int') + priority_values[subsurvey]
        t_survey.add_column(Column(name='PRIORITY', data=PRIORITY, unit=''))
        # EBV for templates
        ebv_1000 = (t_survey['EBV'] * 1000).astype('int')
        #print('EBV', n.min(ebv_1000), n.max(ebv_1000))
        ebv_1_0 = (ebv_1000 > 1000)
        ebv_0_5 = (ebv_1000 > 500) & (ebv_1000 <= 1000)
        ebv_0_4 = (ebv_1000 > 400) & (ebv_1000 <= 500)
        ebv_0_3 = (ebv_1000 > 300) & (ebv_1000 <= 400)
        ebv_0_2 = (ebv_1000 > 200) & (ebv_1000 <= 300)
        ebv_0_1 = (ebv_1000 > 100) & (ebv_1000 <= 200)
        ebv_0_0 = (ebv_1000 <= 100)
        z_name = lambda z0, z1: "_zmin_" + str(int(10 * z0)).zfill(
            2) + "_zmax_" + str(int(10 * z1)).zfill(2)
        # templates
        template_names = n.zeros(N_obj).astype('U100')
        ruleset_array = n.zeros(N_obj).astype('str')
        # S8 BG or LRG
        if subsurvey == 'BG' or subsurvey == 'LRG':
            ruleset_array[:] = "COSMO_RedGAL"
            for z0, z1 in zip(zmins, zmaxs):
                zsel = (t_survey['Z'] >= z0) & (t_survey['Z'] < z1)
                if len(zsel.nonzero()[0]) > 0:
                    #ruleset_array[zsel] = "COSMO_RedGAL"
                    template_names[(zsel)] = "4most_" + 'LRG' + z_name(
                        z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_1)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_1.fits'
                    template_names[(zsel)
                                   & (ebv_0_2)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_2.fits'
                    template_names[(zsel)
                                   & (ebv_0_3)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_3.fits'
                    template_names[(zsel)
                                   & (ebv_0_4)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_4.fits'
                    template_names[(zsel)
                                   & (ebv_0_5)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_5.fits'
                    template_names[(zsel)
                                   & (ebv_1_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_1_0.fits'
        # S5 filament gal
        if subsurvey == 'filament_GAL':
            ruleset_array[:] = "RedGAL"
            for z0, z1 in zip(zmins, zmaxs):
                zsel = (t_survey['Z'] >= z0) & (t_survey['Z'] < z1)
                if len(zsel.nonzero()[0]) > 0:
                    #ruleset_array[zsel] = "RedGAL"
                    template_names[(zsel)] = "4most_" + 'LRG' + z_name(
                        z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_1)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_1.fits'
                    template_names[(zsel)
                                   & (ebv_0_2)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_2.fits'
                    template_names[(zsel)
                                   & (ebv_0_3)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_3.fits'
                    template_names[(zsel)
                                   & (ebv_0_4)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_4.fits'
                    template_names[(zsel)
                                   & (ebv_0_5)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_0_5.fits'
                    template_names[(zsel)
                                   & (ebv_1_0)] = "4most_" + 'LRG' + z_name(
                                       z0, z1) + '_EBV_1_0.fits'
        # S8 ELG
        if subsurvey == 'ELG':
            ruleset_array[:] = "ELG"
            for z0, z1 in zip(zmins, zmaxs):
                zsel = (t_survey['Z'] >= z0) & (t_survey['Z'] < z1)
                if len(zsel.nonzero()[0]) > 0:
                    #ruleset_array[zsel] = "ELG"
                    template_names[(zsel)] = "4most_" + 'ELG' + z_name(
                        z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_0)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_01.fits'
                    template_names[(zsel)
                                   & (ebv_0_1)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_1.fits'
                    template_names[(zsel)
                                   & (ebv_0_2)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_2.fits'
                    template_names[(zsel)
                                   & (ebv_0_3)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_3.fits'
                    template_names[(zsel)
                                   & (ebv_0_4)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_4.fits'
                    template_names[(zsel)
                                   & (ebv_0_5)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_0_5.fits'
                    template_names[(zsel)
                                   & (ebv_1_0)] = "4most_" + 'ELG' + z_name(
                                       z0, z1) + '_EBV_1_0.fits'

        # 'TEMPLATE':str, max 256 char
        t_survey.add_column(
            Column(name='TEMPLATE', data=template_names, unit=''))
        # 'RULESET':str, max 256 char
        t_survey.add_column(Column(name='RULESET', data=ruleset_array,
                                   unit=''))
        # 'REDSHIFT_ESTIMATE':n.float32, 1E
        # 'REDSHIFT_ERROR':n.float32, 1E
        t_survey.add_column(
            Column(name='REDSHIFT_ESTIMATE', data=t_survey['Z'], unit=''))
        t_survey.add_column(
            Column(name='REDSHIFT_ERROR', data=n.ones(N_obj), unit=''))
        # 'EXTENT_FLAG': 1I
        # =1
        # 'EXTENT_PARAMETER': 1E
        # =0
        # 'EXTENT_INDEX': 1E
        # =0
        t_survey.add_column(
            Column(name='EXTENT_FLAG',
                   data=n.ones(N_obj).astype('int'),
                   unit=''))
        t_survey.add_column(
            Column(name='EXTENT_PARAMETER', data=n.zeros(N_obj), unit=''))
        t_survey.add_column(
            Column(name='EXTENT_INDEX', data=n.zeros(N_obj), unit=''))
        # 'MAG':n.float32,
        # 'MAG_ERR':n.float32
        # 'MAG_TYPE': str max 256 char
        r_v = 3.1
        a_v = t_survey['EBV'] * r_v
        delta_mag = n.hstack((n.array([
            extinction.fitzpatrick99(n.array([6500.]), el, r_v=3.1, unit='aa')
            for el in a_v
        ])))
        #rv = av/ebv
        #av = rv x ebv
        extincted_mag = t_survey['rfib'] + delta_mag
        t_survey.add_column(Column(name='MAG', data=extincted_mag, unit='mag'))
        t_survey.add_column(
            Column(name='MAG_ERR', data=0.01 * n.ones(N_obj), unit='mag'))
        MAG_TYPE = n.ones(N_obj).astype('str')
        MAG_TYPE[:] = 'DECam_r_AB'
        t_survey.add_column(Column(name='MAG_TYPE', data=MAG_TYPE, unit=''))
        # 'REDDENING':n.float32, 1E
        t_survey.add_column(
            Column(name='REDDENING', data=t_survey['EBV'], unit='mag'))
        # 'DATE_EARLIEST':n.float64, JulianDate decimal days # 01-Nov-2022
        # 'DATE_LATEST':n.float64, JulianDate decimal days # 02-Feb-2033
        t_survey.add_column(
            Column(name='DATE_EARLIEST', data=22305 * n.ones(N_obj), unit='d'))
        t_survey.add_column(
            Column(name='DATE_LATEST', data=33033 * n.ones(N_obj), unit='d'))
        return t_survey
Exemple #27
0
        f1sigma = 999
        print('Bad: {}'.format(new['detectid'][bad_i]))
    new['flux_noise_1sigma_obs'][bad_i] = f1sigma

join_new = join(source_table, new, join_type='left')

print(len(join_new), len(source_table))

# get intrinsic flux limit by dereddening

import extinction

Rv = 3.1
ext = []

for index in np.arange( np.size(join_new['detectid'])):
    src_wave = np.array([np.double(join_new['wave'][index])])
    ext_i = extinction.fitzpatrick99(src_wave, join_new['Av'][index], Rv)[0]
    ext.append(ext_i)

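# multiplying by 10**(+0.4 * A_lambda) removes the Galactic extinction, turning
# the observed 1-sigma flux limit into an intrinsic (dereddened) limit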
deredden = 10**(0.4*np.array(ext))

join_new['flux_noise_1sigma'] = deredden * join_new['flux_noise_1sigma_obs']
join_new['flux_noise_1sigma'] = join_new['flux_noise_1sigma'].filled(999)
join_new['flux_noise_1sigma_obs'] = join_new['flux_noise_1sigma_obs'].filled(999)
join_new['apcor_api'] = join_new['apcor'].filled(0.0)

print(len(source_table), len(join_new))

join_new.write('source_catalog_2.1.4.fits', overwrite=True)
Exemple #28
0
    def __init__(self, lam_model, spec_model, meta_model,
                 row, drp_base, dap_base, plateifu_base, model_ix,
                 Kspec_obs=None, sky=None):
        '''
        create mocks of DRP LOGCUBE and DAP MAPS

        1. characterize SNR of observed data
        2. redshift model to observed-frame
        3. attenuate according to MW reddening law
        4. blur according to instrumental dispersion
        5. resample onto rectified (observed) wavelength grid
        6. add noise from full covariance prescription OR from SNR
        7. scale according to r-band surface brightness
        8. mask where there's no flux
        '''

        self.model_ix = model_ix

        flux_obs = drp_base['FLUX'].data
        ivar_obs = drp_base['IVAR'].data
        lam_obs = drp_base['WAVE'].data
        specres_obs = drp_base['SPECRES'].data

        cubeshape = drp_base['FLUX'].data.shape
        nl_obs, *mapshape = cubeshape
        mapshape = tuple(mapshape)

        '''STEP 1'''
        # find SNR of each pixel in cube (used to scale noise later)
        snrcube_obs, rmsmap_obs = compute_snrcube(
            flux=flux_obs, ivar=ivar_obs,
            filtersize_l=15, return_rms_map=True)

        '''STEP 2'''
        # compute the redshift map
        z_cosm = row['nsa_zdist']
        z_pec = (dap_base['STELLAR_VEL'].data * u.Unit('km/s') / c.c).to('').value
        z_obs = (1. + z_cosm) * (1. + z_pec) - 1.

        # create a placeholder model cube since flexible broadcasting is hard
        if spec_model.ndim == 3:
            spec_model_cube = spec_model
        else:
            spec_model_cube = np.tile(spec_model[:, None, None], (1, ) + mapshape)
        ivar_model_cube = np.ones_like(spec_model_cube)
        lam_model_z, spec_model_z, ivar_model_z = ut.redshift(
            l=lam_model, f=spec_model_cube, ivar=ivar_model_cube,
            z_in=0., z_out=z_obs)

        '''STEP 3'''
        # figure out attenuation
        # there are issues with extinction library's handling of multidim arrays
        # so we'll interpolate
        atten_l = np.linspace(3000., 20000., 10000)
        r_v = 3.1
        ext_mag_interp = interp1d(
            x=atten_l,
            y=extinction.fitzpatrick99(
                atten_l, r_v=r_v, a_v=drp_base[0].header['EBVGAL'] * r_v),
            fill_value='extrapolate', bounds_error=False)
        ext_mag = ext_mag_interp(lam_model_z)
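        # note: 2.5**(-A) approximates the exact dimming factor 10**(-0.4*A),
        # since 10**0.4 is about 2.512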
        atten_factor = 2.5**-ext_mag
        spec_model_mwred = spec_model_z * atten_factor
        ivar_model_mwred = ivar_model_z / atten_factor**2.

        '''STEP 4'''
        # specres of observed cube at model wavelengths
        spec_model_instblur = ut.blur_cube_to_psf(
            l_ref=drp_base['WAVE'].data, specres_ref=drp_base['SPECRES'].data,
            l_eval=lam_model_z, spec_unblurred=spec_model_mwred)

        ivar_model_instblur = ivar_model_mwred

        '''STEP 5'''
        # create placeholder arrays for ivar and flux
        final_fluxcube = np.empty(cubeshape)

        # wavelength grid for final cube
        l_grid = drp_base['WAVE'].data

        # populate flam and ivar pixel-by-pixel
        for ind in np.ndindex(mapshape):
            final_fluxcube[:, ind[0], ind[1]] = np.interp(
                xp=lam_model_z[:, ind[0], ind[1]],
                fp=spec_model_instblur[:, ind[0], ind[1]],
                x=l_grid)
        # normalize each spectrum to mean 1
        final_fluxcube /= np.mean(final_fluxcube, axis=0, keepdims=True)

        '''STEP 6'''
        # spectrophotometric noise
        cov_noise = noisify_cov(Kspec_obs, mapshape=mapshape)
        # random noise: signal * (gauss / snr)
        random_noise = np.random.randn(*cubeshape) / (snrcube_obs + 1.0e-6)
        fluxscaled_random_noise = random_noise * final_fluxcube

        final_fluxcube += (cov_noise + fluxscaled_random_noise)

        '''STEP 7'''
        # normalize everything to have the same observed-frame r-band flux
        u_flam = 1.0e-17 * (u.erg / (u.s * u.cm**2 * u.AA))
        rband_drp = Spec2Phot(
            lam=drp_base['WAVE'].data,
            flam=drp_base['FLUX'].data * u_flam).ABmags['sdss2010-r'] * u.ABmag
        rband_drp[~np.isfinite(rband_drp)] = rband_drp[np.isfinite(rband_drp)].max()
        rband_model = Spec2Phot(
            lam=drp_base['WAVE'].data,
            flam=final_fluxcube * u_flam).ABmags['sdss2010-r'] * u.ABmag
        rband_model[~np.isfinite(rband_model)] = rband_model[np.isfinite(rband_model)].max()
        # flux ratio map
        r = (rband_drp.to(m.Mgy) / rband_model.to(m.Mgy)).value

        final_fluxcube *= r[None, ...]
        # initialize the ivar cube according to the SNR cube
        # of base observations
        # this is because while we think we know the actual spectral covariance,
        # that is not necessarily reflected in the quoted ivars!!!
        final_ivarcube = (snrcube_obs / final_fluxcube)**2.

        # add sky spectrum
        if sky:
            skyfluxs, skyivars = sky.make_skycube(mapshape)
            final_fluxcube = final_fluxcube + skyfluxs
            #final_ivarcube = 1. / (1. / final_ivarcube + 1. / skyivars)


        '''STEP 8'''
        # mask where the native datacube has no signal
        rimg = drp_base['RIMG'].data
        nosignal = (rimg == 0.)[None, ...]
        nosignal_cube = np.broadcast_to(nosignal, final_fluxcube.shape)
        final_fluxcube[nosignal_cube] = 0.
        final_ivarcube[final_fluxcube == 0.] = 0.

        # mask where there's bad velocity info
        badvel = m.mask_from_maskbits(
            dap_base['STELLAR_VEL_MASK'].data, [30])[None, ...]
        final_ivarcube[np.tile(badvel, (nl_obs, 1, 1))] = 0.

        # replace infinite flux elements with median-filtered
        flux_is_inf = ~np.isfinite(final_fluxcube)
        final_fluxcube[flux_is_inf] = medfilt(
            np.nan_to_num(final_fluxcube), [11, 1, 1])[flux_is_inf]

        self.dap_base = dap_base
        self.drp_base = drp_base
        self.fluxcube = final_fluxcube
        self.fluxcube_ivar = final_ivarcube
        self.row = row
        self.metadata = meta_model

        self.plateifu_base = plateifu_base
Exemple #29
0
import sys

import numpy as np
import extinction

sys.path.append('/Users/wenlong/Codes/libs/pylib')
import myfuncs as cf
from astropy.io import ascii

f5 = 5410.
f8 = 8353.
f1 = 15450.
wave = np.array([f5, f8, f1])
av = 1.0
rv = 3.1

log = cf.HtmlLog('./', 'Reddening.html')
log.add_line('Python version of simple reddening', AppendLog=False)

ccm89 = extinction.ccm89(wave, av, rv)
f99 = extinction.fitzpatrick99(wave, av, rv)
fm07 = extinction.fm07(wave, av, 'aa')

rv = 3.1
fout = 'table.dat'
hout = open(fout, 'w')
hout.write('#  Law   Rv   A555   A814   A160   RIvi    RHvi\n')
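# RIvi = A(814) / [A(555) - A(814)] and RHvi = A(160) / [A(555) - A(814)]:
# the I- and H-band extinction per unit E(555-814) colour excess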
law = 'CCM89'
a555, a814, a160 = extinction.ccm89(wave, av, rv)
rivi = a814 / (a555 - a814)
rhvi = a160 / (a555 - a814)
fmt = '%10s' + '%10.3f' * 6 + '\n'
hout.write(fmt % (law, rv, a555, a814, a160, rivi, rhvi))

law = 'F99'
a555, a814, a160 = extinction.fitzpatrick99(wave, av, rv)
Exemple #30
0
def overplot_spectroscopic(res,
                           drp_logcube,
                           drpall_row,
                           mass_ax,
                           ml_ax,
                           f_ax,
                           pca_system,
                           res_wcs,
                           cmlr,
                           mlb='i'):
    '''
    overplot cumulative spectroscopic masses, log M/L, cumulative fluxes
    '''

    # synthetic photometry of IFU data in rest frame
    ebvgal = drpall_row['ebvgal']
    r_v = 3.1
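    # multiplicative de-reddening factor 10**(+0.4*A_lambda), with A_lambda from the
    # Fitzpatrick (1999) law at A_V = R_V * E(B-V); broadcast over the cube's two
    # spatial axes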
    dustcorr = 10.**(0.4 * extinction.fitzpatrick99(
        drp_logcube['WAVE'].data, a_v=r_v * ebvgal, r_v=r_v))[:, None, None]

    ifu_s2p = spectrophot.Spec2Phot(
        lam=drp_logcube['WAVE'].data / (1. + drpall_row['nsa_z']),
        flam=1.0e-17 * drp_logcube['FLUX'].data * (1. + drpall_row['nsa_z']) *
        dustcorr)
    res_flux = (ifu_s2p.ABmags['sdss2010-{}'.format(mlb)] * u.ABmag).to(m.Mgy)

    # map sky coordinates to number of effective radii from center
    pos_to_nRe = RotatedParaboloid(ctr=np.array(
        [drpall_row['objra'], drpall_row['objdec']]),
                                   phi=drpall_row['nsa_elpetro_phi'] * u.deg,
                                   axis_ratio=drpall_row['nsa_elpetro_ba'],
                                   Re=drpall_row['nsa_elpetro_th50_r'] / 3600.)

    badpdf = res.badPDF()
    ml_mask = np.logical_or(res.mask, badpdf)

    # WCS from results file determines where elliptical projection is sampled
    #res_wcs = wcs.WCS(res['SNRMED'].header)
    res_II, res_JJ = np.meshgrid(
        *[np.linspace(0., s - 1., s) for s in res['SNRMED'].shape],
        indexing='ij')
    res_AA, res_DD = res_wcs.wcs_pix2world(res_JJ, res_II, 1)

    # sample Re at spaxel centers
    res_Re = pos_to_nRe(np.column_stack([res_AA.flatten(),
                                         res_DD.flatten()
                                         ])).reshape(res_AA.shape)
    nRe_to_plot = np.linspace(0., res_Re[~ml_mask].max(), 50)

    # mass, log M/L
    badpdf = res.badPDF()
    ml_mask = np.logical_or.reduce((res.mask, badpdf))
    interior_mask = np.logical_or.reduce(
        (badpdf,
         (m.mask_from_maskbits(drp_logcube['MASK'].data, [3]).sum(axis=0) > 0),
         (m.mask_from_maskbits(drp_logcube['MASK'].data, [2]).sum(axis=0) >
          0)))
    logml = infer_masked(res.param_dist_med(extname='ML{}'.format(mlb)),
                         ml_mask, interior_mask)
    ml = 10.**logml * m.m_to_l_unit
    res_MAG = res_flux.to(u.ABmag) - cosmo.distmod(drpall_row['nsa_zdist'])
    mlb_sollum = 10.**(-0.4 * (res_MAG - spectrophot.absmag_sun_band[mlb] * u.ABmag)).value * \
                 m.bandpass_sol_l_unit
    spectro_mass = (ml * mlb_sollum).to(u.Msun)

    # sum mass within some number of Re
    mass_within_nRe = np.array(
        [sum_within_nRe(spectro_mass, res_Re, n) for n in nRe_to_plot])
    mass_ax.plot(nRe_to_plot, mass_within_nRe, c='C1')

    # plot mass to light versus Re
    ml_sc = ml_ax.scatter(res_Re[~ml_mask],
                          logml[~ml_mask],
                          c='C1',
                          edgecolor='None',
                          s=.5)

    res_Re_m = np.ma.array(res_Re, mask=ml_mask)
    # find ring method log M/L
    outer_ring = np.logical_and((res_Re_m <= res_Re_m.max()),
                                (res_Re_m >= res_Re_m.max() - .5))
    outer_logml_ring = np.median(logml[~ml_mask * outer_ring])
    ml_ax.scatter(x=[np.median(res_Re_m[~ml_mask * outer_ring])],
                  y=[outer_logml_ring],
                  marker='x',
                  c='C1')
    # find CMLR log M/L
    nsa_MAG = (np.array(drpall_row['nsa_elpetro_absmag'][2:]) * \
                        (u.ABmag - u.MagUnit(u.littleh**2))).to(u.ABmag, u.with_H0(cosmo.H0))
    nsa_mag = (nsa_MAG + cosmo.distmod(drpall_row['nsa_zdist']))
    nsa_flux = nsa_mag.to(m.Mgy)
    ifu_mag = np.array(
        [ifu_s2p.ABmags['sdss2010-{}'.format(b_)] for b_ in 'ugriz']) * u.ABmag
    ifu_mag[~np.isfinite(ifu_mag)] = 40. * u.ABmag
    ifu_flux = ifu_mag.to(m.Mgy)
    flux_deficit = nsa_flux - ifu_flux.sum(axis=(1, 2))
    logml_missingflux_cmlr = cmlr(2.5 *
                                  np.log10(flux_deficit[1] / flux_deficit[2]))
    ml_ax.scatter(x=[np.median(res_Re_m[~ml_mask * outer_ring])],
                  y=[logml_missingflux_cmlr],
                  marker='s',
                  c='C1',
                  edgecolor='k')

    #
    ml_missingflux_cmlr = 10.**logml_missingflux_cmlr * m.m_to_l_unit
    ml_missingflux_ring = 10.**outer_logml_ring * m.m_to_l_unit
    missing_mlb_MAG = flux_deficit[3].to(u.ABmag) - cosmo.distmod(
        drpall_row['nsa_zdist'])
    missing_mlb_sollum = 10.**(-0.4 * (missing_mlb_MAG - spectrophot.absmag_sun_band[mlb] * u.ABmag)).value * \
        m.bandpass_sol_l_unit
    missing_mass_cmlr = (ml_missingflux_cmlr * missing_mlb_sollum).to(u.Msun)
    missing_mass_ring = (ml_missingflux_ring * missing_mlb_sollum).to(u.Msun)

    mass_ax.scatter(x=res_Re.max() - .1,
                    y=mass_within_nRe[-1] * u.Msun + missing_mass_cmlr,
                    marker='s',
                    c='C1',
                    edgecolor='k',
                    zorder=3,
                    label='+AC(CMLR)')
    mass_ax.scatter(x=res_Re.max(),
                    y=mass_within_nRe[-1] * u.Msun + missing_mass_ring,
                    marker='x',
                    c='C1',
                    edgecolor='k',
                    zorder=3,
                    label='+AC(RING)')
    mass_ax.legend(loc='best', prop={'size': 'x-small'})

    summed_flux = np.array([
        sum_within_nRe(arr=res_flux, Re_a=res_Re, nRe=n) for n in nRe_to_plot
    ])
    f_ax.plot(nRe_to_plot, summed_flux, label='IFU', c='C1')
    f_ax.scatter(x=res_Re.max() + .1,
                 y=summed_flux[-1] * m.Mgy + flux_deficit[3],
                 marker='o',
                 c='C1',
                 edgecolor='k',
                 zorder=3)
Exemple #31
0
def getReddeningLaw(law='fitzpatrick99',Rv=3.1,inv=False):

    import numpy as np
    from scipy import interpolate
    import extinction

    # Wavelength ranges (lambda_min - lambda_max) of the various reddening laws 
    #  (in Angstroms)...
    lambda_min = {'ccm89':          1250., 
                  'odonnell94':     1250., 
                  'calzetti00':     1200., 
                  'fitzpatrick99':   910., 
                  'fm07':            910.}
    lambda_max = {'ccm89':         33000.,
                  'odonnell94':    33000.,
                  'calzetti00':    22000.,
                  'fitzpatrick99': 60000.,
                  'fm07':          60000.}
    # We can extract the list of supported reddening laws by
    #  grabbing those that are keys within the lambda_min dictionary...
    supported_laws = list(lambda_min.keys())

    # If the reddening law is not in the list of supported reddening laws,
    #  return an Exception...
    if law not in supported_laws:
        print('Un-supported reddening law:  %s' % (law))
        print('Supported reddening laws are: ', supported_laws)
        print('Returning exception')
        return Exception

    # Calculate and return the reddening law in either
    #  inverse wavelength form (inv=True) or in wavelength
    #  form (inv=False)...
    if inv==True:

        # Use inverse microns to call to "extinction" module
        #  and return reddening law in inverse Angstroms...

        # Calculate inverse wavelengths...
        x_lambda_min = 1.0e4/lambda_max[law]
        x_lambda_max = 1.0e4/lambda_min[law]
        x_micron = np.linspace(x_lambda_min, x_lambda_max, 2000) # microns^-1
        x_angstrom = x_micron * 1.0e-4 # Convert from microns^-1 to Angstroms^-1

        # Call appropriate reddening law function...
        if law == 'ccm89':
            r_array = Rv*extinction.ccm89(x_micron, 1.0, Rv, unit='invum')
        elif law == 'odonnell94':
            r_array = Rv*extinction.odonnell94(x_micron, 1.0, Rv, unit='invum')
        elif law == 'calzetti00':
            r_array = Rv*extinction.calzetti00(x_micron, 1.0, Rv, unit='invum')
        elif law == 'fitzpatrick99':
            r_array = Rv*extinction.fitzpatrick99(x_micron, 1.0, Rv, unit='invum')
        elif law == 'fm07':
            r_array = Rv*extinction.fm07(x_micron, 1.0, unit='invum')

        # Create interpolation function for reddening law...
        r = interpolate.interp1d(x_angstrom, r_array, 
                                 bounds_error=False, fill_value=0., kind=3)

    else:

        # Use Angstroms to call to "extinction" module
        #  and return reddening law in Angstroms...

        # Create wavelength array...
        angstrom = np.logspace(np.log10(lambda_min[law]), np.log10(lambda_max[law]), 2000)

        # Call appropriate reddening law function...
        if law == 'ccm89':
            r_array = Rv*extinction.ccm89(angstrom, 1.0, Rv, unit='aa')
        elif law == 'odonnell94':
            r_array = Rv*extinction.odonnell94(angstrom, 1.0, Rv, unit='aa')
        elif law == 'calzetti00':
            r_array = Rv*extinction.calzetti00(angstrom, 1.0, Rv, unit='aa')
        elif law == 'fitzpatrick99':
            r_array = Rv*extinction.fitzpatrick99(angstrom, 1.0, Rv, unit='aa')
        elif law == 'fm07':
            r_array = Rv*extinction.fm07(angstrom, 1.0, unit='aa')

        # Create interpolation function for reddening law...
        r = interpolate.interp1d(angstrom, r_array, 
                                 bounds_error=False, fill_value=0., kind='linear')

    # Return the interpolation function...
    return r
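
# Hypothetical usage sketch of getReddeningLaw() above (the wavelengths are
# illustrative g/r/i-like effective wavelengths, not tied to any survey):
import numpy as np

r_f99 = getReddeningLaw(law='fitzpatrick99', Rv=3.1, inv=False)
print(r_f99(np.array([4770., 6231., 7625.])))  # interpolated A(lambda)/E(B-V) values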
Exemple #32
0
def kcorr_spec_to_phot(drp_logcube,
                       ifu_ivar_hdu,
                       z,
                       cb1='g',
                       cb2='r',
                       mlb='i'):
    '''
    figure out k-correction from spectroscopy, and then apply it to photometry
    '''
    ifu_wcs = wcs.WCS(ifu_ivar_hdu.header)
    ifu_AA, ifu_DD = ifu_wcs.wcs_pix2world(
        *np.meshgrid(
            *[np.linspace(0., s - 1., s) for s in ifu_ivar_hdu.data.shape]), 1)

    ebvgal = drp_logcube[0].header['EBVGAL']
    r_v = 3.1
    dustcorr = 10.**(0.4 * extinction.fitzpatrick99(
        drp_logcube['WAVE'].data, a_v=r_v * ebvgal, r_v=r_v))[:, None, None]

    sphot = dict()
    sphot['obs'] = spectrophot.Spec2Phot(lam=drp_logcube['WAVE'].data,
                                         flam=1.0e-17 *
                                         drp_logcube['FLUX'].data * dustcorr)
    sphot['rest'] = spectrophot.Spec2Phot(
        lam=drp_logcube['WAVE'].data / (1. + z),
        flam=1.0e-17 * drp_logcube['FLUX'].data * (1. + z) * dustcorr)

    obs_cb1 = sphot['obs'].ABmags['sdss2010-{}'.format(cb1)] * u.ABmag
    rest_cb1 = sphot['rest'].ABmags['sdss2010-{}'.format(cb1)] * u.ABmag
    obs_cb2 = sphot['obs'].ABmags['sdss2010-{}'.format(cb2)] * u.ABmag
    rest_cb2 = sphot['rest'].ABmags['sdss2010-{}'.format(cb2)] * u.ABmag
    obs_mlb = sphot['obs'].ABmags['sdss2010-{}'.format(mlb)] * u.ABmag
    rest_mlb = sphot['rest'].ABmags['sdss2010-{}'.format(mlb)] * u.ABmag

    kcorr_map_cb1 = (rest_cb1 - obs_cb1).clip(-.2, .2)
    kcorr_map_cb2 = (rest_cb2 - obs_cb2).clip(-.2, .2)
    kcorr_map_mlb = (rest_mlb - obs_mlb).clip(-.2, .2)

    good_phot = np.logical_and.reduce(
        (ifu_ivar_hdu.data.astype(bool), np.isfinite(kcorr_map_mlb),
         np.isfinite(kcorr_map_cb1), np.isfinite(kcorr_map_cb2)))

    kcorr_cb1_interp = Rbf(ifu_AA[good_phot],
                           ifu_DD[good_phot],
                           kcorr_map_cb1[good_phot],
                           bounds_error=False,
                           fill_value=0.,
                           function='thin_plate')
    kcorr_cb2_interp = Rbf(ifu_AA[good_phot],
                           ifu_DD[good_phot],
                           kcorr_map_cb2[good_phot],
                           bounds_error=False,
                           fill_value=0.,
                           function='thin_plate')
    kcorr_mlb_interp = Rbf(ifu_AA[good_phot],
                           ifu_DD[good_phot],
                           kcorr_map_mlb[good_phot],
                           bounds_error=False,
                           fill_value=0.,
                           function='thin_plate')

    return kcorr_cb1_interp, kcorr_cb2_interp, kcorr_mlb_interp
Exemple #33
0
def extinct(ls,
            EBV=0.1,
            HyperZ=True,
            type='calzetti',
            atmos=False,
            tatmos='eso',
            unit='aa'):
    """
  http://extinction.readthedocs.io/en/latest/                                                                                                             
  https://github.com/kbarbary/extinction/blob/19be03f7e04ce22802c52137205aa67ae7a0a8de/extinction.pyx

  ls: wavelength in angstroms; alternative unit of 'invum' (inverse microns).                                                                           
  A_V: extinction in magnitudes at characteristic V band.                                                                                               
  Ratio of total to selective extinction, Rv = A_V / E(B - V).                                                                                         
  
  -- https://arxiv.org/pdf/1209.2152.pdf                                                                                                             
  -- https://arxiv.org/pdf/astro-ph/9911459.pdf                                                                                                      
  
  Both studies used template spectra including dust attenuation following the Calzetti law.                                                              
  Finkelstein et al. inferred an attenuation at 1500A, A1500, of 1.3 magnitudes at z=4                                                                
  and A1500 < 0.25 at z=7. In contrast, for galaxies with z = 6.5, McLure et al. found A1500=0.4,                                                     
  a value above the upper limit found by Finkelstein et al. In many observational studies the dust                                                     
  attenuation is inferred from the UV continuum slope estimated from a single colour. Bouwens et al. (2011)                                          
  measured an average UV continuum slope of -3 for galaxies at z = 7. However, this value was measured to be -2.2                                      
  when more data were collected by the HST WFC3 (Bouwens et al. 2012). This illustrates how the scarcity of high                                       
  redshift data can bias the estimation of dust attenuation.                                                                                         
      
  UV continuum slope is a poor indicator of dust attenuation, as our results are extremely sensitive to the choice                                      
  of extinction curve used as the input to the attenuation of starlight calculations. For our standard choice, a Milky Way (MW)                          
  extinction curve, galaxies get bluer when they are attenuated, which, as we explain in Section 6 is due to the presence of a                          
  bump in the extinction curve.                                                                                                             
  Usually the term dust 'extinction' refers to the attenuation of the light from a point source placed behind a screen of dust.                          
  Thus, the 'extinction' is independent of the geometry of the system.                                                                                    

  With a_\lambda = L_\lambda(attenuated) / L_\lambda(unattenuated), \tau_eff(\lambda) = -ln(a_\lambda).
  In magnitudes: \tau_eff = A_\lambda / (2.5 \log_10 e).
  
  MW extinction: Cardelli, Clayton & Mathis (1989) with A_V = 1.0 (Amplitude) and R_V = 3.1 (Shape).                                                     
  """

    ## Av = Rv * E(B-V); Rv = 4.05
    if HyperZ == False:
        if type == 'odonnell':
            ## MW extinction O'Donnell (1994)
            Rv = 3.1
            ext = extinction.odonnell94(ls, Rv * EBV, Rv)

        elif type == 'fitzpatrick':
            ## LMC extinction; Fitzpatrick (1999)
            Rv = 3.10
            ext = extinction.fitzpatrick99(ls, Rv * EBV, Rv)

        else:
            Rv = 4.05
            ext = extinction.calzetti00(
                ls, Rv * EBV,
                Rv)  ## Extinction in magnitudes at each input wavelength.

    else:
        """
    http://webast.ast.obs-mip.fr/hyperz/hyperz_manual1/node10.html                                                                                      
    The extinction curve are expressed in  k(lambda[A]) vs lambda[A].                                                                                   
    Apply a E(B-V) correction by:                                                                                                                            
    flux_attenuated = flux_intrinsic * 10^[-0.4*k(l)*Av/Rv] 
    """

        files = {
            'fitzpatrick': 'LMC_Fitzpatrick.dat',
            'allen': 'MW_Allen.dat',
            'seaton': 'MW_seaton.dat',
            'prevot': 'SMC_prevot.dat',
            'calzetti': 'SB_calzetti.dat'
        }
        Rvs = {
            'fitzpatrick': 3.10,
            'allen': 3.10,
            'seaton': 3.10,
            'prevot': 2.72,
            'calzetti': 4.05
        }

        files[
            'calzetti_mod'] = 'SB_calzetti_mod.dat'  ## modified Calzetti law including contribution from 2175A bump (Massaroti et al., 2001)
        Rvs['calzetti_mod'] = 4.05

        data = np.loadtxt('ext-laws/' + files[type])
        extinterp = interp1d(data[:, 0],
                             EBV * data[:, 1],
                             kind='cubic',
                             bounds_error=False,
                             fill_value=0.0)
        ext = extinterp(ls)

    if atmos == True:
        files = {
            'eso': 'extinc_eso.dat',
            'ctio': 'extinc_ctio.dat'
        }  ## Atmospheric extinction curves
        Rv = 3.1

        data = np.loadtxt('ext-laws/' + files[tatmos])
        extinterp = interp1d(data[:, 0],
                             EBV * data[:, 1],
                             kind='cubic',
                             bounds_error=False,
                             fill_value=0.0)

        ext += extinterp(ls)

    return 10.**(
        -0.4 * ext
    )  ## Deredden with -Av; Positive extinction values decrease flux.
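
# Hypothetical usage sketch of extinct() above (assumes numpy is imported as np,
# as elsewhere in this module): HyperZ=False with an analytic law avoids the
# external 'ext-laws/' files; the wavelength grid, flat demo SED and E(B-V) are
# purely illustrative.
demo_wave = np.linspace(1500., 9000., 100)   # Angstroms
demo_flux = np.ones_like(demo_wave)          # flat intrinsic SED
demo_attenuated = demo_flux * extinct(demo_wave, EBV=0.1, HyperZ=False, type='odonnell')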
Exemple #34
0
    def estimate_galactic_extinction(self, ax=None, r_v: float = 3.1, **kwargs):
        import extinction
        if ax is None:
            fig, ax = plt.subplots()
        if "marker" not in kwargs:
            kwargs["marker"] = "x"

        self.retrieve_extinction_table()
        lambda_eff_tbl = self.irsa_extinction["LamEff"].to(
            units.Angstrom)
        power_law = models.PowerLaw1D()
        fitter = fitting.LevMarLSQFitter()
        fitted = fitter(power_law, lambda_eff_tbl, self.irsa_extinction["A_SandF"].value)

        tbl = self.photometry_to_table(fmts=["ascii.ecsv", "ascii.csv"])

        x = np.linspace(0, 80000, 1000) * units.Angstrom

        a_v = (r_v * self.ebv_sandf).value

        tbl["ext_gal_sandf"] = extinction.fitzpatrick99(tbl["lambda_eff"], a_v, r_v) * units.mag
        tbl["ext_gal_pl"] = fitted(tbl["lambda_eff"]) * units.mag
        tbl["ext_gal_interp"] = np.interp(
            tbl["lambda_eff"],
            lambda_eff_tbl,
            self.irsa_extinction["A_SandF"].value
        ) * units.mag

        ax.plot(
            x, extinction.fitzpatrick99(x, a_v, r_v),
            label="S\&F + F99 extinction law",
            c="red"
        )
        ax.plot(
            x, fitted(x),
            label=f"power law fit to IRSA",
            # , \\alpha={fitted.alpha.value}; $x_0$={fitted.x_0.value}; A={fitted.amplitude.value}",
            c="blue"
        )
        ax.scatter(
            lambda_eff_tbl, self.irsa_extinction["A_SandF"].value,
            label="from IRSA",
            c="green",
            **kwargs)
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_pl"].value,
            label="power law interpolation of IRSA",
            c="blue",
            **kwargs
        )
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_interp"].value,
            label="numpy interpolation from IRSA",
            c="violet",
            **kwargs
        )
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_sandf"].value,
            label="S\&F + F99 extinction law",
            c="red",
            **kwargs
        )
        ax.set_ylim(0, 0.6)
        ax.legend()
        plt.savefig(os.path.join(self.data_path, f"{self.name_filesys}_irsa_extinction.pdf"))
        plt.close()
        self.extinction_power_law = {
            "amplitude": fitted.amplitude.value * fitted.amplitude.unit,
            "x_0": fitted.x_0.value,
            "alpha": fitted.alpha.value
        }

        for row in tbl:
            instrument = row["instrument"]
            band = row["band"]
            epoch_name = row["epoch_name"]

            # if row["lambda_eff"] > max(lambda_eff_tbl) or row["lambda_eff"] < min(lambda_eff_tbl):
            #     key = "ext_gal_pl"
            #     self.photometry[instrument][band]["ext_gal_type"] = "power_law_fit"
            # else:
            #     key = "ext_gal_interp"
            #     self.photometry[instrument][band]["ext_gal_type"] = "interpolated"
            key = "ext_gal_sandf"
            self.photometry[instrument][band][epoch_name]["ext_gal_type"] = "s_and_f"
            self.photometry[instrument][band][epoch_name]["ext_gal"] = row[key]
            self.photometry[instrument][band][epoch_name]["mag_ext_corrected"] = row["mag"] - row[key]
            if "mag_sep" in row.colnames:
                self.photometry[instrument][band][epoch_name]["mag_sep_ext_corrected"] = row["mag_sep"] - row[key]

        # tbl_2 = self.photometry_to_table()
        # tbl_2.update(tbl)
        # tbl_2.write(self.build_photometry_table_path().replace("photometry", "photemetry_extended"))
        self.update_output_file()
        return ax
def deredden(wave,
             flux,
             ra,
             dec,
             scaling=0.86,
             reddening_law='fitzpatrick99',
             dustmaps_dir=None,
             r_v=3.1,
             ebv=None):
    """Dereddens the given spectrum, given a right ascension and declination or :math:`E(B-V)`.

    Parameters
    ----------
    wave : array
        Wavelength values.
    flux : array
        Flux density values.
    ra : float
        Right ascension in degrees.
    dec : float
        Declination in degrees.
    scaling: float, default ``0.86``
        Calibration of the Milky Way dust maps. Either ``0.86``
        for the Schlafly & Finkbeiner (2011) recalibration or ``1.0`` for the original
        dust map of Schlegel, Finkbeiner & Davis (1998).
    reddening_law: str, default ``fitzpatrick99``
        Reddening law. The options are: ``ccm89`` (Cardelli, Clayton & Mathis 1989), ``odonnell94`` (O’Donnell 1994),
        ``fitzpatrick99`` (Fitzpatrick 1999), ``calzetti00`` (Calzetti 2000) and ``fm07`` (Fitzpatrick & Massa 2007 with
        :math:`R_V` = 3.1.)
    dustmaps_dir : str, default ``None``
        Directory where the dust maps of Schlegel, Finkbeiner & Davis (1998) are found.
    r_v : float, default ``3.1``
        Total-to-selective extinction ratio (:math:`R_V`)
    ebv : float, default ``None``
        Colour excess (:math:`E(B-V)`). If given, this is used instead of the dust map value.

    Returns
    -------
    deredden_flux : array
        Dereddened flux values.

    """
    pisco_path = piscola.__path__[0]
    if dustmaps_dir is None:
        dustmaps_dir = os.path.join(pisco_path, 'sfddata-master')

    if ebv is None:
        m = sfdmap.SFDMap(mapdir=dustmaps_dir, scaling=scaling)
        ebv = m.ebv(ra, dec)  # RA and DEC in degrees

    a_v = r_v * ebv

    rl_list = ['ccm89', 'odonnell94', 'fitzpatrick99', 'calzetti00', 'fm07']
    assert reddening_law in rl_list, f'Choose one of the available reddening laws: {rl_list}'

    if reddening_law == 'ccm89':
        ext = extinction.ccm89(wave, a_v, r_v)
    elif reddening_law == 'odonnell94':
        ext = extinction.odonnell94(wave, a_v, r_v)
    elif reddening_law == 'fitzpatrick99':
        ext = extinction.fitzpatrick99(wave, a_v, r_v)
    elif reddening_law == 'calzetti00':
        ext = extinction.calzetti00(wave, a_v, r_v)
    elif reddening_law == 'fm07':
        ext = extinction.fm07(wave, a_v)

    deredden_flux = extinction.remove(ext, flux)

    return deredden_flux
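
# Hypothetical usage sketch of deredden() above (assumes numpy is imported as np
# alongside this module's own imports): passing ebv directly skips the dust-map
# lookup; the coordinates, E(B-V) and flat demo spectrum are purely illustrative.
demo_wave = np.arange(3500., 9000., 10.)   # Angstroms
demo_flux = np.ones_like(demo_wave)        # flat demo spectrum
demo_flux_dered = deredden(demo_wave, demo_flux, ra=150.0, dec=2.2, ebv=0.03)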
Exemple #36
0
"""Plot extinction functions for comparison"""

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable

import extinction

rcParams['font.family'] = 'serif'

wave = np.logspace(np.log10(910.), np.log10(30000.), 2000)

a_lambda = {'ccm89': extinction.ccm89(wave, 1.0, 3.1),
            'odonnell94': extinction.odonnell94(wave, 1.0, 3.1),
            'fitzpatrick99': extinction.fitzpatrick99(wave, 1.0),
            'fm07': extinction.fm07(wave, 1.0)}
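
# note: fitzpatrick99 uses its default R_V = 3.1 here, and fm07 is defined only for R_V = 3.1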

names = list(a_lambda.keys())  # consistent ordering between panels

fig = plt.figure(figsize=(8.5, 6.))

ax = plt.axes()
for name in names:
    plt.plot(wave, a_lambda[name], label=name)
plt.axvline(x=2700., ls=':', c='k')
plt.axvline(x=3030.3030, ls=':', c='k')
plt.axvline(x=9090.9091, ls=':', c='k')
plt.axvspan(wave[0], 1150., fc='0.8', ec='none', zorder=-1000)
plt.axvspan(1150., 1250., fc='0.9', ec='none', zorder=-1000)    
plt.text(0.5, 0.95, '$R_V = 3.1$', transform=ax.transAxes, va='top',