Code example #1
File: desi_qso_templ.py Project: desihub/desisim
def mean_templ_zi(zimag, debug=False, i_wind=0.1, z_wind=0.05,
                  boss_pca_fil=None):
    '''
    Generate 'mean' templates at given z,i

    Parameters
    ----------
    zimag: list of tuples
      Redshift, imag pairs for the templates
    i_wind: float (0.1 mag)
      Window for smoothing imag
    z_wind: float (0.05)
      Window for smoothing redshift
    '''
    # PCA values
    if boss_pca_fil is None:
        boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
    hdu = fits.open(boss_pca_fil)
    pca_coeff = hdu[1].data

    # BOSS Eigenvectors
    eigen, eigen_wave = fbq.read_qso_eigen()
    npix = len(eigen_wave)

    # Open the BOSS catalog file
    boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    bcat_hdu = fits.open(boss_cat_fil)
    t_boss = bcat_hdu[1].data
    zQSO = t_boss['z_pipe']
    tmp = t_boss['PSFMAG']
    imag = tmp[:,3] # i-band mag

    # Output array
    ntempl = len(zimag)
    out_spec = np.zeros( (ntempl, npix) )

    # Iterate on z,imag
    for tt, izi in enumerate(zimag):
        # Find matches
        idx = np.where( (np.fabs(imag-izi[1]) < i_wind) &
                        (np.fabs(zQSO-izi[0]) < z_wind))[0]
        if len(idx) < 50:
            raise ValueError('mean_templ_zi: Not enough QSOs! {:d}'.format(len(idx)))

        # Calculate median PCA values
        PCA0 = np.median(pca_coeff['PCA0'][idx])
        PCA1 = np.median(pca_coeff['PCA1'][idx])
        PCA2 = np.median(pca_coeff['PCA2'][idx])
        PCA3 = np.median(pca_coeff['PCA3'][idx])
        acoeff = np.array( [PCA0, PCA1, PCA2, PCA3] )

        # Make the template
        out_spec[tt,:] = np.dot(eigen.T,acoeff)
        if debug is True:
            xdb.xplot(eigen_wave*(1.+izi[0]), out_spec[tt,:])
            xdb.set_trace()

    # Return
    return out_spec
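The selection in mean_templ_zi is a windowed match in both redshift and i-band magnitude. A minimal, self-contained sketch of that step, with random stand-in values in place of the BOSS catalog arrays:

import numpy as np

zQSO = np.random.uniform(2., 3., 1000)      # stand-in catalog redshifts
imag = np.random.uniform(18., 21., 1000)    # stand-in catalog i-band mags
z0, i0 = 2.5, 19.5                          # one requested (z, imag) pair
idx = np.where((np.fabs(imag - i0) < 0.1) &     # i_wind
               (np.fabs(zQSO - z0) < 0.05))[0]  # z_wind
print(len(idx), 'matching quasars')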
Code example #2
File: arproc.py Project: EdwardBetts/PYPIT
def trim(slf, file, det):
    for i in range(slf._spect['det'][det-1]['numamplifiers']):
        datasec = "datasec{0:02d}".format(i+1)
        x0, x1 = slf._spect['det'][det-1][datasec][0][0], slf._spect['det'][det-1][datasec][0][1]
        y0, y1 = slf._spect['det'][det-1][datasec][1][0], slf._spect['det'][det-1][datasec][1][1]
        if x0 < 0: x0 += file.shape[0]
        if x1 <= 0: x1 += file.shape[0]
        if y0 < 0: y0 += file.shape[1]
        if y1 <= 0: y1 += file.shape[1]
        if i == 0:
            xv = np.arange(x0, x1)
            yv = np.arange(y0, y1)
        else:
            xv = np.unique(np.append(xv, np.arange(x0, x1)))
            yv = np.unique(np.append(yv, np.arange(y0, y1)))
    # Construct an array with the rows and columns to be extracted
    w = np.ix_(xv, yv)
#	if len(file.shape) == 2:
#		trimfile = file[w]
#	elif len(file.shape) == 3:
#		trimfile = np.zeros((w[0].shape[0],w[1].shape[1],file.shape[2]))
#		for f in xrange(file.shape[2]):
#			trimfile[:,:,f] = file[:,:,f][w]
#	else:
#		msgs.error("Cannot trim {0:d}D frame".format(int(len(file.shape))))
    try:
        trim_file = file[w]
    except IndexError:
        msgs.bug("Odds are datasec is set wrong. Maybe due to transpose")
        set_trace()
        msgs.error("Cannot trim file")
    return trim_file
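The trimming hinges on np.ix_, which builds an open mesh from the row and column index arrays so that a single indexing operation extracts the full rectangular sub-frame. A self-contained illustration, independent of the PYPIT data structures:

import numpy as np

frame = np.arange(36).reshape(6, 6)   # stand-in for a raw detector frame
xv = np.arange(1, 4)                  # rows to keep
yv = np.arange(2, 5)                  # columns to keep
w = np.ix_(xv, yv)                    # open mesh of rows x columns
print(frame[w].shape)                 # (3, 3)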
Code example #3
File: absline.py Project: nhmc/xastropy
    def mk_pix_stau(self, spec, kbin=22.*u.km/u.s, debug=False, **kwargs):
        """ Generate the smoothed tau array for kinematic tests
    
        Parameters
        ----------
        spec: Spectrum1D class
          Input spectrum
          velo is expected to have been filled already
        kbin: Quantity (22 km/s)
          Size of the boxcar smoothing kernel
        debug: bool (False)
          Plot the smoothed tau array for inspection

        Returns
        -------
        Nothing; fills self.stau and self.pix with the smoothed
        optical depth array and the pixel indices used
    
        JXP on 11 Dec 2014
        """
        # Calculate dv
        imn = np.argmin( np.fabs(spec.velo) )
        dv = np.abs( spec.velo[imn] - spec.velo[imn+1] )

        # Test for bad pixels
        pixmin = np.argmin( np.fabs( spec.velo-self.vmnx[0] ) )
        pixmax = np.argmin( np.fabs( spec.velo-self.vmnx[1] ) )
        pix = np.arange(pixmin, pixmax+1)
        npix = len(pix)
        badzero=np.where((spec.flux[pix] == 0) & (spec.sig[pix] <= 0))[0]
        if len(badzero) > 0:
            if np.max(badzero)-np.min(badzero) >= 5: 
                raise ValueError('orig_kin: too many or too large sections of bad data')
            
            spec.flux[pix[badzero]] = np.mean(np.array([spec.flux[pix[np.min(badzero)-1]],
                                                        spec.flux[pix[np.max(badzero)+1]]]))
            xdb.set_trace() # Should add sig too

        # Generate the tau array
        tau = np.zeros(npix)
        gd = np.where((spec.flux[pix] > spec.sig[pix]/2.) &
                      (spec.sig[pix] > 0.))[0]
        if len(gd) == 0:
            raise ValueError('orig_kin: Profile too saturated.')

        tau[gd] = np.log(1./spec.flux[pix[gd]])
        sat = np.ones(npix, dtype=bool)  # Pixels not in gd are saturated
        sat[gd] = False
        tau[sat] = np.log(2./spec.sig[pix[sat]])

        # Smooth
        nbin = (np.round(kbin/dv)).value
        kernel = Box1DKernel(nbin, mode='center')
        stau = convolve(tau, kernel, boundary='fill', fill_value=0.)
        if debug is True:
            xdb.xplot(spec.velo[pix], tau, stau)

        # Fill
        self.stau = stau
        self.pix = pix
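The smoothing step is a plain boxcar convolution with astropy's Box1DKernel. A minimal stand-alone sketch of that pattern, with random values in place of the tau array:

import numpy as np
from astropy.convolution import convolve, Box1DKernel

tau = np.random.rand(100)      # stand-in optical-depth array
nbin = 5                       # kernel width in pixels
stau = convolve(tau, Box1DKernel(nbin), boundary='fill', fill_value=0.)
print(stau.size)               # same length as the input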
Code example #4
File: abskingui.py Project: xastropy/xastropy
def main(*args, **kwargs):
    """ Runs the AbsKinGui

    Command line
    or from Python
    Examples:
      1.  python ~/xastropy/xastropy/xguis/abskingui.py
      2.  abskingui.main(filename)
      3.  abskingui.main(spec1d)
    """
    import sys
    import argparse

    parser = argparse.ArgumentParser(description='Parse for AbsKinGui')
    parser.add_argument("file", type=str, help="Spectral file")
    parser.add_argument("-sysfile", type=str, help="System JSON file")
    parser.add_argument("-zsys", type=float, help="System Redshift")
    parser.add_argument("-outfil", type=str, help="Output filename")
    parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
                        action="store_true")

    if len(args) == 0:
        pargs = parser.parse_args()
    else: # better know what you are doing!
        if isinstance(args[0],(Spectrum1D, tuple)):
            if not kwargs.get('rerun', False):
                app = QtGui.QApplication(sys.argv)
            xdb.set_trace()
            gui = AbsKinGui(args[0], **kwargs)
            gui.exec_()
            #gui.show()
            #app.exec_()
            return gui, app
        else: # String parsing
            largs = [iargs for iargs in args]
            pargs = parser.parse_args(largs)
            xdb.set_trace() # Not setup for command line yet

    # Normalized?
    norm = True
    if pargs.un_norm:
        norm = False

    # Read AbsSystem
    from linetools.isgm.abssystem import GenericAbsSystem
    if pargs.sysfile is not None:
        abs_sys = GenericAbsSystem.from_json(pargs.sysfile, chk_vel=False)
    else:
        abs_sys = None

    app = QtGui.QApplication(sys.argv)
    gui = AbsKinGui(pargs.file, z=pargs.zsys, norm=norm, abs_sys=abs_sys, outfil=pargs.outfil)
    gui.show()
    app.exec_()

    return gui, app
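When main is called from Python with string arguments, argparse is fed the list directly through parse_args. A self-contained illustration of that pattern (the filename and redshift here are hypothetical):

import argparse

parser = argparse.ArgumentParser(description='Parse for AbsKinGui')
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("-zsys", type=float, help="System Redshift")
pargs = parser.parse_args(['spec1d.fits', '-zsys', '0.5'])  # hypothetical inputs
print(pargs.file, pargs.zsys)   # spec1d.fits 0.5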
Code example #5
File: arutils.py Project: adwasser/PYPIT
def bspline_fit(x,y,order=3,knots=None,everyn=20,xmin=None,xmax=None,w=None,bkspace=None):
    ''' bspline fit to x,y
    Should probably only be called from func_fit

    Parameters:
    ---------
    x: ndarray
    y: ndarray
    order: int
      Order of the spline.  Default=3 (cubic)
    knots: ndarray, optional
      Interior knots for the spline (generated from everyn or bkspace if not given)
    xmin: float, optional
      Minimum value in the array  [both must be set to normalize]
    xmax: float, optional
      Maximum value in the array  [both must be set to normalize]
    w: ndarray, optional
      weights to be used in the fitting (weights = 1/sigma)
    everyn: int 
      Knot everyn good pixels, if used
    bkspace: float 
      Spacing of breakpoints in units of x

    Returns:
    ---------
    tck: tuple
      (knots, coefficients, degree) of the spline, as returned by scipy.interpolate.splrep
    '''
    #
    if w is None:
        ngd = x.size
        gd = np.arange(ngd)
        weights = None
    else:
        gd = np.where(w > 0.)[0]
        ngd = gd.size
        weights = w[gd]
    # Make the knots
    if knots is None:
        if bkspace is not None: 
            xrnge = (np.max(x[gd]) - np.min(x[gd]))
            startx = np.min(x[gd])
            nbkpts = max(int(xrnge/bkspace) + 1,2)
            tempbkspace = xrnge/(nbkpts-1)
            knots = np.arange(1,nbkpts-1)*tempbkspace + startx
        elif everyn is not None:
            idx_knots = np.arange(10, ngd-10, everyn) # A knot every good N pixels
            knots = x[gd[idx_knots]]
        else:
            msgs.error("No method specified to generate knots")
    # Generate spline
    try:
        tck = interpolate.splrep(x[gd], y[gd], w=weights, k=order, t=knots)
    except ValueError: # Knot problem
        msgs.warn("Problem in the bspline knot")
        debugger.set_trace()
    return tck
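bspline_fit ultimately defers to scipy.interpolate.splrep with explicit interior knots. A self-contained example of that call on synthetic data:

import numpy as np
from scipy import interpolate

x = np.linspace(0., 10., 100)
y = np.sin(x) + 0.05 * np.random.randn(100)
knots = np.linspace(1., 9., 8)            # interior knots, strictly inside x
tck = interpolate.splrep(x, y, k=3, t=knots)
yfit = interpolate.splev(x, tck)          # evaluate the fitted spline
print(np.abs(yfit - np.sin(x)).max())     # small residual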
Code example #6
File: mcmc.py Project: astronomeara/xastropy-old
def set_fn_data(sources=None, extra_fNc=[]):
    '''
    Load up f(N) data

    Parameters
    ----------
    sources: list, optional
      Names of the f(N) constraint references to use
    extra_fNc: list, optional
      Additional user-supplied constraint files (ASCII)

    Returns
    -------
    fN_data : List of fN_Constraint Classes

    JXP on 27 Nov 2014
    '''
    if sources is None:
        sources = ['OPB07', 'OPW12', 'OPW13', 'K05', 'K13R13', 'N12']

    fn_file = xa_path + '/igm/fN/fn_constraints_z2.5_vanilla.fits'
    k13r13_file = xa_path + '/igm/fN/fn_constraints_K13R13_vanilla.fits'
    n12_file = xa_path + '/igm/fN/fn_constraints_N12_vanilla.fits'
    all_fN_cs = xifd.fn_data_from_fits([fn_file, k13r13_file, n12_file])

    # Add on, e.g. user-supplied
    if len(extra_fNc) > 0:
        for src in extra_fNc:
            all_fN_cs.append(xifd.fN_data_from_ascii_file(
                os.path.abspath(src)))

    # Include good data sources
    fN_cs = []
    for fN_c in all_fN_cs:
        # In list?
        if fN_c.ref in sources:
            print('Using {:s} as a constraint'.format(fN_c.ref))
            # Append
            fN_cs.append(fN_c)
            # Pop
            idx = sources.index(fN_c.ref)
            sources.pop(idx)

    # Check that all the desired sources were used
    if len(sources) > 0:
        xdb.set_trace()

    #xdb.set_trace()

    return fN_cs
Code example #7
File: core.py Project: astronomeara/xastropy-old
    def __getattr__(self, k):
        # Try Self first
        try:
            lst = [getattr(cgm_abs, k) for cgm_abs in self.cgm_abs]
        except AttributeError:
            # Try AbsLine_Sys next
            try:
                lst = [getattr(cgm_abs.abs_sys, k) for cgm_abs in self.cgm_abs]
            except AttributeError:
                # Galaxy?
                try:
                    lst = [getattr(cgm_abs.galaxy, k) for cgm_abs in self.cgm_abs]
                except AttributeError:
                    print('cgm.core: Attribute not found!')
                    xdb.set_trace()
        # Return array
        return xu_array.lst_to_array(lst, mask=self.mask)
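The pattern above is attribute delegation: __getattr__ fires only when normal lookup fails, and the survey object answers by collecting the attribute from each member system. A toy version of the same idea:

import numpy as np

class System(object):
    def __init__(self, zabs):
        self.zabs = zabs

class Survey(object):
    def __init__(self, systems):
        self.systems = systems
    def __getattr__(self, k):
        # Only reached for attributes the survey itself lacks
        return np.array([getattr(isys, k) for isys in self.systems])

survey = Survey([System(0.1), System(0.5), System(1.2)])
print(survey.zabs)   # [0.1 0.5 1.2]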
Code example #8
File: draftmcmc.py Project: afeinsod/alresearch
def set_fn_data(sources=None, extra_fNc=[]):
    '''
    Load up f(N) data

    Parameters
    ----------
    sources: list, optional
      Names of the f(N) constraint references to use
    extra_fNc: list, optional
      Additional user-supplied constraint files (ASCII)

    Returns
    -------
    fN_data : List of fN_Constraint Classes

    JXP on 27 Nov 2014
    '''
    if sources is None:
        sources = ['OPB07', 'OPW12', 'OPW13', 'K05', 'K13R13', 'N12']

    fn_file = xa_path+'/igm/fN/fn_constraints_z2.5_vanilla.fits'
    k13r13_file = xa_path+'/igm/fN/fn_constraints_K13R13_vanilla.fits'
    n12_file = xa_path+'/igm/fN/fn_constraints_N12_vanilla.fits'
    all_fN_cs = xifd.fn_data_from_fits([fn_file,k13r13_file,n12_file])

    # Add on, e.g. user-supplied 
    if len(extra_fNc) > 0:
        for src in extra_fNc:
            all_fN_cs.append(xifd.fN_data_from_ascii_file(os.path.abspath(src)))

    # Include good data sources
    fN_cs = []
    for fN_c in all_fN_cs:
        # In list?
        if fN_c.ref in sources:
            print('Using {:s} as a constraint'.format(fN_c.ref))
            # Append
            fN_cs.append(fN_c)
            # Pop
            idx = sources.index(fN_c.ref)
            sources.pop(idx)
    
    # Check that all the desired sources were used
    if len(sources) > 0:
        xdb.set_trace()

    #xdb.set_trace()

    return fN_cs
Code example #9
File: mk_dla_files.py Project: ntejos/xastropy
def mk_1dspec(idla, outpath=None, name=None, clobber=False):
    """ Collate and rename the spectra
    Parameters
    ----------
    idla : DLASystem
    name : str, optional
    clobber : bool, optional

    """
    #
    if outpath is None:
        outpath = 'Spectra/'
    if name is None:
        raise ValueError('Not setup for this')

    # Spectra files
    spec_dict = idla._clmdict['fits_files']
    all_spec = []
    for key in spec_dict.keys():
        instr = fits_idx(key)
        if instr == 'XX':
            xdb.set_trace()
            if 'PROGETTI' in spec_dict[key]:
                instr = 'MAGE'
            elif 'HIRES' in spec_dict[key]:
                instr = 'HIRES'
            else:
                xdb.set_trace()
        # Generate new filename
        spec_fil = name + '_' + instr + '.fits'
        # Copy over?
        tmp = glob.glob(outpath + spec_fil + "*")
        if (len(tmp) == 0) or clobber:
            # Read
            spec = lsio.readspec(spec_dict[key])
            # Write
            spec.write_to_fits(outpath + spec_fil, clobber=True, add_wave=True)
        # Append
        all_spec.append(str(spec_fil))
    # Return
    return all_spec
Code example #10
File: igmguesses.py Project: jsribaud/xastropy
    def fit_component(self, component):
        '''Fit the component and save values'''
        from astropy.modeling import fitting
        # Generate Fit line
        fit_line = AbsLine(component.init_wrest,
            linelist=self.llist[self.llist['List']])
        fit_line.analy['vlim'] = component.vlim
        fit_line.analy['spec'] = self.spec
        fit_line.attrib['z'] = component.zcomp
        fit_line.measure_aodm()
        # Guesses
        fmin = np.argmin(self.spec.flux[fit_line.analy['pix']])
        zguess = self.spec.dispersion[fit_line.analy['pix'][fmin]]/component.init_wrest - 1.
        bguess = (component.vlim[1]-component.vlim[0])/2.
        Nguess = fit_line.attrib['logN']
        # Voigt model
        fitvoigt = xsv.single_voigt_model(logN=Nguess,b=bguess.value,
                                z=zguess, wrest=component.init_wrest.value,
                                gamma=fit_line.data['gamma'].value, 
                                f=fit_line.data['f'], fwhm=self.fwhm)
        # Restrict z range
        try:
            fitvoigt.z.min = component.zcomp+component.vlim[0].value/3e5/(1+component.zcomp)
        except TypeError:
            QtCore.pyqtRemoveInputHook()
            xdb.set_trace()
            QtCore.pyqtRestoreInputHook()
        fitvoigt.z.max = component.zcomp+component.vlim[1].value/3e5/(1+component.zcomp)
        #QtCore.pyqtRemoveInputHook()
        #xdb.set_trace()
        #QtCore.pyqtRestoreInputHook()
        # Fit
        fitter = fitting.LevMarLSQFitter()
        parm = fitter(fitvoigt,self.spec.dispersion[fit_line.analy['pix']],
            self.spec.flux[fit_line.analy['pix']].value)

        # Save and sync
        component.attrib['N'] = parm.logN.value
        component.attrib['z'] = parm.z.value
        component.attrib['b'] = parm.b.value * u.km/u.s
        component.sync_lines()
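The fit itself is the standard astropy.modeling workflow: build a model with initial guesses, then let LevMarLSQFitter optimize it against the data. A minimal self-contained example, with a Gaussian absorption profile standing in for the custom Voigt model:

import numpy as np
from astropy.modeling import models, fitting

x = np.linspace(-5., 5., 200)
y = 1. - 0.8 * np.exp(-x**2 / 2.) + 0.02 * np.random.randn(200)

init = models.Const1D(1.) - models.Gaussian1D(amplitude=0.8, mean=0., stddev=1.)
fitter = fitting.LevMarLSQFitter()
parm = fitter(init, x, y)    # best-fit compound model
print(parm.parameters)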
Code example #11
File: readwrite.py Project: profxj/old_xastropy
def setwave(hdr):

    # DEPRECATED
    xdb.set_trace()
    # Initialize
    SCL = 1.0

    # Parse the header
    npix = hdr["NAXIS1"]
    crpix1 = hdr["CRPIX1"] if "CRPIX1" in hdr else 1.0
    crval1 = hdr["CRVAL1"] if "CRVAL1" in hdr else 1.0
    cdelt1 = hdr["CDELT1"] if "CDELT1" in hdr else 1.0
    ctype1 = hdr["CTYPE1"] if "CTYPE1" in hdr else None
    dcflag = hdr["DC-FLAG"] if "DC-FLAG" in hdr else None

    # Generate
    if (dcflag == 1) or (cdelt1 < 1e-4):
        wave = SCL * 10.0 ** (crval1 + (cdelt1 * np.arange(npix) + 1.0 - crpix1))  # Log
    xdb.set_trace()

    # Return
    return wave
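Behind the deprecation trap, setwave rebuilds a log-linear wavelength array from the FITS WCS keywords. A stand-alone sketch of the standard log10 arithmetic with a header-like dict (note the snippet above groups the CRPIX1 offset slightly differently):

import numpy as np

hdr = {'NAXIS1': 5, 'CRVAL1': 3.5, 'CDELT1': 1e-4, 'CRPIX1': 1., 'DC-FLAG': 1}
# log10(wave) = CRVAL1 + CDELT1 * (pixel + 1 - CRPIX1)
wave = 10.**(hdr['CRVAL1'] +
             hdr['CDELT1'] * (np.arange(hdr['NAXIS1']) + 1. - hdr['CRPIX1']))
print(wave)   # Angstroms, starting at 10**3.5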
Code example #12
File: spec_guis.py Project: nhmc/xastropy
def run_xvelp(*args, **kwargs):
    '''
    Runs the XVelPltGui

    Command line
    or from Python
    Examples:
      1.  python ~/xastropy/xastropy/xguis/spec_guis.py 3
      2.  spec_guis.run_xvelp(filename)
      3.  spec_guis.run_xvelp(spec1d)
    '''

    import argparse
    from specutils import Spectrum1D

    xdb.set_trace() # DEPRECATED FOR NOW

    parser = argparse.ArgumentParser(description='Parse for XVelPlt')
    parser.add_argument("flag", type=int, help="GUI flag (ignored)")
    parser.add_argument("file", type=str, help="Spectral file")
    parser.add_argument("-zsys", type=float, help="System Redshift")
    parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
                        action="store_true")

    if len(args) == 0:
        pargs = parser.parse_args()
    else: # better know what you are doing!
        if isinstance(args[0],(Spectrum1D,tuple)):
            if not kwargs.get('rerun', False):
                app = QtGui.QApplication(sys.argv)
            xdb.set_trace()
            gui = XVelPltGui(args[0], **kwargs)
            gui.exec_()
            #gui.show()
            #app.exec_()
            return gui, app
        else: # String parsing 
            largs = ['1'] + [iargs for iargs in args]
            pargs = parser.parse_args(largs)
    
    # Normalized?
    norm = True
    if pargs.un_norm:
        norm = False

    # Second spectral file?
    try:
        zsys = pargs.zsys
    except AttributeError:
        zsys = None

    xdb.set_trace() # Not setup for command line yet
    app = QtGui.QApplication(sys.argv)
    gui = XSpecGui(pargs.file, zsys=zsys, norm=norm)
    gui.show()
    app.exec_()

    return gui
Code example #13
File: utils.py Project: profxj/old_xastropy
    def box_smooth(self, nbox, preserve=False):
        """ Box car smooth spectrum and return a new one
        Is a simple wrapper to the rebin routine

        Parameters
        ----------
        nbox: integer
          Number of pixels to smooth over
        preserve: bool (False) 
          Keep the new spectrum at the same number of pixels as original
        Returns:
          XSpectrum1D of the smoothed spectrum
        """
        from xastropy.xutils import arrays as xxa
        if preserve:
            from astropy.convolution import convolve, Box1DKernel
            new_fx = convolve(self.flux, Box1DKernel(nbox))
            new_sig = convolve(self.sig, Box1DKernel(nbox))
            new_wv = self.dispersion
        else:
            # Truncate arrays as need be
            npix = len(self.flux)
            try:
                new_npix = npix // nbox  # New division
            except ZeroDivisionError:
                xdb.set_trace()
            orig_pix = np.arange(new_npix * nbox)

            # Rebin (mean)
            new_wv = xxa.scipy_rebin(self.dispersion[orig_pix], new_npix)
            new_fx = xxa.scipy_rebin(self.flux[orig_pix], new_npix)
            new_sig = xxa.scipy_rebin(self.sig[orig_pix],
                                      new_npix) / np.sqrt(nbox)

        # Return
        return XSpectrum1D.from_array(
            new_wv, new_fx, uncertainty=apy.nddata.StdDevUncertainty(new_sig))
Code example #14
File: utils.py Project: mneeleman/xastropy
    def box_smooth(self, nbox, preserve=False):
        """ Box car smooth spectrum and return a new one
        Is a simple wrapper to the rebin routine

        Parameters
        ----------
        nbox: integer
          Number of pixels to smooth over
        preserve: bool (False) 
          Keep the new spectrum at the same number of pixels as original
        Returns:
          XSpectrum1D of the smoothed spectrum
        """
        from xastropy.xutils import arrays as xxa
        if preserve:
            from astropy.convolution import convolve, Box1DKernel
            new_fx = convolve(self.flux, Box1DKernel(nbox))
            new_sig = convolve(self.sig, Box1DKernel(nbox))
            new_wv = self.dispersion
        else:
            # Truncate arrays as need be
            npix = len(self.flux)
            try:
                new_npix = npix // nbox # New division
            except ZeroDivisionError:
                xdb.set_trace()
            orig_pix = np.arange( new_npix * nbox )

            # Rebin (mean)
            new_wv = xxa.scipy_rebin( self.dispersion[orig_pix], new_npix )
            new_fx = xxa.scipy_rebin( self.flux[orig_pix], new_npix )
            new_sig = xxa.scipy_rebin( self.sig[orig_pix], new_npix ) / np.sqrt(nbox)

        # Return
        return XSpectrum1D.from_array(new_wv, new_fx,
                                      uncertainty=apy.nddata.StdDevUncertainty(new_sig))
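The preserve=False branch is a mean rebin: truncate the arrays to a multiple of nbox, then average each block. The reshape/mean idiom below reproduces that step without the xastropy helper:

import numpy as np

flux = np.random.rand(103)
nbox = 5
new_npix = flux.size // nbox          # new number of pixels (truncates)
rebinned = flux[:new_npix * nbox].reshape(new_npix, nbox).mean(axis=1)
print(rebinned.size)                  # 20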
Code example #15
def build_spectra(field, obs_path=None, path='./'):
    """Top-level program to build spectra files

    Parameters
    ----------
    field : tuple
      (Name, ra, dec)
    """
    if obs_path is None:
        obs_path = os.getenv('DROPBOX_DIR') + 'CASBAH_Observing/'
    """
    Hectospec
    """
    ## DEAL WITH DUPLICATES (TAKE HIGHER ZQ)
    #HDU0: wavelengths (Angstroms)
    #HDU1: sky-subtracted, variance-weighted coadded spectra (total counts)
    #HDU2: inverse variance (counts)
    #HDU3: AND bad pixel mask
    #HDU4: OR bad pixel mask
    #HDU5: Plugmap structure (fiber info)
    #HDU6: Combined sky spectra
    #HDU7: Summed (unweighted) spectra

    # Load up the data
    hecto_path = '/Galx_Spectra/Hectospec/'
    spfiles = glob.glob(obs_path + field[0] + hecto_path + 'spHect-*')
    spfiles.sort()
    for spfile in spfiles:
        if 'zcat' not in spfile:  # Spectra
            hdu = fits.open(spfile)
            print('Reading {:s}'.format(spfile))
            wave = hdu[0].data
            flux = hdu[1].data
            ivar = hdu[2].data
            sig = np.zeros_like(flux)
            gd = ivar > 0.
            sig[gd] = 1. / np.sqrt(ivar[gd])  # sigma is 1/sqrt(inverse variance)
            tbl = Table(hdu[5].data)
            if 'hecto_wave' not in locals():
                hecto_wave, hecto_flux, hecto_sig, hecto_stbl = wave, flux, sig, tbl
            else:
                hecto_wave = np.concatenate((hecto_wave, wave))
                hecto_flux = np.concatenate((hecto_flux, flux))
                hecto_sig = np.concatenate((hecto_sig, sig))
                hecto_stbl = vstack([hecto_stbl, tbl])
        else:
            tmp = Table.read(spfile)  # z values
            if 'hecto_ztbl' not in locals():
                hecto_ztbl = tmp
            else:
                hecto_ztbl = vstack([hecto_ztbl, tmp])
    # Check
    if len(hecto_stbl) != len(hecto_ztbl):
        raise ValueError("Bad Hecto tables..")
    # Objects only
    gdobj = np.where(hecto_ztbl['MAG'] > 1.)[0]
    nobj = len(gdobj)
    # Check for duplicates
    idval = np.array(hecto_ztbl[gdobj]['ID']).astype(int)
    uni, counts = np.unique(idval, return_counts=True)
    if len(uni) != nobj:
        warnings.warn("Resetting duplicate ID values using the targs table")
        # Load targets file
        targ_file = xcasbahu.get_filename(field, 'TARGETS')
        targs = Table.read(targ_file,
                           delimiter='|',
                           format='ascii.fixed_width',
                           fill_values=[('--', '0', 'MASK_NAME')])
        tcoord = SkyCoord(ra=targs['TARG_RA'] * u.deg,
                          dec=targs['TARG_DEC'] * u.deg)
        # Loop on duplicates
        dup = np.where(counts > 1)[0]
        for idup in dup:
            dobj = np.where(hecto_ztbl['ID'] == str(uni[idup]))[0]
            # Loop on objects
            for idobj in dobj:
                dcoord = SkyCoord(ra=hecto_stbl['RA'][idobj] * u.deg,
                                  dec=hecto_stbl['DEC'][idobj] * u.deg)
                # Match by RA/DEC
                mt = np.argmin(dcoord.separation(tcoord))
                # Reset ID
                #xdb.set_trace()
                print('Setting ID to {:s} from {:s}'.format(
                    str(targs['TARG_ID'][mt]), hecto_ztbl['ID'][idobj]))
                hecto_ztbl['ID'][idobj] = str(targs['TARG_ID'][mt])
    # Double check
    idval = np.array(hecto_ztbl[gdobj]['ID']).astype(int)
    uni, counts = np.unique(idval, return_counts=True)
    if len(uni) != nobj:
        xdb.set_trace()
        raise ValueError("Should not get here")

    # Generate the final Table
    hecto_spec = Table()
    hecto_spec.add_column(hecto_stbl['RA'][gdobj])
    hecto_spec.add_column(hecto_stbl['DEC'][gdobj])
    hecto_spec['RA'].unit = u.deg
    hecto_spec['DEC'].unit = u.deg
    hecto_spec.add_column(hecto_ztbl['Z'][gdobj])
    hecto_spec.add_column(hecto_ztbl['Z_ERR'][gdobj])
    hecto_spec.add_column(hecto_ztbl['ZQ'][gdobj])
    hecto_spec.add_column(hecto_ztbl['APERTURE'][gdobj])
    hecto_spec.add_column(hecto_ztbl['ID'][gdobj])  # May wish to recast as int
    hecto_spec.add_column(hecto_ztbl['MAG'][gdobj])
    hecto_spec.add_column(Column(['MMT'] * nobj, name='TELESCOPE'))
    hecto_spec.add_column(Column(['Hectospec'] * nobj, name='INSTRUMENT'))

    hecto_spec.add_column(Column(hecto_wave[gdobj, :], name='WAVE'))
    hecto_spec.add_column(Column(hecto_flux[gdobj, :], name='FLUX'))
    hecto_spec.add_column(Column(hecto_sig[gdobj, :], name='SIG'))
    # Write
    hectospec_file = xcasbahu.get_filename(field, 'HECTOSPEC')
    hecto_spec.write(hectospec_file, overwrite=True)
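Duplicate targets are caught with np.unique(..., return_counts=True): any ID whose count exceeds one gets re-matched against the targets table. The detection step in isolation:

import numpy as np

idval = np.array([101, 102, 102, 103, 104, 104, 104])
uni, counts = np.unique(idval, return_counts=True)
dup = uni[counts > 1]
print(dup)   # [102 104] -- the IDs that need reassigning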
Code example #16
File: lls_literature.py Project: ntejos/xastropy
def battisti12():
    '''Battisti, A. et al. 2012, ApJ, 744, 93
    HST/COS
    QSO info from Table 1
    Metal columns parsed from Table 3
    NHI from Lya
    '''
    all_lls = []
    # Grab ASCII files from ApJ
    tab_fils = [
        xa_path + "/data/LLS/battisti12.tb1.ascii",
        xa_path + "/data/LLS/battisti12.tb3.ascii"
    ]
    urls = [
        'http://iopscience.iop.org/0004-637X/744/2/93/suppdata/apj413924t1_ascii.txt',
        'http://iopscience.iop.org/0004-637X/744/2/93/suppdata/apj413924t3_ascii.txt'
    ]
    for jj, tab_fil in enumerate(tab_fils):
        chk_fil = glob.glob(tab_fil)
        if len(chk_fil) > 0:
            tab_fil = chk_fil[0]
        else:
            url = urls[jj]
            print('LLSSurvey: Grabbing table file from {:s}'.format(url))
            f = urllib2.urlopen(url)
            with open(tab_fil, "wb") as code:
                code.write(f.read())
    # QSO info
    with open(tab_fils[0], 'r') as f:
        flines1 = f.readlines()
    # Grab RA/DEC
    all_idict = []
    for iline in flines1:
        if iline[0:2] != 'SD':
            continue
        # Parse
        isplit = iline.split('\t')
        name = isplit[0].split(' ')[1]
        radec = xor.stod1(name)
        zem = float(isplit[1].strip())
        zabs = float(isplit[2].strip())
        NHI = float(isplit[3].strip()[0:4])
        sigNHI = np.array([float(isplit[3].strip()[11:])] * 2)
        # Save
        lls = LLSSystem(name=name,
                        RA=radec[0],
                        Dec=radec[1],
                        zem=zem,
                        zabs=zabs,
                        NHI=NHI,
                        sigNHI=sigNHI)
        #
        all_lls.append(lls)
        all_idict.append({})

    # Abundances
    with open(tab_fils[1], 'r') as f:
        flines3 = f.readlines()
    flines3 = flines3[5:]
    ion = None
    for iline in flines3:
        if ion == 'Ni II':
            break
        isplit = iline.split('\t')
        if isplit[0] == 'C II*':  # Skipping CII*
            continue
        # ion
        ipos = -1
        while (isplit[0][ipos] not in ['I', 'V']):
            ipos -= 1
        ion = isplit[0][0:ipos + 1 + len(isplit[0])]
        Zion = xai.name_ion(ion)
        # Loop on systems
        for kk, iis in enumerate(isplit[1:-1]):
            if iis.strip()[0] == '.':
                continue
            all_idict[kk][ion] = dict(Z=Zion[0], ion=Zion[1], sig_clm=0.)
            if iis[0] == '>':
                all_idict[kk][ion]['flg_clm'] = 2
                all_idict[kk][ion]['clm'] = float(iis[1:6])
            elif iis[0] == '<':
                all_idict[kk][ion]['flg_clm'] = 3
                all_idict[kk][ion]['clm'] = float(iis[1:])
            else:
                all_idict[kk][ion]['flg_clm'] = 1
                all_idict[kk][ion]['clm'] = float(iis[0:5])
                all_idict[kk][ion]['sig_clm'] = float(iis[-4:])

    # Return SLLS only
    for kk, lls in enumerate(all_lls):
        try:
            lls._ionclms = IonClms(idict=all_idict[kk])
        except ValueError:
            xdb.set_trace()
        lls.Refs.append('Bat12')
    fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
    return fin_slls
Code example #17
    def calc_lox(self, z, NHI_min, NHI_max=None, neval=10000, cumul=False):
        """ Calculate l(X) over an N_HI interval

        Parameters:
        z: float
          Redshift for evaluation
        NHI_min: float
          minimum NHI value
        NHI_max: float (Infinity)
          maximum NHI value for evaluation
        neval: int (10000)
          Discretization parameter
        cumul: boolean (False)
          Return a cumulative array?

        Returns:
        lX: float
          l(X) value

        JXP 10 Nov 2014
        """
        # Initial
        if NHI_max is None:
            NHI_max = 23.
            infinity = True
        else:
            infinity = False

        try:
            nz = len(z)
        except TypeError:
            nz = 1
            z = np.array([z])

        # Brute force (should be good to ~0.5%)
        lgNHI = np.linspace(NHI_min, NHI_max, neval)
        dlgN = lgNHI[1] - lgNHI[0]

        # Evaluate f(N,X)
        lgfNX = self.eval(lgNHI, z)
        #xdb.set_trace()

        # Sum
        lX = np.zeros(nz)
        for ii in range(nz):
            lX[ii] = np.sum(10.**(lgfNX[:, ii] + lgNHI)) * dlgN * np.log(10.)
        if cumul:
            if nz > 1:  # Have not modified this yet
                raise ValueError('fN.model: Not ready for this model type %s' %
                                 self.fN_mtype)
            cum_sum = np.cumsum(10.**(lgfNX[:, ii] + lgNHI)) * dlgN * np.log(10.)
        #xdb.set_trace()

        # Infinity?
        if infinity is True:
            # This is risky...
            # Best to cut it off
            xdb.set_trace()
            neval2 = 1000
            lgNHI2 = NHI_max + (99. - NHI_max) * np.arange(neval2) / (neval2 - 1.)
            dlgN = lgNHI2[1] - lgNHI2[0]
            lgfNX = np.zeros((neval2, nz))
            lX2 = np.zeros(nz)
            for ii in range(nz):
                lgfNX[:, ii] = self.eval(lgNHI2, z[ii]).flatten()
                lX2[ii] = np.sum(
                    10.**(lgfNX[:, ii] + lgNHI2)) * dlgN * np.log(10.)
                xdb.set_trace()
            #
            lX = lX + lX2

        # Return
        if nz == 1:
            lX = lX[0]
        if cumul:
            return lX, cum_sum, lgNHI
        else:
            return lX
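The core of calc_lox is a brute-force integral of f(N,X) over column density on a log10 N grid: since dN = N ln(10) dlogN, the sum becomes sum(10**(log f + log N)) * dlogN * ln(10). A toy check, with an analytic power law f = 10**B * N**(-beta) standing in for the model's eval():

import numpy as np

beta, B = 1.5, 9.0
lgNHI = np.linspace(12., 22., 10000)
dlgN = lgNHI[1] - lgNHI[0]
lgfNX = B - beta * lgNHI                                  # log10 f(N,X)
lX = np.sum(10.**(lgfNX + lgNHI)) * dlgN * np.log(10.)    # brute-force sum
exact = 10.**B / (beta - 1.) * (10.**(12.*(1.-beta)) - 10.**(22.*(1.-beta)))
print(lX, exact)   # the two closely agree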
Code example #18
File: abssys_utils.py Project: nhmc/xastropy
    def load_low_kin(self):
        from xastropy import kinematics as xkin
        # Grab spectrum from ions
        xdb.set_trace()
        out_kin = xkin.orig_kin(spec, vmnx)
Code example #19
File: arload.py Project: ntejos/PYPIT
def set_params(lines, indict, setstr=""):
    """
    Adjust settings parameters.
    lines    : an array of settings with the same format as the default 'settings.armed'
    indict  : a dictionary generated by initialise that contains all settings
    setstr   : a string argument for error messages that tells the user which file the error occurred in.
    """
    for i in range(len(lines)):
        if lines[i].strip() == '' or lines[i].strip() == '\n': continue
        if lines[i].strip()[0] == '#': continue
        tline = lines[i].strip().split("#")[0]
        linspl = tline.split()
        if len(linspl) <= 2:
            msgs.error("Not enough parameters given on line:"+msgs.newline()+lines[i])
        if linspl[0] == 'check':
            text = str(linspl[2]).strip().replace('_', ' ')
            if ',' in text:  # There are multiple possibilities
                indict[linspl[0]][linspl[1]] += text.split(',')
            else:
                indict[linspl[0]][linspl[1]] = text
        elif linspl[0] in indict.keys():
            if linspl[1] in ['check', 'match', 'combsame']:
                text = str(linspl[3]).strip().replace('_', ' ')
                if ',' in text and text[0:2] != '%,':  # There are multiple possibilities - split the infile
                    indict[linspl[0]][linspl[1]][linspl[2]] += text.split(',')
                else:
                    indict[linspl[0]][linspl[1]][linspl[2]] = text
            elif linspl[1][:6] == 'ndet':  # Mosaic of Detectors
                indict[linspl[0]][linspl[1]] = int(linspl[2])
                tmp = []
                for ii in range(indict['mosaic']['ndet']):  # List
                    tmpi = copy.deepcopy(indict['det'])
                    tmpi['suffix'] = str(ii)
                    tmp.append(tmpi)
                indict['det'] = tmp
            elif linspl[1][:7] == 'headext':  # Header Sections
                try:
                    null = int(linspl[1][7:])
                except ValueError:
                    msgs.error("keyword headext must contain an integer suffix")
                indict[linspl[0]][linspl[1]] = int(linspl[2])
            elif linspl[1][:8] == 'lampname':  # Lamp names
                try:
                    null = int(linspl[1][8:])
                except ValueError:
                    msgs.error("keyword lampname must contain an integer suffix")
                indict[linspl[0]][linspl[1]] = linspl[2]
            elif linspl[1][:8] == 'lampstat': # Lamp status
                try:
                    null = int(linspl[1][8:])
                except ValueError:
                    msgs.error("keyword lampstat must contain an integer suffix")
                indict[linspl[0]][linspl[1]] = linspl[2]
            elif linspl[1] in indict[linspl[0]].keys():
                indict[linspl[0]][linspl[1]] = set_params_wtype(indict[linspl[0]][linspl[1]], linspl[2], lines=tline, setstr=setstr)
            else:
                debugger.set_trace()
                msgs.error(setstr + "Settings contains bad line (arg 2):"+msgs.newline()+lines[i].split('#')[0].strip())
        elif linspl[0][:3] == 'det': # Detector parameters
            try:
                didx = int(linspl[0][4:]) - 1
            except ValueError:
                msgs.error("keyword det must contain an integer suffix")
            else:
                linspl[0] = 'det'
            if linspl[1][:6] == 'ampsec': # Amplifier Sections
                try:
                    null = int(linspl[1][6:])
                except ValueError:
                    msgs.error("keyword ampsec must contain an integer suffix")
                indict[linspl[0]][didx][linspl[1]] = load_sections(linspl[2], strtxt=linspl[1])
            elif linspl[1][:7] == 'datasec': # Data Sections
                try:
                    null = int(linspl[1][7:])
                except ValueError:
                    msgs.error("keyword datasec must contain an integer suffix")
                indict[linspl[0]][didx][linspl[1]] = load_sections(linspl[2], strtxt=linspl[1])
            elif linspl[1][:8] == 'oscansec': # Overscan Sections
                try:
                    null = int(linspl[1][8:])
                except ValueError:
                    msgs.error("keyword oscansec must contain an integer suffix")
                indict[linspl[0]][didx][linspl[1]] = load_sections(linspl[2], strtxt=linspl[1])
            elif linspl[1][:6] == 'numamp':
                indict[linspl[0]][didx]['numamplifiers'] = int(linspl[2])
                indict[linspl[0]][didx]['gain'] = [indict['det'][didx]['gain'][0]]*int(linspl[2])
                indict[linspl[0]][didx]['ronoise'] = [indict['det'][didx]['ronoise'][0]]*int(linspl[2])
            else:  # Read value
                indict[linspl[0]][didx][linspl[1]] = set_params_wtype(indict[linspl[0]][didx][linspl[1]], linspl[2], lines=tline,setstr=setstr)
        else:
            msgs.error(setstr + "Settings contains bad line (arg 1):"+msgs.newline()+lines[i].split('#')[0].strip())
    return indict
Code example #20
File: cos_halos.py Project: nhmc/xastropy
    def load_abskin(self,flg=1,kin_init_file=None):
        """ Load the absorption-line kinematic data for COS-Halos
        Calculate from scratch if needed

        Parameters
        ----------
        flg: integer (1)
          Flag indicating how to load the data
          0 = Load from file
          1 = Generate
        kin_init_file: string
          Name of kinematics driver file
    
        JXP on 10 Dec 2014
        """
    
        if flg == 1: # Generate
            # Read init file
            if kin_init_file is None:
                kin_init_file = os.path.abspath(os.environ.get('DROPBOX_DIR')+'/COS-Halos/Kin/'+
                                                  'coshalo_kin_driver.dat')
            kin_init = ascii.read(kin_init_file,guess=False)
    
            # Loop to my loop
            fgal = zip(self.field, self.gal_id)
            for cgm_abs in self.cgm_abs:
                # Match to kin_init
                mt = np.where( (cgm_abs.field == kin_init['QSO']) &
                               (cgm_abs.gal_id == kin_init['Galaxy']) )[0]
                if len(mt) == 0:
                    print('load_kin: No kinematics for {:s}, {:s}'.format(cgm_abs.field,
                                                                          cgm_abs.gal_id))
                    continue
                mt = mt[0]

                # Metals
                if kin_init['flgL'][mt] > 0:
                    wrest = kin_init['mtl_wr'][mt]*u.AA 
                    if wrest.value <= 1:
                        xdb.set_trace()
                    spec = get_coshalo_spec( cgm_abs, wrest )
                    vmnx = (kin_init['L_vmn'][mt]*u.km/u.s, kin_init['L_vmx'][mt]*u.km/u.s)
                    # Process
                    cgm_abs.abs_sys.kin['Metal'] = Kin_Abs(wrest, vmnx)
                    cgm_abs.abs_sys.kin['Metal'].fill_kin(spec, per=0.07)
                    # Save spec
                    cgm_abs.abs_sys.kin['Metal'].spec = spec
                else:
                    # Fill with zeros (for the keys)
                    cgm_abs.abs_sys.kin['Metal'] = Kin_Abs(0.*u.AA, (0., 0.))

                # HI
                if kin_init['flgH'][mt] > 0:
                    wrest = kin_init['HI_wrest'][mt]*u.AA 
                    if wrest.value <= 1:
                        xdb.set_trace()
                    spec = get_coshalo_spec( cgm_abs, wrest )
                    vmnx = (kin_init['HIvmn'][mt]*u.km/u.s, kin_init['HIvmx'][mt]*u.km/u.s) 
                    # Process
                    cgm_abs.abs_sys.kin['HI'] = Kin_Abs(wrest, vmnx)
                    cgm_abs.abs_sys.kin['HI'].fill_kin(spec, per=0.07)
                    cgm_abs.abs_sys.kin['HI'].spec = spec
                else:
                    # Fill with zeros (for the keys)
                    cgm_abs.abs_sys.kin['HI'] = Kin_Abs(0.*u.AA, (0., 0.))
Code example #21
File: model.py Project: jsribaud/xastropy
    def teff_ll(self, z912, zem, N_eval=5000, cosmo=None):
        """ Calculate teff_LL 
        Effective opacity from LL absorption at z912 from zem

        Parameters:
        z912: float
          Redshift for evaluation
        zem: float
          Redshift of source
        cosmo: astropy.cosmology (None)
          Cosmological model to adopt (as needed)
        N_eval: int (5000)
          Discretization parameter

        Returns:
        zval, teff_LL: array
          z values and Effective opacity from LL absorption from z912 to zem

        JXP 10 Nov 2014
        """
        # Imports
        from astropy import constants as const

        # NHI array
        lgNval = 11.5 + 10.5*np.arange(N_eval)/(N_eval-1.) #; This is base 10 [Max at 22]
        dlgN = lgNval[1]-lgNval[0]
        Nval = 10.**lgNval

        #; z array
        zval = z912 + (zem-z912)*np.arange(N_eval)/(N_eval-1.)
        dz = np.fabs(zval[1]-zval[0])

        teff_LL = np.zeros(N_eval)

        # dXdz
        dXdz = igmu.cosm_xz(zval, cosmo=cosmo, flg=1) 
        #if keyword_set(FNZ) then dXdz = replicate(1.,N_eval)

        # Evaluate f(N,X)
        velo = (zval-zem)/(1+zem) * (const.c.cgs.value/1e5) # Kludge for eval [km/s]

        log_fnX = self.eval(lgNval, zem, vel_array=velo)  
        log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

        # Evaluate tau(z,N)
        teff_engy = (const.Ryd.to(u.eV,equivalencies=u.spectral()) /
                     ((1+zval)/(1+zem)) )
        sigma_z = xai.photo_cross(1,1,teff_engy)
        #xdb.set_trace()
        #sigma_z = teff_cross * ((1+zval)/(1+zem))**(2.75)  # Not exact but close
        tau_zN = np.outer(Nval, sigma_z)

        # Integrand
        intg = 10.**(log_fnz) * (1. - np.exp(-1.*tau_zN))

        # Sum
        sumz_first = False
        if sumz_first == False:
            #; Sum in N first
            N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),  0) * dlgN * np.log(10.)
            #xdb.set_trace()
            # Sum in z
            teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz 
        #xdb.set_trace()

        # Debug
        debug=False
        if debug == True:
            #        x_splot, lgNval, alog10(10.d^(log_fnX) * dxdz * dz * Nval), /bloc
            #        x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #     printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #     writecol, 'debug_file'+strtrim(qq,2)+'.dat', $
            #               lgNval, restEW, log_fnX
            xdb.set_trace()
        # Return
        return zval, teff_LL
Code example #22
File: cos_halos.py Project: nhmc/xastropy
    def load_mega(self,flg=1, data_file=None,cosh_dct=None, pckl_fil=None,
                  skip_ions=False, test=False):
        """ Load the data for COS-Halos

        Parameters
        ----------
        flg: integer (1)
          Flag indicating how to load the data
          0 = IDL mega structure
          1 = FITS files from Dropbox
        data_file: string
          Name of data file
        pckl_fil: string
          Name of file for pickling

        JXP on 30 Nov 2014
        """
        #from xastropy.cgm import core as xcc
        #reload(xcc)

        # IDL save file
        if flg == 0:
            if data_file is None:
                data_file = os.path.abspath(os.environ.get('DROPBOX_DIR')+'/COS-Halos/lowions/'+
                                            'coshalos_lowmetals_mega.sav')
            '''
            from scipy.io import readsav
            print('cos_halos.load:  Be patient...')
            if cosh_dct is None:
                cosh_dct = readsav(data_file)
    
            # Generate the CGM Survey
            ncos = len(cosh_dct['megastruct'])
            self.nsys = ncos
            for kk in range(ncos):
            #  
                self.cgm_abs.append(CGM_Abs(
                    ras=cosh_dct['megastruct'][kk]['galaxy']['qsora'][0],
                    decs=cosh_dct['megastruct'][kk]['galaxy']['qsodec'][0],
                    g_ras=cosh_dct['megastruct'][kk]['galaxy']['ra'][0],
                    g_decs=cosh_dct['megastruct'][kk]['galaxy']['dec'][0],
                    zgal=cosh_dct['megastruct'][kk]['galaxy']['zspec'][0]
                    ))
            '''
        elif flg == 1: # FITS files
            fits_path = os.path.abspath(os.environ.get('DROPBOX_DIR')+'/COS-Halos/lowions/FITS')
            # Loop
            if test is True:
                cos_files = glob.glob(fits_path+'/J091*.fits') # For testing
            else:
                cos_files = glob.glob(fits_path+'/J*.fits')
            # Setup
            self.nsys = len(cos_files)
            # Read
            for fil in cos_files:
                print('cos_halos: Reading {:s}'.format(fil))
                mm = cos_files.index(fil)
                hdu = fits.open(fil)
                summ = hdu[1].data
                galx = hdu[2].data
                self.cgm_abs.append( CGMSys(ras=galx['qsora'][0],
                    decs=galx['qsodec'][0],
                    g_ras=galx['ra'][0],
                    g_decs=galx['dec'][0],
                    zgal=summ['zfinal'][0]
                    ))
                # COS-Halos naming
                self.cgm_abs[mm].field = galx['field'][0]
                self.cgm_abs[mm].gal_id = galx['galid'][0]
                # Galxy properties
                self.cgm_abs[mm].galaxy.halo_mass = summ['LOGMHALO'][0] 
                self.cgm_abs[mm].galaxy.stellar_mass = summ['LOGMFINAL'][0] 
                # Ions
                if skip_ions is True:
                    continue
                self.cgm_abs[mm].abs_sys.ions = IonClms()
                all_Z = []
                all_ion = []
                for jj in range(summ['nion'][0]):
                    iont = hdu[3+jj].data
                    if jj == 0: # Generate new Table
                        dat_tab = Table(iont)
                    else:
                        try:
                            dat_tab.add_row(Table(iont)[0])
                        except Exception:
                            xdb.set_trace()
                    all_Z.append(iont['zion'][0][0])
                    all_ion.append(iont['zion'][0][1])
                    '''
                    for key in self.cgm_abs[mm].abs_sys.ions.keys:
                        try:
                            self.cgm_abs[mm].abs_sys.ions.ion_data[zion][key] = iont[key][0]
                        except KeyError:
                            if key == 'flg_inst':
                                self.cgm_abs[mm].abs_sys.ions.ion_data[zion][key] = 0
                            else:
                                xdb.set_trace()
                    '''
                # Add Z,ion
                dat_tab.add_column(Column(all_Z,name='Z'))
                dat_tab.add_column(Column(all_ion,name='ion'))
                # Set
                self.cgm_abs[mm].abs_sys.ions._data = dat_tab
                # NHI
                self.cgm_abs[mm].abs_sys.NHI = self.cgm_abs[mm].abs_sys.ions[(1,1)]['CLM']
            # Mask
            self.mask = np.ones(self.nsys, dtype=bool)
        else:
            raise ValueError('cos_halos.load: Not ready for this flag {:d}'.format(flg))

Code example #23
File: abssys_utils.py Project: profxj/old_xastropy
    def load_low_kin(self):
        from xastropy import kinematics as xkin
        # Grab spectrum from ions
        xdb.set_trace()
        out_kin = xkin.orig_kin(spec, vmnx)
Code example #24
def desi_qso_templates(z_wind=0.2, zmnx=(0.4,4.), outfil=None, Ntempl=500,
                       boss_pca_fil=None, wvmnx=(3500., 10000.),
                       sdss_pca_fil=None, no_write=False):
    '''
    Generate 9000 templates for DESI from z=0.4 to 4 

    Parameters
    ----------
    z_wind: float (0.2)
      Window for sampling
    zmnx: tuple ((0.4, 4.))
      Min/max redshift for generation
    Ntempl: int (500)
      Number of draws per redshift window
    '''
    # Cosmology
    from astropy import cosmology 
    cosmo = cosmology.core.FlatLambdaCDM(70., 0.3)

    # PCA values
    if boss_pca_fil is None:
        boss_pca_fil = 'BOSS_DR10Lya_PCA_values_nocut.fits.gz'
    hdu = fits.open(boss_pca_fil)
    boss_pca_coeff = hdu[1].data

    if sdss_pca_fil is None:
        sdss_pca_fil = 'SDSS_DR7Lya_PCA_values_nocut.fits.gz'
    hdu2 = fits.open(sdss_pca_fil)
    sdss_pca_coeff = hdu2[1].data
    

    # Eigenvectors
    eigen, eigen_wave = fbq.read_qso_eigen()
    npix = len(eigen_wave)
    chkpix = np.where((eigen_wave > 900.) & (eigen_wave < 5000.) )[0]
    lambda_912 = 911.76
    pix912 = np.argmin( np.abs(eigen_wave-lambda_912) )

    # Open the BOSS catalog file
    boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    bcat_hdu = fits.open(boss_cat_fil)
    t_boss = bcat_hdu[1].data
    boss_zQSO = t_boss['z_pipe']

    # Open the SDSS catalog file
    sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
    scat_hdu = fits.open(sdss_cat_fil)
    t_sdss = scat_hdu[1].data
    sdss_zQSO = t_sdss['z']
    if len(sdss_pca_coeff) != len(sdss_zQSO):
        print('Need to finish running the SDSS models!')
        sdss_zQSO = sdss_zQSO[0:len(sdss_pca_coeff)]

    # Outfil
    if outfil is None:
        outfil = 'DESI_QSO_Templates_v1.1.fits'

    # Loop on redshift
    z0 = np.arange(zmnx[0],zmnx[1],z_wind)
    z1 = z0 + z_wind

    pca_list = ['PCA0', 'PCA1', 'PCA2', 'PCA3']
    PCA_mean = np.zeros(4)
    PCA_sig = np.zeros(4)
    PCA_rand = np.zeros( (4,Ntempl*2) )

    final_spec = np.zeros( (npix, Ntempl * len(z0)) )
    final_wave = np.zeros( (npix, Ntempl * len(z0)) )
    final_z = np.zeros( Ntempl * len(z0) )

    seed = -1422  # note: defined but never passed to np.random; the draws below are unseeded
    for ii in range(len(z0)):

        # BOSS or SDSS?
        if z0[ii] > 1.99:
            zQSO = boss_zQSO
            pca_coeff = boss_pca_coeff
        else:
            zQSO = sdss_zQSO
            pca_coeff = sdss_pca_coeff

        # Random z values and wavelengths
        zrand = np.random.uniform( z0[ii], z1[ii], Ntempl*2)
        wave = np.outer(eigen_wave, 1+zrand)

        # MFP (Worseck+14)
        mfp = 37. * ( (1+zrand)/5. )**(-5.4) # Physical Mpc

        # Grab PCA mean + sigma
        idx = np.where( (zQSO >= z0[ii]) & (zQSO < z1[ii]) )[0]
        print('Making z=({:g},{:g}) with {:d} input quasars'.format(z0[ii],z1[ii],len(idx)))

        # Get PCA stats and random values
        for ipca in pca_list:
            jj = pca_list.index(ipca)
            if jj == 0: # Use bounds for PCA0 [avoids negative values]
                xmnx = xstat_b.perc( pca_coeff[ipca][idx], per=0.95 )
                PCA_rand[jj,:] = np.random.uniform( xmnx[0], xmnx[1], Ntempl*2)
            else:
                PCA_mean[jj] = np.mean(pca_coeff[ipca][idx])
                PCA_sig[jj] = np.std(pca_coeff[ipca][idx])
                # Draws
                PCA_rand[jj,:] = np.random.uniform( PCA_mean[jj] - 2*PCA_sig[jj],
                                        PCA_mean[jj] + 2*PCA_sig[jj], Ntempl*2)

        # Generate the templates (2*Ntempl)
        spec = np.dot(eigen.T,PCA_rand)

        # Take first good Ntempl

        # Truncate, MFP, Fill
        ngd = 0
        for kk in range(2*Ntempl):
            # Any negative values?
            mn = np.min(spec[chkpix,kk])
            if mn < 0.:
                continue

            # MFP
            if z0[ii] > 2.39:
                z912 = wave[0:pix912,kk]/lambda_912 - 1.
                phys_dist = np.fabs( cosmo.lookback_distance(z912) -
                                cosmo.lookback_distance(zrand[kk]) ) # Mpc
                spec[0:pix912,kk] = spec[0:pix912,kk] * np.exp(-phys_dist.value/mfp[kk]) 

            # Write
            final_spec[:, ii*Ntempl+ngd] = spec[:,kk]
            final_wave[:, ii*Ntempl+ngd] = wave[:,kk]
            final_z[ii*Ntempl+ngd] = zrand[kk]
            ngd += 1
            if ngd == Ntempl:
                break
        if ngd != Ntempl:
            print('Did not make enough!')
            xdb.set_trace()

    if no_write is True: # Mainly for plotting
        return final_wave, final_spec, final_z

    # Rebin 
    light = 2.99792458e5        # [km/s]
    velpixsize = 10.            # [km/s]
    pixsize = velpixsize/light/np.log(10) # [pixel size in log-10 A]
    minwave = np.log10(wvmnx[0])          # minimum wavelength [log10-A]
    maxwave = np.log10(wvmnx[1])          # maximum wavelength [log10-A]
    r_npix = int(np.round((maxwave-minwave)/pixsize+1))

    log_wave = minwave+np.arange(r_npix)*pixsize # constant log-10 spacing

    totN = Ntempl * len(z0)
    rebin_spec = np.zeros((r_npix, totN))
    
    from scipy.interpolate import interp1d
    
    for ii in range(totN):
        # Interpolate (in log space)
        f1d = interp1d(np.log10(final_wave[:,ii]), final_spec[:,ii])
        rebin_spec[:,ii] = f1d(log_wave)
        #xdb.xplot(final_wave[:,ii], final_spec[:,ii], xtwo=10.**log_wave, ytwo=rebin_spec[:,ii])
        #xdb.set_trace()

    # Transpose for consistency
    out_spec = np.array(rebin_spec.T, dtype='float32')

    # Write
    hdu = fits.PrimaryHDU(out_spec)
    hdu.header.set('PROJECT', 'DESI QSO TEMPLATES')
    hdu.header.set('VERSION', '1.1')
    hdu.header.set('OBJTYPE', 'QSO')
    hdu.header.set('DISPAXIS',  1, 'dispersion axis')
    hdu.header.set('CRPIX1',  1, 'reference pixel number')
    hdu.header.set('CRVAL1',  minwave, 'reference log10(Ang)')
    hdu.header.set('CDELT1',  pixsize, 'delta log10(Ang)')
    hdu.header.set('LOGLAM',  1, 'log10 spaced wavelengths?')
    hdu.header.set('AIRORVAC', 'vac', ' wavelengths in vacuum (vac) or air')
    hdu.header.set('VELSCALE', velpixsize, ' pixel size in km/s')
    hdu.header.set('WAVEUNIT', 'Angstrom', ' wavelength units')
    hdu.header.set('BUNIT', '1e-17 erg/s/cm2/A', ' flux unit')

    idval = np.arange(totN)
    col0 = fits.Column(name='TEMPLATEID',format='K', array=idval)
    col1 = fits.Column(name='Z',format='E',array=final_z)
    cols = fits.ColDefs([col0, col1])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.header.set('EXTNAME','METADATA')

    hdulist = fits.HDUList([hdu, tbhdu])
    hdulist.writeto(outfil, clobber=True)

    return final_wave, final_spec, final_z
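
A minimal usage sketch of the template generator above (a hypothetical session: it assumes the BOSS/SDSS PCA value files and the BOSSPATH/SDSSPATH catalog trees referenced in the code are in place):

import matplotlib.pyplot as plt

# Draw 10 templates per 0.2-wide window over 2.0 < z < 2.4; skip the FITS write
wave, spec, z = desi_qso_templates(zmnx=(2.0, 2.4), Ntempl=10, no_write=True)
plt.plot(wave[:, 0], spec[:, 0])  # first template, observed frame
plt.xlabel('Wavelength (Ang)')
plt.show()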
Code example #31
File: arutils.py Project: adwasser/PYPIT
def bspline_fit(x,
                y,
                order=3,
                knots=None,
                everyn=20,
                xmin=None,
                xmax=None,
                w=None,
                bkspace=None):
    ''' bspline fit to x,y
    Should probably only be called from func_fit

    Parameters:
    ---------
    x: ndarray
    y: ndarray
    order: int
      Degree of the spline.  Default=3 (cubic)
    knots: ndarray, optional
      Interior knot positions; generated from everyn or bkspace if None
    xmin: float, optional
      Minimum value in the array  [both must be set to normalize]
    xmax: float, optional
      Maximum value in the array  [both must be set to normalize]
    w: ndarray, optional
      weights to be used in the fitting (weights = 1/sigma)
    everyn: int
      Knot every everyn good pixels, if used
    bkspace: float
      Spacing of breakpoints in units of x

    Returns:
    ---------
    tck: tuple
      Knots, coefficients, and degree of the fitted spline, as returned
      by scipy.interpolate.splrep (None if the fit fails)
    '''
    #
    if w is None:
        ngd = x.size
        gd = np.arange(ngd)
        weights = None
    else:
        gd = np.where(w > 0.)[0]
        ngd = gd.size  # needed below when placing knots with everyn
        weights = w[gd]
    # Make the knots
    if knots is None:
        if bkspace is not None:
            xrnge = (np.max(x[gd]) - np.min(x[gd]))
            startx = np.min(x[gd])
            nbkpts = max(int(xrnge / bkspace) + 1, 2)
            tempbkspace = xrnge / (nbkpts - 1)
            knots = np.arange(1, nbkpts - 1) * tempbkspace + startx
        elif everyn is not None:
            idx_knots = np.arange(10, ngd - 10,
                                  everyn)  # A knot every good N pixels
            knots = x[gd[idx_knots]]
        else:
            msgs.error("No method specified to generate knots")
    # Generate spline
    try:
        tck = interpolate.splrep(x[gd], y[gd], w=weights, k=order, t=knots)
    except ValueError:  # Knot problem
        msgs.warn("Problem in the bspline knot")
        debugger.set_trace()
        tck = None  # avoid returning an undefined name after the failure
    return tck
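
A quick usage sketch of bspline_fit with synthetic data (assumes PYPIT's msgs/debugger module-level imports are available and the fit succeeds):

import numpy as np
from scipy import interpolate

x = np.linspace(0., 10., 500)
y = np.sin(x) + 0.01 * np.random.randn(500)
tck = bspline_fit(x, y, order=3, everyn=25)  # place a knot every 25 good pixels
yfit = interpolate.splev(x, tck)             # evaluate the fitted spline on x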
Code example #32
File: tau_eff.py Project: LiuFang816/SALSTM_py_data
def lyman_ew(ilambda, zem, fN_model, NHI_MIN=11.5, NHI_MAX=22.0, N_eval=5000,
             bval=24., cosmo=None, debug=False, cumul=None,
             verbose=False, EW_spline=None, wrest=None):
    """ tau effective from HI Lyman series absorption

    Parameters
    ----------
    ilambda : float
        Observed wavelength (Ang)
    zem : float
        Emission redshift of the source [sets which Lyman lines are included]
    fN_model : FNModel
    NHI_MIN : float, optional
         -- Minimum log HI column for integration [default = 11.5]
    NHI_MAX : float, optional
         -- Maximum log HI column for integration [default = 22.0]
    N_eval : int, optional
      Number of NHI evaluations
    bval : float
         -- Characteristic Doppler parameter for the Lya forest
         -- [Options: 24, 35 km/s]
    cosmo : astropy.cosmology (None)
         -- Cosmological model to adopt (as needed)
    cumul : List of cumulative sums
         -- Recorded only if cumul is not None
    EW_spline : spline, optional
      Speeds up execution if input
    wrest : Quantity array, optional
      Rest wavelengths of the HI Lyman series.  Speeds up execution

    Returns
    -------
    teff : float
      Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop
    """
    # Cosmology
    if cosmo is None:
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError('tau_eff: ilambda must be a float for now')
    Lambda = ilambda
    if not isinstance(Lambda,u.quantity.Quantity):
        Lambda = Lambda * u.AA # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = pyigm_path+'/data/fN/EW_SPLINE_b24.yml'
            with open(EW_FIL, 'r') as infile:
                EW_spline = yaml.load(infile, Loader=yaml.Loader)  # dict from mk_ew_lyman_spline; Loader required by modern PyYAML
        else:
            raise ValueError('tau_eff: Not ready for this bvalue %g' % bval)

    # Lines
    if wrest is None:
        HI = LineList('HI')
        wrest = HI._data['wrest']

    # Find the lines
    gd_Lyman = wrest[(Lambda/(1+zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print('igm.tau_eff: No Lyman lines covered at this wavelength')
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX-NHI_MIN)*np.arange(N_eval)/(N_eval-1) # Base 10
    dlgN = lgNval[1]-lgNval[0]
    Nval = 10.**lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines
    for qq, line in enumerate(gd_Lyman): # Would be great to do this in parallel...
                             # (Can pack together and should)
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.:
            teff_lyman[qq] = 0.
            continue
        # dxdz
        dxdz = pyigmu.cosm_xz(zeval, cosmo=cosmo, flg_return=1)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline['wrest']*u.AA == line)[0]
        if len(idx) != 1:
            raise ValueError('tau_eff: Line %g not included or over included?!' % line)
        restEW = interpolate.splev(lgNval, EW_spline['tck'][idx[0]], der=0)

        # dz
        dz = ((restEW*u.AA) * (1+zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.evaluate(lgNval, zeval, cosmo=cosmo).flatten()

        # Sum
        intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.))

        # Debug
        if debug:
            try:
                from xastropy.xutils import xdebug as xdb
            except ImportError:
                break
            xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
            #x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #writecol, 'debug_file'+strtrim(qq,2)+'.dat',  lgNval, restEW, log_fnX
            xdb.set_trace()

    # Return
    return np.sum(teff_lyman)
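
A short usage sketch (assumes pyigm is installed; FNModel.default_model() is taken here to supply a fiducial f(N,X) model):

from pyigm.fN.fnmodel import FNModel

fN_default = FNModel.default_model()
# Effective Lyman-series opacity at observed 4000 Ang toward a zem = 2.5 source
teff = lyman_ew(4000., 2.5, fN_default)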
Code example #33
File: fit_boss_qsos.py Project: profxj/desisim
def do_sdss_lya_parallel(istart, iend, cut_Lya, output, debug=False):
    '''
    Generate PCA coeff for the SDSS DR7 dataset, 0.5<z<2

    Parameters
    ----------
    istart, iend: int
      First and last (exclusive) catalog indices to fit
    cut_Lya: boolean (True)
      Avoid using the Lya forest in the analysis
    output: multiprocessing.Queue or None
      If given, results are put on the queue instead of returned
    '''
    # Eigen
    eigen, eigen_wave = read_qso_eigen()

    # Open the BOSS catalog file
    sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
    bcat_hdu = fits.open(sdss_cat_fil)
    t_sdss = bcat_hdu[1].data
    nqso = len(t_sdss)

    pca_val = np.zeros((iend-istart, 4))

    if cut_Lya is False:
        print('do_sdss: Not cutting the Lya Forest in the fit')

    # Loop us -- Should spawn on multiple CPU
    #for ii in range(nqso):
    datdir =  os.environ.get('SDSSPATH')+'/DR7_QSO/spectro/1d_26/'
    jj = 0
    for ii in range(istart,iend):
        if (ii % 1000) == 0:
            print('SDSS ii = {:d}'.format(ii))
        # Spectrum file
        pnm = str(t_sdss['PLATE'][ii]).rjust(4,str('0'))
        fnm = str(t_sdss['FIBERID'][ii]).rjust(3,str('0'))
        mjd = str(t_sdss['MJD'][ii])
        sfil = datdir+pnm+'/1d/spSpec-'
        sfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit.gz'
        # Read spectrum
        spec_hdu = fits.open(sfil)
        head = spec_hdu[0].header
        iwave = head['CRVAL1']
        cdelt = head['CD1_1']

        t = spec_hdu[0].data
        flux = t[0,:]
        sig = t[2,:]
        npix = len(flux)
        wave = 10.**(iwave + np.arange(npix)*cdelt)
        ivar = np.zeros(npix)
        gd = np.where(sig>0.)[0]
        ivar[gd] = 1./sig[gd]**2
        zqso = t_sdss['z'][ii]

        wrest  = wave / (1+zqso)
        wlya = 1215. 

        # Cut Lya forest?
        if cut_Lya is True:
            Ly_imn = np.argmin(np.abs(wrest-wlya))
        else:
            Ly_imn = 0
            
        # Pack
        imn = np.argmin(np.abs(wrest[Ly_imn]-eigen_wave))
        npix = len(wrest[Ly_imn:])
        imx = npix+imn
        eigen_flux = eigen[:,imn:imx]

        # FIT
        acoeff = fit_eigen(flux[Ly_imn:], ivar[Ly_imn:], eigen_flux)
        pca_val[jj,:] = acoeff
        jj += 1

        # Check
        if debug is True:
            model = np.dot(eigen.T,acoeff)
            if flg_xdb is True:
                xdb.xplot(wrest, flux, xtwo=eigen_wave, ytwo=model)
            xdb.set_trace()

    #xdb.set_trace()
    print('Done with my subset {:d}, {:d}'.format(istart,iend))
    if output is not None:
        output.put((istart,iend,pca_val))
        #output.put(None)
    else:
        return pca_val
Code example #34
def deimos_targets(field, path=None):
    '''Generate files related to DEIMOS targets

    Parameters:
    -----------
    field: tuple
      (Name, ra, dec)
    path: str, optional
      Path within the field directory to the DEIMOS spectra

    Returns:
    ----------
    sex_targ, all_masks, all_obs, all_masktarg
    '''
    if path is None:
        path = '/Galx_Spectra/DEIMOS/'

    # Loop on Fields
    mask_path = field[0] + path + '/Masks/'
    # SExtractor targeting
    targetting_file = glob.glob(mask_path + '*targ.yaml')
    if len(targetting_file) == 1:
        sex_targ = parse_sex_file(field, targetting_file[0])
        sex_targ.add_column(Column(['DEIMOS'] * len(sex_targ), name='INSTR'))
        # Setup for mask matching
        sex_coord = SkyCoord(ra=sex_targ['TARG_RA'] * u.deg,
                             dec=sex_targ['TARG_DEC'] * u.deg)
        sex_msk_clms = {}
        cnames = ['MASK_NAME', 'MASK_ID']
        smsk = '--'
        msk_val = [smsk] * len(cnames)
        for kk, cname in enumerate(cnames):
            sex_msk_clms[cname] = [msk_val[kk]] * len(sex_targ)
    elif len(targetting_file) == 0:
        print(
            'WARNING: No SExtractor info for mask path {:s}'.format(mask_path))
        xdb.set_trace()
        sex_targ = None
    else:
        raise ValueError('Found multiple targ.yaml files!!')

    # Mask info
    all_masks = []
    all_masktarg = []
    all_obs = []
    files = glob.glob(mask_path + '*.out')
    for msk_file in files:
        print('Reading DEIMOS mask file: {:s}'.format(msk_file))
        # Parse
        mask_dict, targ_tab, obs_tab = parse_deimos_mask_file(msk_file)
        # Fill up SEx file
        if sex_targ is not None:
            for targ in targ_tab:
                targ_coord = xra.to_coord((targ['RAS'], targ['DECS']))
                sep = targ_coord.separation(sex_coord)
                isep = np.argmin(sep)
                if sep[isep] > 0.5 * u.arcsec:
                    raise ValueError('No match in SExtractor?!')
                else:  # Fill
                    if sex_msk_clms['MASK_NAME'][isep] == smsk:
                        sex_msk_clms['MASK_NAME'][isep] = mask_dict[
                            'MASK_NAME']
                    else:  # Already full
                        sex_targ.add_row(sex_targ[isep])
                        sex_msk_clms['MASK_NAME'].append(
                            mask_dict['MASK_NAME'])
        # Append
        all_masks.append(mask_dict)
        all_masktarg.append(targ_tab)
        all_obs.append(obs_tab)

    # Add columns to sex_targ
    for tt, cname in enumerate(cnames):
        # Mask
        mask = np.array([False] * len(sex_targ))
        bad = np.where(np.array(sex_msk_clms[cname]) == msk_val[tt])[0]
        if len(bad) > 0:
            mask[bad] = True
        #
        clm = MaskedColumn(sex_msk_clms[cname], name=cname, mask=mask)
        sex_targ.add_column(clm)
    # Return
    return sex_targ, all_masks, all_obs, all_masktarg
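
A usage sketch (the field tuple is made up; the routine expects the Masks/ directory layout and *targ.yaml SExtractor files described above):

field = ('J1234+5678', 188.5, 56.8)  # hypothetical (Name, ra, dec)
sex_targ, all_masks, all_obs, all_masktarg = deimos_targets(field)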
Code example #35
File: fit_boss_qsos.py Project: profxj/desisim
def do_boss_lya_parallel(istart, iend, cut_Lya, output, debug=False):
    '''
    Generate PCA coeff for the BOSS Lya DR10 dataset, v2.1

    Parameters
    ----------
    istart, iend: int
      First and last (exclusive) catalog indices to fit
    cut_Lya: boolean (True)
      Avoid using the Lya forest in the analysis
    output: multiprocessing.Queue or None
      If given, results are put on the queue instead of returned
    '''
    # Eigen
    eigen, eigen_wave = read_qso_eigen()

    # Open the BOSS catalog file
    boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    bcat_hdu = fits.open(boss_cat_fil)
    t_boss = bcat_hdu[1].data
    nqso = len(t_boss)

    pca_val = np.zeros((iend-istart, 4))

    if cut_Lya is False:
        print('do_boss: Not cutting the Lya Forest in the fit')

    # Loop us -- Should spawn on multiple CPU
    #for ii in range(nqso):
    datdir =  os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_spectra_v2.1/'
    jj = 0
    print('istart = {:d}'.format(istart))
    for ii in range(istart,iend):
        if (ii % 100) == 0:
            print('ii = {:d}'.format(ii))
        #print('ii = {:d}'.format(ii))
        # Spectrum file
        pnm = str(t_boss['PLATE'][ii])
        fnm = str(t_boss['FIBERID'][ii]).rjust(4,str('0'))
        mjd = str(t_boss['MJD'][ii])
        sfil = datdir+pnm+'/speclya-'
        sfil = sfil+pnm+'-'+mjd+'-'+fnm+'.fits.gz'
        # Read spectrum
        spec_hdu = fits.open(sfil)
        t = spec_hdu[1].data
        flux = t['flux']
        wave = 10.**t['loglam']
        ivar = t['ivar']
        zqso = t_boss['z_pipe'][ii]

        wrest  = wave / (1+zqso)
        wlya = 1215. 

        # Cut Lya forest?
        if cut_Lya is True:
            Ly_imn = np.argmin(np.abs(wrest-wlya))
        else:
            Ly_imn = 0
            
        # Pack
        imn = np.argmin(np.abs(wrest[Ly_imn]-eigen_wave))
        npix = len(wrest[Ly_imn:])
        imx = npix+imn
        eigen_flux = eigen[:,imn:imx]


        # FIT
        tflux = flux[Ly_imn:]
        tivar = ivar[Ly_imn:]
        acoeff = fit_eigen(tflux, tivar, eigen_flux)  # fixed: the untrimmed ivar mismatched the trimmed flux
        pca_val[jj,:] = acoeff
        jj += 1

        # Check
        if debug is True:
            model = np.dot(eigen.T,acoeff)
            if flg_xdb is True:
                xdb.xplot(wrest, flux, xtwo=eigen_wave, ytwo=model)
            xdb.set_trace()


    #xdb.set_trace()
    print('Done with my subset {:d}, {:d}'.format(istart,iend))
    if output is not None:
        output.put((istart,iend,pca_val))
        #output.put(None)
    else:
        return pca_val
Code example #37
File: lick.py Project: banados/xastropy
def wiki(targs, keys, fndr_pth=None, dbx_pth=None, outfil=None, skip_finder=False):
    """
    Generate a Wiki table for Lick observing.
    Should work for any of the Wiki pages

    Parameters:
    ----------
    targs: Table (RA, DEC keys required)
    keys: List
      List of keys to include in the Table + order
    fndr_pth: string
      Folder for finder charts
    dbx_pth: string
      Dropbox path for the finders
    skip_finder: bool (False)
      Skip making the finders

    Writes a file to disk that can be pasted into the Wiki
    """
    reload(x_finder)
    # Outfil
    if outfil is None:
        outfil = 'tmp_wiki.txt'
    f = open(outfil, 'w')

    # Finders?
    if not fndr_pth is None:
        if dbx_pth is None:
            dbx_pth = './'
            dbx_folder = './'
        else: # Expecting Public
            ifind = dbx_pth.find('Observing/')
            if ifind == -1:
                xdb.set_trace()
            else:
                dbx_folder = os.getenv('DROPBOX_DIR')+'/Public/'+dbx_pth[ifind:]
        #
        print('lick.wiki: Will copy finders to {:s}'.format(dbx_folder))
        # Get name tag
        name_tag = get_name_tag(targs.dtype.names)
        # Type
        #if isinstance(targs['RA'][0], basestring):
        #    radec = 1 # : separated strings
        #else:
        #    radec = 2 # decimal degrees
        # Finders
        fndr_files = []
        for targ in targs:
            # Finder
            #xdb.set_trace()
            if not skip_finder:
                x_finder.main([targ[name_tag], targ['RA'], targ['DEC']], fpath=fndr_pth)
            # Copy? + Save
            fil1 = fndr_pth+targ[name_tag]+'.pdf'
            fil2 = dbx_folder
            if not skip_finder:
                subprocess.call(["cp", fil1, dbx_folder])
            fndr_files.append(dbx_pth+targ[name_tag]+'.pdf')
        
    # Header
    lin = '||' 
    for key in keys:
        lin = lin+str(key)+'||'
    if 'fndr_files' in locals():
        lin=lin+'finder||comment||'
    f.write(str(lin+'\n'))
    
    # Targets
    for ii,targ in enumerate(targs):
        lin = '||' 
        for key in keys:
            lin = lin+str(targ[key])+'||'
        # Finder chart
        if 'fndr_files' in locals():
            lin = lin+'['+fndr_files[ii]+' pdf_finder]|| ||' # Lick formatting is different
        # Write
        f.write(str(lin+'\n'))

    # Close
    print('lick.wiki: Wrote {:s}'.format(outfil))
    f.close()
Code example #38
File: arload.py Project: ntejos/PYPIT
def load_headers(argflag, spect, datlines):
    """
    Load the header information for each fits file

    Parameters
    ----------
    argflag : dict
      Arguments and flags used for reduction
    spect : dict
      Properties of the spectrograph.
      If None, spect will be created, otherwise spect
      will be updated.
    datlines : list
      Input (uncommented) lines specified by the user.
      datlines contains the full data path to every
      raw exposure listed by the user.

    Returns
    -------
    spect : dict
      Loaded or updated properties of the spectrograph
    """
    chks = spect['check'].keys()
    keys = spect['keyword'].keys()
    fitsdict = dict({'directory': [], 'filename': [], 'utc': []})
    whddict = dict({})
    for k in keys:
        fitsdict[k]=[]
    headarr = [None for k in range(spect['fits']['numhead'])]
    for i in range(len(datlines)):
        # Try to open the fits file
        try:
            for k in range(spect['fits']['numhead']):
                headarr[k] = pyfits.getheader(datlines[i], ext=spect['fits']['headext{0:02d}'.format(k+1)])
                whddict['{0:02d}'.format(spect['fits']['headext{0:02d}'.format(k+1)])] = k
        except Exception:
            msgs.error("Error reading header from extension {0:d} of file:".format(spect['fits']['headext{0:02d}'.format(k+1)])+msgs.newline()+datlines[i])
        # Perform checks on each fits files, as specified in the settings.instrument file.
        skip = False
        for ch in chks:
            tfrhd = int(ch.split('.')[0])-1
            kchk  = '.'.join(ch.split('.')[1:])
            frhd  = whddict['{0:02d}'.format(tfrhd)]
            if spect['check'][ch] != str(headarr[frhd][kchk]).strip():
                #print ch, frhd, kchk
                #print spect['check'][ch], str(headarr[frhd][kchk]).strip()
                msgs.warn("The following file:"+msgs.newline()+datlines[i]+msgs.newline()+"is not taken with the settings.{0:s} detector".format(argflag['run']['spectrograph'])+msgs.newline()+"Remove this file, or specify a different settings file.")
                msgs.warn("Skipping the file..")
                skip = True
        if skip:
            continue
        # Now set the key values for each of the required keywords
        dspl = datlines[i].split('/')
        fitsdict['directory'].append('/'.join(dspl[:-1])+'/')
        fitsdict['filename'].append(dspl[-1])
        # Attempt to load a UTC
        utcfound = False
        for k in range(spect['fits']['numhead']):
            if 'UTC' in headarr[k].keys():
                utc = headarr[k]['UTC']
                utcfound = True
                break
            elif 'UT' in headarr[k].keys():
                utc = headarr[k]['UT']
                utcfound = True
                break
        if utcfound:
            fitsdict['utc'].append(utc)
        else:
            fitsdict['utc'].append(None)
            msgs.warn("UTC is not listed as a header keyword in file:"+msgs.newline()+datlines[i])
        # Read binning-dependent detector properties here? (maybe read speed too)
        #if argflag['run']['spectrograph'] in ['lris_blue']:
        #    arlris.set_det(fitsdict, headarr[k])
        # Now get the rest of the keywords
        for kw in keys:
            if spect['keyword'][kw] is None:
                value = str('None')  # This instrument doesn't have/need this keyword
            else:
                ch = spect['keyword'][kw]
                try:
                    tfrhd = int(ch.split('.')[0])-1
                except ValueError:
                    value = ch  # Keyword given a value. Only a string allowed for now
                else:
                    frhd = whddict['{0:02d}'.format(tfrhd)]
                    kchk = '.'.join(ch.split('.')[1:])
                    try:
                        value = headarr[frhd][kchk]
                    except KeyError: # Keyword not found in header
                        msgs.warn("{:s} keyword not in header. Setting to None".format(kchk))
                        value=str('None')
            # Convert the input time into hours
            if kw == 'time':
                if spect['fits']['timeunit']   == 's'  : value = float(value)/3600.0    # Convert seconds to hours
                elif spect['fits']['timeunit'] == 'm'  : value = float(value)/60.0      # Convert minutes to hours
                elif spect['fits']['timeunit'] in Time.FORMATS.keys() : # Astropy time format
                    if spect['fits']['timeunit'] in ['mjd']:
                        ival = float(value)
                    else:
                        ival = value
                    tval = Time(ival, scale='tt', format=spect['fits']['timeunit'])
                    # dspT = value.split('T')
                    # dy,dm,dd = np.array(dspT[0].split('-')).astype(np.int)
                    # th,tm,ts = np.array(dspT[1].split(':')).astype(np.float64)
                    # r=(14-dm)/12
                    # s,t=dy+4800-r,dm+12*r-3
                    # jdn = dd + (153*t+2)/5 + 365*s + s/4 - 32083
                    # value = jdn + (12.-th)/24 + tm/1440 + ts/86400 - 2400000.5  # THIS IS THE MJD
                    value = tval.mjd * 24.0 # Put MJD in hours
                else:
                    msgs.error('Bad time unit')
            # Put the value in the keyword
            typv = type(value)
            if typv is int or typv is np.int_:
                fitsdict[kw].append(value)
            elif typv is float or typv is np.float_:
                fitsdict[kw].append(value)
            elif isinstance(value, basestring) or typv is np.string_:
                fitsdict[kw].append(value.strip())
            else:
                debugger.set_trace()
                msgs.bug("I didn't expect useful headers to contain type {0:s}".format(typv).replace('<type ','').replace('>',''))

        if argflag['out']['verbose'] == 2: msgs.info("Successfully loaded headers for file:"+msgs.newline()+datlines[i])
    del headarr
    # Convert the fitsdict arrays into numpy arrays
    for k in fitsdict.keys(): fitsdict[k] = np.array(fitsdict[k])
    msgs.info("Headers loaded for {0:d} files successfully".format(len(datlines)))
    return fitsdict
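
A small sketch of the time normalization applied above, using astropy's Time (the ISOT string is a made-up example):

from astropy.time import Time

tval = Time('2015-01-01T12:00:00', scale='tt', format='isot')
hours = tval.mjd * 24.0  # MJD expressed in hours, as load_headers stores it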
Code example #39
File: arsciexp.py Project: adwasser/PYPIT
    def MasterTrace(self, fitsdict, det):
        """
        Generate Master Trace frame for a given detector

        Parameters
        ----------
        fitsdict : dict
          Contains relevant information from fits header files
        det : int
          Index of the detector

        Returns
        -------
        boolean : bool
          Should other ScienceExposure classes be updated?
        """

        # If the master trace is already made, use it
        if self._mstrace[det-1] is not None:
            msgs.info("An identical master trace frame already exists")
            return False
        if self._argflag['reduce']['usetrace'] in ['trace', 'blzflat']:
            if self._argflag['masters']['use']:
                # Attempt to load the Master Frame
                mstrace_name = armasters.master_name(self._argflag['run']['masterdir'],
                                                   'trace', self._argflag['masters']['setup'])
                try:
                    mstrace, head = arload.load_master(mstrace_name, frametype="trace")
                except IOError:
                    msgs.warn("No MasterTrace frame found {:s}".format(mstrace_name))
                else:
                    # Extras
                    lordloc, _ = arload.load_master(mstrace_name, frametype="trace", exten=1)
                    rordloc, _ = arload.load_master(mstrace_name, frametype="trace", exten=2)
                    pixcen, _ = arload.load_master(mstrace_name, frametype="trace", exten=3)
                    pixwid, _ = arload.load_master(mstrace_name, frametype="trace", exten=4)
                    lordpix, _ = arload.load_master(mstrace_name, frametype="trace", exten=5)
                    rordpix, _ = arload.load_master(mstrace_name, frametype="trace", exten=6)
                    self.SetFrame(self._lordloc, lordloc, det)
                    self.SetFrame(self._rordloc, rordloc, det)
                    self.SetFrame(self._pixcen, pixcen.astype(np.int), det)
                    self.SetFrame(self._pixwid, pixwid.astype(np.int), det)
                    self.SetFrame(self._lordpix, lordpix.astype(np.int), det)
                    self.SetFrame(self._rordpix, rordpix.astype(np.int), det)
                    #
                    self._argflag['masters']['loaded'].append('trace'+self._argflag['masters']['setup'])
            if 'trace'+self._argflag['masters']['setup'] not in self._argflag['masters']['loaded']:
                msgs.info("Preparing a master trace frame with {0:s}".format(self._argflag['reduce']['usetrace']))
                ind = self._idx_trace
                # Load the frames for tracing
                frames = arload.load_frames(self, fitsdict, ind, det, frametype='trace', msbias=self._msbias[det-1],
                                            trim=self._argflag['reduce']['trim'], transpose=self._transpose)
                if self._argflag['reduce']['flatmatch'] > 0.0:
                    sframes = arsort.match_frames(frames, self._argflag['reduce']['flatmatch'], msgs, frametype='trace', satlevel=self._spect['det'][det-1]['saturation']*self._spect['det'][det-1]['nonlinear'])
                    subframes = np.zeros((frames.shape[0], frames.shape[1], len(sframes)))
                    numarr = np.array([])
                    for i in range(len(sframes)):
                        numarr = np.append(numarr, sframes[i].shape[2])
                        mstrace = arcomb.comb_frames(sframes[i], det, spect=self._spect, frametype='trace', **self._argflag['trace']['comb'])
                        subframes[:,:,i] = mstrace.copy()
                    del sframes
                    # Combine all sub-frames
                    mstrace = arcomb.comb_frames(subframes, det, spect=self._spect, frametype='trace', weights=numarr, **self._argflag['trace']['comb'])
                    del subframes
                else:
                    mstrace = arcomb.comb_frames(frames, det, spect=self._spect, frametype='trace', **self._argflag['trace']['comb'])
                del frames
        elif self._argflag['reduce']['usetrace'] == 'science':
            msgs.error("Tracing with a science frame is not yet implemented")
        else: # It must be the name of a file the user wishes to load
            mstrace_name = self._argflag['run']['masterdir']+'/'+self._argflag['reduce']['usetrace']
            mstrace, head = arload.load_master(mstrace_name, frametype=None)
            debugger.set_trace()  # NEED TO LOAD EXTRAS AS ABOVE
        # Set and then delete the Master Trace frame
        self.SetMasterFrame(mstrace, "trace", det)
        del mstrace
        return True
Code example #40
    def get_zpeak(self):
        ''' Measure zpeak from an ionic transition
        '''
        if self.ions is None:
            print('get_zpeak: Need to fill ions with get_ions first.')
            return

        # Ions for analysis
        low_ions = [ (14,2), (6,2), (13,2), (26,2), (13,3)]
        high_ions= [(14,4), (6,4)]

        for tt in range(4):
            if tt == 0:
                ions = low_ions
                iflg = 1 # Standard
            elif tt == 1:
                ions = low_ions
                iflg = 2 # Saturated
            elif tt == 2:
                ions = high_ions
                iflg = 1 # Standard
            elif tt == 3:
                ions = high_ions
                iflg = 2 # Saturated
            else:
                raise ValueError('Bad value')

            # Search 
            for ion in ions:
                try:
                    t = self.ions[ion]
                except KeyError:
                    continue
                # Measurement?
                if t['flg_clm'] == iflg:
                    # Identify the transition
                    gdi = np.where( (self.ions.trans['Z'] == ion[0]) &
                                (self.ions.trans['ion'] == ion[1]) &
                                (self.ions.trans['flg_clm'] <= iflg) )[0]
                    # Take the first one
                    gdt = self.ions.trans[gdi[0]]
                    wrest = gdt['wrest']
                    flgs = self.clm_analy.clm_lines[wrest].analy['FLAGS']
                    spec_file = self.clm_analy.fits_files[flgs[1] % 64]
                    # Generate an Abs_Line with spectrum
                    line = abs_line.Abs_Line(wrest, z=self.clm_analy.zsys, spec_file=spec_file)
                    # vpeak
                    from astropy.relativity import velocities as arv
                    vpeak = line.vpeak()
                    self.zpeak = arv.z_from_v(self.clm_analy.zsys, vpeak)
                    if tt == 3:
                        print('zpeak WARNING: Using saturated high-ions!!')
                    break
            else:
                continue
            # get out
            break

        # Error catching
        if self.zpeak is None:
            # Skip primordial LLS
            print('lls_utils.zpeak: No transition in {:s}'.format(self.clm_analy.clm_fil))
            xdb.set_trace()
            return (0,0), 0.
        # Return
        return ion, vpeak
Code example #41
File: lls_literature.py Project: ntejos/xastropy
def jenkins2005():
    '''Jenkins, E. et al. 2005, ApJ, 2005, 623, 767
    PHL 1811
    HST/STIS, FUSE
    Metals parsed from Table 1
      OI taken from text
      Had to input error on columns by hand (JXP)
    Total NHI from Lyman series. see Fig 3
    M/H from O/H
    '''
    # Grab ASCII file from ApJ
    tab_fil = xa_path + "/data/LLS/jenkins2005.tb1.ascii"
    chk_fil = glob.glob(tab_fil)
    if len(chk_fil) > 0:
        tab_fil = chk_fil[0]
    else:
        url = 'http://iopscience.iop.org/0004-637X/623/2/767/fulltext/61520.tb1.txt'
        print('LLSSurvey: Grabbing table file from {:s}'.format(url))
        f = urllib2.urlopen(url)
        with open(tab_fil, "wb") as code:
            code.write(f.read())
    # Setup
    radec = xor.stod1('J215501.5152-092224.688')  # SIMBAD
    lls = LLSSystem(name='PHL1811_z0.081',
                    RA=radec[0],
                    Dec=radec[1],
                    zem=0.192,
                    zabs=0.080923,
                    vlim=[-100., 100.] * u.km / u.s,
                    NHI=17.98,
                    MH=-0.19,
                    sigNHI=np.array([0.05, 0.05]))

    # AbsLines
    ism = LineList('ISM')
    Nsig = {
        'C IV': 0.4,
        'N II': 0.4,
        'Si II': 0.05,
        'Si IV': 0.25,
        'S II': 0.2,
        'Fe II': 0.12,
        'H I': 0.05,
        'S III': 0.06
    }

    # Parse Table
    with open(tab_fil, 'r') as f:
        flines = f.readlines()
    ion_dict = {}
    for iline in flines:
        iline = iline.strip()
        if (len(iline) == 0):
            continue
        # Split on tabs
        isplit = iline.split('\t')
        # Offset?
        ioff = 0
        if isplit[0][0] in ['1', '2']:
            ioff = -1
        # Catch bad lines
        if (isplit[1 + ioff][0:6]
                in ['1442.0', '1443.7',
                    '1120.9']):  # Skip goofy CII line and CII*
            continue
        if len(isplit[2 + ioff]) == 0:
            continue
        # Ion
        if (len(isplit[0].strip()) > 0) & (isplit[0][0] not in ['1', '2']):
            ionc = isplit[0].strip()
            try:
                Zion = xai.name_ion(ionc)
            except KeyError:
                xdb.set_trace()
        # Generate the Line
        try:
            newline = AbsLine(float(isplit[2 + ioff]) * u.AA,
                              linelist=ism,
                              closest=True)
        except ValueError:
            xdb.set_trace()
        newline.attrib['z'] = lls.zabs
        # Spectrum
        newline.analy['datafile'] = 'STIS' if 'S' in isplit[1] else 'FUSE'
        # EW
        try:
            EWvals = isplit[4 + ioff].split(' ')
        except IndexError:
            xdb.set_trace()
        newline.attrib['EW'] = float(EWvals[0]) * u.AA / 1e3
        newline.attrib['EWsig'] = float(EWvals[2]) * u.AA / 1e3
        newline.attrib['flgEW'] = 1
        if len(isplit) < (5 + ioff + 1):
            continue
        # Colm?
        #xdb.set_trace()
        if (len(isplit[5 + ioff].strip()) > 0) & (isplit[5 + ioff].strip() !=
                                                  '\\ldots'):
            if isplit[5 + ioff][0] == '\\':
                ipos = isplit[5 + ioff].find(' ')
                newline.attrib['N'] = float(isplit[5 + ioff][ipos + 1:])
                newline.attrib['flagN'] = 2
            elif isplit[5 + ioff][0] == '<':
                ipos = 0
                newline.attrib['N'] = float(isplit[5 + ioff][ipos + 1:])
                newline.attrib['flagN'] = 3
            elif isplit[5 + ioff][0] == '1':
                try:
                    newline.attrib['N'] = float(isplit[5 + ioff][0:5])
                except ValueError:
                    xdb.set_trace()
                newline.attrib['flagN'] = 1
                try:
                    newline.attrib['Nsig'] = Nsig[ionc]
                except KeyError:
                    print('No error for {:s}'.format(ionc))
            else:
                raise ValueError('Bad character')
            # ion_dict
            ion_dict[ionc] = dict(clm=newline.attrib['N'],
                                  sig_clm=newline.attrib['Nsig'],
                                  flg_clm=newline.attrib['flagN'],
                                  Z=Zion[0],
                                  ion=Zion[1])
        # Append
        lls.lines.append(newline)
    # Fix NI, OI
    ion_dict['O I']['clm'] = 14.47
    ion_dict['O I']['sig_clm'] = 0.05
    ion_dict['N I']['flg_clm'] = 3
    lls._ionclms = IonClms(idict=ion_dict)

    lls.Refs.append('Jen05')
    # Return
    return lls
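
A brief usage sketch (requires the xastropy data tree or network access for the ApJ table; the tuple indexing assumes the IonClms (Z, ion) convention used elsewhere in this document):

lls = jenkins2005()
print(lls.NHI, lls.zabs)       # 17.98, 0.080923
print(lls._ionclms[(8, 1)])    # the hand-corrected O I column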
Code example #42
def main(*args, **kwargs):
    """ Runs the AbsKinGui

    Command line
    or from Python
    Examples:
      1.  python ~/xastropy/xastropy/xguis/abskingui.py
      2.  abskingui.main(filename)
      3.  abskingui.main(spec1d)
    """
    import sys
    import argparse
    from specutils import Spectrum1D

    parser = argparse.ArgumentParser(description='Parse for AbsKingGui')
    parser.add_argument("file", type=str, help="Spectral file")
    parser.add_argument("-zsys", type=float, help="System Redshift")
    parser.add_argument("-outfil", type=str, help="Output filename")
    parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
                        action="store_true")

    if len(args) == 0:
        pargs = parser.parse_args()
    else: # better know what you are doing!
        if isinstance(args[0],(Spectrum1D, tuple)):
            if not kwargs['rerun']:
                app = QtGui.QApplication(sys.argv)
            xdb.set_trace()
            gui = AbsKinGui(args[0], **kwargs)
            gui.exec_()
            #gui.show()
            #app.exec_()
            return gui, app
        else: # String parsing
            largs = [iargs for iargs in args]
            pargs = parser.parse_args(largs)
            xdb.set_trace() # Not setup for command line yet

    # Normalized?
    norm = True
    if pargs.un_norm:
        norm = False

    # z
    try:
        zsys = pargs.zsys
    except AttributeError:
        zsys=None

    # z
    try:
        outfil = pargs.outfil
    except AttributeError:
        outfil=None

    app = QtGui.QApplication(sys.argv)
    gui = AbsKinGui(pargs.file, z=zsys, norm=norm, outfil=outfil)
    gui.show()
    app.exec_()

    return gui, app
Code example #43
File: lls_literature.py Project: ntejos/xastropy
def tripp2005():
    '''Tripp, T. et al. 2005, ApJ, 2005, 619, 714
    PG 1216+069 (LLS in Virgo)
    HST/STIS, FUSE
    Metal columns parsed from Tables 2 and 3
    Total NHI from damping wings
    M/H from O/H
    '''
    # Grab ASCII files from ApJ
    tab_fils = [
        xa_path + "/data/LLS/tripp2005.tb3.ascii",
        xa_path + "/data/LLS/tripp2005.tb2.ascii"
    ]
    urls = [
        'http://iopscience.iop.org/0004-637X/619/2/714/fulltext/60797.tb3.txt',
        'http://iopscience.iop.org/0004-637X/619/2/714/fulltext/60797.tb2.txt'
    ]
    for jj, tab_fil in enumerate(tab_fils):
        chk_fil = glob.glob(tab_fil)
        if len(chk_fil) > 0:
            tab_fil = chk_fil[0]
        else:
            url = urls[jj]
            print('LLSSurvey: Grabbing table file from {:s}'.format(url))
            f = urllib2.urlopen(url)
            with open(tab_fil, "wb") as code:
                code.write(f.read())
    # Setup
    radec = xor.stod1('J121920.9320+063838.476')  # SIMBAD
    lls = LLSSystem(name='PG1216+069_z0.006',
                    RA=radec[0],
                    Dec=radec[1],
                    zem=0.3313,
                    zabs=0.00632,
                    vlim=[-100., 100.] * u.km / u.s,
                    NHI=19.32,
                    MH=-1.6,
                    sigNHI=np.array([0.03, 0.03]))
    #lls.mk_subsys(2)

    # Columns
    # Start with Table 3 (VPFIT)
    with open(tab_fils[0], 'r') as f:
        flines3 = f.readlines()
    ion_dict = {}
    for iline in flines3:
        if (len(iline.strip()) == 0):
            continue
        isplit = iline.split('\t')
        # Ion
        flg = 2
        if (len(isplit[0].strip()) > 0):  # & (isplit[0][0] not in ['1','2']):
            ipos = isplit[0].find('1')
            ionc = isplit[0][0:ipos - 1].strip()
            try:
                Zion = xai.name_ion(ionc)
            except KeyError:
                xdb.set_trace()
            flg = 1
        # Column
        csplit = isplit[3].split(' ')
        clm = float(csplit[0])
        sig = float(csplit[2])
        if flg == 1:
            ion_dict[ionc] = dict(clm=clm,
                                  sig_clm=sig,
                                  flg_clm=1,
                                  Z=Zion[0],
                                  ion=Zion[1])
        else:  # Add it in
            tmp_dict = dict(clm=clm,
                            sig_clm=sig,
                            flg_clm=1,
                            Z=Zion[0],
                            ion=Zion[1])
            logN, siglogN = xiai.sum_logN(ion_dict[ionc], tmp_dict)
            ion_dict[ionc]['clm'] = logN
            ion_dict[ionc]['sig_clm'] = siglogN
    ions = ion_dict.keys()

    # Now Table 2 for the extras
    with open(tab_fils[1], 'r') as f:
        flines2 = f.readlines()
    # Trim the first 10 lines
    flines2 = flines2[10:]
    # Loop
    for iline in flines2:
        isplit = iline.split('\t')
        #
        ionc = isplit[0].strip()
        if (len(ionc) == 0) or (ionc in ions):
            continue
        #
        Zion = xai.name_ion(ionc)
        ion_dict[ionc] = dict(Z=Zion[0], ion=Zion[1], sig_clm=0.)
        if isplit[4][0] == '<':
            ion_dict[ionc]['clm'] = float(isplit[4][1:])
            ion_dict[ionc]['flg_clm'] = 3
        else:
            raise ValueError('Should not get here')

    # Finish
    lls._ionclms = IonClms(idict=ion_dict)
    lls.Refs.append('Tri05')
    return lls
Code example #45
    def teff_ll(self, z912, zem, N_eval=5000, cosmo=None):
        """ Calculate teff_LL 
        Effective opacity from LL absorption at z912 from zem

        Parameters:
        z912: float
          Redshift for evaluation
        zem: float
          Redshift of source
        cosmo: astropy.cosmology (None)
          Cosmological model to adopt (as needed)
        N_eval: int (5000)
          Discretization parameter

        Returns:
        zval, teff_LL: array
          z values and Effective opacity from LL absorption from z912 to zem

        JXP 10 Nov 2014
        """
        # Imports
        from astropy import constants as const

        # NHI array
        lgNval = 11.5 + 10.5 * np.arange(N_eval) / (
            N_eval - 1.)  # base 10; maximum at 22
        dlgN = lgNval[1] - lgNval[0]
        Nval = 10.**lgNval

        # z array
        zval = z912 + (zem - z912) * np.arange(N_eval) / (N_eval - 1.)
        dz = np.fabs(zval[1] - zval[0])

        teff_LL = np.zeros(N_eval)

        # dXdz
        dXdz = igmu.cosm_xz(zval, cosmo=cosmo, flg=1)
        #if keyword_set(FNZ) then dXdz = replicate(1.,N_eval)

        # Evaluate f(N,X)
        velo = (zval - zem) / (1 + zem) * (const.c.cgs.value / 1e5
                                           )  # Kludge for eval [km/s]

        log_fnX = self.eval(lgNval, zem, vel_array=velo)
        log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

        # Evaluate tau(z,N)
        teff_engy = (const.Ryd.to(u.eV, equivalencies=u.spectral()) /
                     ((1 + zval) / (1 + zem)))
        sigma_z = xai.photo_cross(1, 1, teff_engy)
        #xdb.set_trace()
        #sigma_z = teff_cross * ((1+zval)/(1+zem))**(2.75)  # Not exact but close
        tau_zN = np.outer(Nval, sigma_z)

        # Integrand
        intg = 10.**(log_fnz) * (1. - np.exp(-1. * tau_zN))

        # Sum
        sumz_first = False
        if not sumz_first:
            # Sum in N first
            N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),
                              0) * dlgN * np.log(10.)
            #xdb.set_trace()
            # Sum in z
            teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz
        #xdb.set_trace()

        # Debug
        debug = False
        if debug:
            #        x_splot, lgNval, alog10(10.d^(log_fnX) * dxdz * dz * Nval), /bloc
            #        x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #     printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #     writecol, 'debug_file'+strtrim(qq,2)+'.dat', $
            #               lgNval, restEW, log_fnX
            xdb.set_trace()
        # Return
        return zval, teff_LL
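
A usage sketch (assuming teff_ll is bound to an f(N) model instance exposing the eval() call used above):

# LL effective opacity running from z912 = 3.0 up to a zem = 3.5 source
zval, teff_LL = fN_model.teff_ll(3.0, 3.5)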
Code example #46
File: fit_boss_qsos.py Project: profxj/desisim
def failed_parallel():
    '''
    Collision with np.dot
    Might fix with  OPENBLAS_NUM_THREADS=1
    '''
    flg = 0  # 0=BOSS, 1=SDSS

    ## ############################
    # Parallel
    if flg == 0:
        boss_cat_fil = os.environ.get(
            'BOSSPATH') + '/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
        bcat_hdu = fits.open(boss_cat_fil)
        t_boss = bcat_hdu[1].data
        nqso = len(t_boss)
    elif flg == 1:
        sdss_cat_fil = os.environ.get('SDSSPATH') + '/DR7_QSO/dr7_qso.fits.gz'
        scat_hdu = fits.open(sdss_cat_fil)
        t_sdss = scat_hdu[1].data
        nqso = len(t_sdss)
        outfil = 'SDSS_DR7Lya_PCA_values_nocut.fits'

    nqso = 40  # Testing

    #do_boss_lya_parallel(0,nqso, False, None,debug=False)

    output = mp.Queue()
    processes = []
    nproc = 1
    nsub = nqso // nproc

    cut_Lya = False

    # Setup the Processes
    for ii in range(nproc):
        # Generate
        istrt = ii * nsub
        if ii == (nproc - 1):
            iend = nqso
        else:
            iend = (ii + 1) * nsub
        #xdb.set_trace()
        if flg == 0:
            process = mp.Process(target=do_boss_lya_parallel,
                                 args=(istrt, iend, cut_Lya, output))
        elif flg == 1:
            process = mp.Process(target=do_sdss_lya_parallel,
                                 args=(istrt, iend, cut_Lya, output))
        processes.append(process)

    # Run processes
    for p in processes:
        p.start()

    print('Grabbing Output')
    results = [output.get() for p in processes]

    # Get process results from the output queue
    # Exit the completed processes
    print('Joining')
    for p in processes:
        p.join()

    xdb.set_trace()
    # Bring together
    #sorted(results, key=lambda result: result[0])
    #all_is = [ir[0] for ir in results]
    pca_val = np.zeros((nqso, 4))
    for ir in results:
        pca_val[ir[0]:ir[1], :] = ir[2]

    # Write to disk as a binary FITS table
    col0 = fits.Column(name='PCA0', format='E', array=pca_val[:, 0])
    col1 = fits.Column(name='PCA1', format='E', array=pca_val[:, 1])
    col2 = fits.Column(name='PCA2', format='E', array=pca_val[:, 2])
    col3 = fits.Column(name='PCA3', format='E', array=pca_val[:, 3])
    cols = fits.ColDefs([col0, col1, col2, col3])
    tbhdu = fits.BinTableHDU.from_columns(cols)

    prihdr = fits.Header()
    prihdr['OBSERVER'] = 'Edwin Hubble'
    prihdr['COMMENT'] = "Here's some commentary about this FITS file."
    prihdu = fits.PrimaryHDU(header=prihdr)

    thdulist = fits.HDUList([prihdu, tbhdu])
    if 'outfil' not in locals():
        if cut_Lya is False:
            outfil = 'BOSS_DR10Lya_PCA_values_nocut.fits'
        else:
            outfil = 'BOSS_DR10Lya_PCA_values.fits'
    thdulist.writeto(outfil, clobber=True)

    # Done
    #xdb.set_trace()
    print('All done')
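
A sketch of the workaround hinted at in the docstring: pin BLAS to a single thread before numpy is first imported.

import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'  # avoid the np.dot thread collision
import numpy as np  # import numpy only after the variable is set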
Code example #47
def hecto_targets(field, obs_path, hecto_path=None):
    '''Read files related to Hectospec targets

    Parameters:
    -----------
    field : tuple
      (Name, ra, dec)
    obs_path : str
      Path to the observing tree
    hecto_path : str, optional
      Path within the file tree to Hectospec data

    Returns:
    ----------
    Target and observing info 
    '''
    if hecto_path is None:
        hecto_path = '/Galx_Spectra/Hectospec/'

    # Targets
    targ_path = obs_path + field[0] + hecto_path

    # Target file
    targ_file = glob.glob(targ_path + '*.targ')
    if len(targ_file) != 1:
        raise ValueError('Wrong number of Hectospec target files')
    else:
        targ_file = targ_file[0]

    # Read PI, program info [NOT IMPLEMENTED]
    #f = open(msk_file, 'r')
    #lines = f.readlines()
    #f.close()

    # Read target table
    tab = ascii.read(targ_file, comment='#')
    # Restrict to targets
    itarg = np.where(tab['type'] == 'TARGET')
    targs = tab[itarg]
    # Polish
    nrow = len(targs)
    targs.rename_column('ra', 'RAS')
    targs.rename_column('dec', 'DECS')
    targs.add_column(Column([0.] * nrow, name='TARG_RA'))
    targs.add_column(Column([0.] * nrow, name='TARG_DEC'))
    # Get RA/DEC in degrees
    for k, row in enumerate(targs):
        coord = ltu.radec_to_coord((row['RAS'], row['DECS']))
        targs[k]['TARG_RA'] = coord.ra.value
        targs[k]['TARG_DEC'] = coord.dec.value
    # ID/Mag (not always present)
    targ_coord = SkyCoord(ra=targs['TARG_RA'] * u.deg,
                          dec=targs['TARG_DEC'] * u.deg)
    try:
        targs.rename_column('objid', 'TARG_ID')
    except KeyError:
        targs.add_column(Column([0] * nrow, name='TARG_ID'))
        targs.add_column(Column([0.] * nrow, name='TARG_MAG'))
        flg_id = 0
    else:
        flg_id = 1
        targs.rename_column('mag', 'TARG_MAG')
    targs.add_column(Column([0.] * nrow, name='EPOCH'))
    targs.add_column(Column(['SDSS'] * nrow, name='TARG_IMG'))
    targs.add_column(Column(['HECTOSPEC'] * nrow, name='INSTR'))

    targ_mask = {}
    cnames = ['MASK_NAME', 'MASK_ID']
    smsk = '--'
    msk_val = [smsk] * len(cnames)
    for kk, cname in enumerate(cnames):
        targ_mask[cname] = [msk_val[kk]] * nrow

    # Now the 'mask' files
    mask_files = glob.glob(targ_path + '*.cat')
    all_obs = []
    all_masks = []
    for mask_file in mask_files:
        print('Reading Hectospec mask file: {:s}'.format(mask_file))
        i0 = mask_file.rfind('/')
        mask_nm = mask_file[i0 + 1:mask_file.find('.cat')]
        # Grab info from spectrum file
        #xdb.set_trace()
        spec_fil = glob.glob(mask_file[:i0 + 1] + 'spHect-' + mask_nm +
                             '.*.fits.gz')
        if len(spec_fil) == 0:
            raise ValueError('Mask not found! {:s}'.format(mask_nm))  # spec_fil is an empty list here
            #ras, decs = xra.dtos1((field[1],field[2]))
            #pa=0.
        else:
            header = fits.open(spec_fil[0])[0].header
            if header['APERTURE'] != mask_nm:
                raise ValueError('Mask does not match!')
            pa = header['POSANGLE']
            ras = header['CAT-RA']
            decs = header['CAT-DEC']
        # Continuing
        mask_dict = dict(
            INSTR='HECTOSPEC',
            MASK_NAME=mask_nm,
            MASK_RA=ras,
            MASK_DEC=decs,
            MASK_EPOCH=2000.,
            MASK_PA=pa)  # SHOULD GRAB PA, RA, DEC FROM SPECTRA FITS HEADER
        all_masks.append(mask_dict)
        # Read obs
        f = open(mask_file, 'r')
        lines = f.readlines()
        f.close()
        iall_obs = []
        for line in lines:
            if 'OBS' in line:
                prs = line.strip().split(' ')
                gdprs = [iprs for iprs in prs if len(iprs) > 0]
                obs_dict = {}
                obs_dict['DATE'] = gdprs[2]
                obs_dict['TEXP'] = float(gdprs[3])
                obs_dict['DISPERSER'] = gdprs[4]
                obs_dict['CONDITIONS'] = gdprs[5]
                #
                iall_obs.append(obs_dict)
        obs_tab = xxul.dict_list_to_table(iall_obs)
        obs_tab['TEXP'].unit = u.s
        # Read observed targets
        obs_targ = ascii.read(mask_file, comment='#')
        gdt = np.where(obs_targ['flag'] == 1)[0]
        # Match to target list
        obs_coord = SkyCoord(ra=obs_targ['ra'][gdt] * u.hour,
                             dec=obs_targ['dec'][gdt] * u.deg)
        idx, d2d, d3d = coords.match_coordinates_sky(obs_coord,
                                                     targ_coord,
                                                     nthneighbor=1)
        gdm = np.where(d2d < 1. * u.arcsec)[0]
        if len(gdm) != len(gdt):
            raise ValueError('No match')
        else:
            for ii in range(len(gdm)):
                targ_mask['MASK_NAME'][idx[ii]] = mask_nm
                if flg_id == 0:
                    targs['TARG_ID'][idx[ii]] = int(obs_targ['objid'][gdt[ii]])
        """
        for gdi in gdt:
            mtt = np.where(targs['TARG_ID']==
                int(obs_targ['objid'][gdi]))[0]
            if len(mtt) != 1:
                raise ValueError('Multiple matches?!')
            targ_mask['MASK_NAME'][mtt[0]] = mask_nm
        """
        all_obs.append(obs_tab)
    # Add columns to targs
    for tt, cname in enumerate(cnames):
        mask = np.array([False] * len(targs))
        bad = np.where(np.array(targ_mask[cname]) == msk_val[tt])[0]
        if len(bad) > 0:
            mask[bad] = True
        #
        clm = MaskedColumn(targ_mask[cname], name=cname, mask=mask)
        targs.add_column(clm)

    # Look for ID duplicates (rare)
    gdobj = targs['TARG_ID'] > 0
    idval = np.array(targs[gdobj]['TARG_ID']).astype(int)
    uni, counts = np.unique(idval, return_counts=True)
    if len(uni) != np.sum(gdobj):
        warnings.warn("Found duplicated ID values in Hectospect cat files")
        warnings.warn("Modifying these by hand!")
        dup = np.where(counts > 1)[0]
        # Fix by-hand
        for idup in dup:
            dobj = np.where(targs['TARG_ID'] == uni[idup])[0]
            if len(dobj) == 1:
                xdb.set_trace()
            # Confirm RA/DEC are different
            dcoord = SkyCoord(ra=targs['TARG_RA'][dobj] * u.deg,
                              dec=targs['TARG_DEC'][dobj] * u.deg)
            idx, d2d, d3d = coords.match_coordinates_sky(dcoord,
                                                         dcoord,
                                                         nthneighbor=2)
            if np.sum(d2d < 1 * u.arcsec) > 0:
                raise ValueError("Two with the same RA/DEC.  Deal")
            else:
                for ii in range(1, len(dobj)):
                    # Increment
                    print('Setting TARG_ID to {:d} from {:d}'.format(
                        (ii + 1) * targs['TARG_ID'][dobj[ii]],
                        targs['TARG_ID'][dobj[ii]]))
                    targs['TARG_ID'][
                        dobj[ii]] = (ii + 1) * targs['TARG_ID'][dobj[ii]]
    # Double check
    idval = np.array(targs[gdobj]['TARG_ID']).astype(int)
    uni, counts = np.unique(idval, return_counts=True)
    if len(uni) != np.sum(gdobj):
        raise ValueError("Cannot happen")

    # Finish
    return all_masks, all_obs, targs
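A minimal usage sketch; the field tuple and observing-tree path below are hypothetical, and hecto_path defaults to '/Galx_Spectra/Hectospec/':

field = ('PKS0405-123', 61.952, -12.194)  # (Name, ra, dec); made up for illustration
all_masks, all_obs, targs = hecto_targets(field, '/data/obs_tree/')
print('{:d} targets across {:d} masks'.format(len(targs), len(all_masks)))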
Code example #48
File: fit_boss_qsos.py Project: profxj/desisim
def do_boss_lya_parallel(istart, iend, cut_Lya, output, debug=False):
    '''
    Generate PCA coeff for the BOSS Lya DR10 dataset, v2.1

    Parameters
    ----------
    istart, iend : int
      First index and one-past-last index of the catalog rows to fit
    cut_Lya : bool
      If True, avoid using the Lya forest in the analysis
    output : multiprocessing.Queue or None
      If given, (istart, iend, pca_val) is put on the queue;
      otherwise pca_val is returned
    '''
    # Eigen
    eigen, eigen_wave = read_qso_eigen()

    # Open the BOSS catalog file
    boss_cat_fil = os.environ.get(
        'BOSSPATH') + '/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    bcat_hdu = fits.open(boss_cat_fil)
    t_boss = bcat_hdu[1].data
    nqso = len(t_boss)

    pca_val = np.zeros((iend - istart, 4))

    if cut_Lya is False:
        print('do_boss: Not cutting the Lya Forest in the fit')

    # Loop over quasars -- should be spawned across multiple CPUs
    #for ii in range(nqso):
    datdir = os.environ.get('BOSSPATH') + '/DR10/BOSSLyaDR10_spectra_v2.1/'
    jj = 0
    print('istart = {:d}'.format(istart))
    for ii in range(istart, iend):
        if (ii % 100) == 0:
            print('ii = {:d}'.format(ii))
        #print('ii = {:d}'.format(ii))
        # Spectrum file
        pnm = str(t_boss['PLATE'][ii])
        fnm = str(t_boss['FIBERID'][ii]).rjust(4, str('0'))
        mjd = str(t_boss['MJD'][ii])
        sfil = datdir + pnm + '/speclya-'
        sfil = sfil + pnm + '-' + mjd + '-' + fnm + '.fits.gz'
        # Read spectrum
        spec_hdu = fits.open(sfil)
        t = spec_hdu[1].data
        flux = t['flux']
        wave = 10.**t['loglam']
        ivar = t['ivar']
        zqso = t_boss['z_pipe'][ii]

        wrest = wave / (1 + zqso)
        wlya = 1215.

        # Cut Lya forest?
        if cut_Lya is True:
            Ly_imn = np.argmin(np.abs(wrest - wlya))
        else:
            Ly_imn = 0

        # Pack
        imn = np.argmin(np.abs(wrest[Ly_imn] - eigen_wave))
        npix = len(wrest[Ly_imn:])
        imx = npix + imn
        eigen_flux = eigen[:, imn:imx]

        # FIT
        tflux = flux[Ly_imn:]
        tivar = ivar[Ly_imn:]
        acoeff = fit_eigen(tflux, tivar, eigen_flux)  # use the trimmed ivar that matches tflux
        pca_val[jj, :] = acoeff
        jj += 1

        # Check
        if debug is True:
            model = np.dot(eigen.T, acoeff)
            if flg_xdb is True:
                xdb.xplot(wrest, flux, xtwo=eigen_wave, ytwo=model)
            xdb.set_trace()

    #xdb.set_trace()
    print('Done with my subset {:d}, {:d}'.format(istart, iend))
    if output is not None:
        output.put((istart, iend, pca_val))
        #output.put(None)
    else:
        return pca_val
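For a quick serial run (no multiprocessing), pass output=None -- a sketch assuming BOSSPATH points at a local DR10 tree:

pca_val = do_boss_lya_parallel(0, 200, True, None)  # fit the first 200 quasars, cutting the Lya forest
print(pca_val.shape)  # (200, 4)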
Code example #49
def grab_sdss_spectra(radec,
                      radius=0.1 * u.deg,
                      outfil=None,
                      debug=False,
                      maxsep=None,
                      timeout=600.,
                      zmin=None):
    """ Grab SDSS spectra

    Parameters
    ----------
    radec : tuple
      RA, DEC in deg
    radius : float, optional (0.1*u.deg)
      Search radius -- Astroquery actually makes a box, not a circle
    timeout : float, optional
      Timeout limit for connection with SDSS
    outfil : str, optional
      Name of output FITS file; nothing is written if None
    maxsep : float, optional
      Maximum separation [Mpc] to include
    zmin : float (None)
      Minimum redshift to include

    Returns
    -------
    tbl : Table

    """

    cC = coords.SkyCoord(ra=radec[0], dec=radec[1])

    # Query
    photoobj_fs = ['ra', 'dec', 'objid', 'run', 'rerun', 'camcol', 'field']
    mags = [
        'petroMag_u', 'petroMag_g', 'petroMag_r', 'petroMag_i', 'petroMag_z'
    ]
    magsErr = [
        'petroMagErr_u', 'petroMagErr_g', 'petroMagErr_r', 'petroMagErr_i',
        'petroMagErr_z'
    ]

    phot_catalog = SDSS.query_region(cC,
                                     spectro=True,
                                     radius=radius,
                                     timeout=timeout,
                                     photoobj_fields=photoobj_fs + mags +
                                     magsErr)  # Unique
    spec_catalog = SDSS.query_region(cC,
                                     spectro=True,
                                     radius=radius,
                                     timeout=timeout)  # Duplicates exist
    nobj = len(phot_catalog)

    #
    print('grab_sdss_spectra: Found {:d} sources in the search box.'.format(
        nobj))

    # Coordinates
    cgal = SkyCoord(ra=phot_catalog['ra'] * u.degree,
                    dec=phot_catalog['dec'] * u.degree)
    sgal = SkyCoord(ra=spec_catalog['ra'] * u.degree,
                    dec=spec_catalog['dec'] * u.degree)
    sepgal = cgal.separation(cC)  #in degrees

    # Check for problems and parse z
    zobj = np.zeros(nobj)
    idx, d2d, d3d = coords.match_coordinates_sky(cgal, sgal, nthneighbor=1)
    if np.max(d2d) > 1. * u.arcsec:
        print('No spectral match!')
        xdb.set_trace()
    else:
        zobj = spec_catalog['z'][idx]

    idx, d2d, d3d = coords.match_coordinates_sky(cgal, cgal, nthneighbor=2)
    if np.min(d2d.to('arcsec')) < 1. * u.arcsec:
        print('Two photometric sources with same RA/DEC')
        xdb.set_trace()

    #xdb.set_trace()

    # Cut on Separation
    if maxsep is not None:
        print('grab_sdss_spectra: Restricting to {:g} Mpc separation.'.format(
            maxsep))
        sepgal_kpc = cosmo.kpc_comoving_per_arcmin(zobj) * sepgal.to('arcmin')
        sepgal_mpc = sepgal_kpc.to('Mpc')
        gdg = np.where(sepgal_mpc < (maxsep * u.Unit('Mpc')))[0]
        phot_catalog = phot_catalog[gdg]
        #xdb.set_trace()

    nobj = len(phot_catalog)
    print('grab_sdss_spectra: Grabbing data for {:d} sources.'.format(nobj))

    # Grab Spectra from SDSS

    # Generate output table
    attribs = galaxy_attrib()
    npix = 5000  #len( spec_hdus[0][1].data.flux )
    spec_attrib = [(str('FLUX'), np.float32, (npix, )),
                   (str('SIG'), np.float32, (npix, )),
                   (str('WAVE'), np.float64, (npix, ))]
    tbl = np.recarray((nobj, ), dtype=attribs + spec_attrib)

    tbl['RA'] = phot_catalog['ra']
    tbl['DEC'] = phot_catalog['dec']
    tbl['TELESCOPE'] = str('SDSS 2.5-M')

    # Deal with spectra separately (for now)
    npix = 5000  #len( spec_hdus[0][1].data.flux )

    for idx, obj in enumerate(phot_catalog):
        #print('idx = {:d}'.format(idx))

        # Grab spectra (there may be duplicates)
        mt = np.where(
            sgal.separation(cgal[idx]).to('arcsec') < 1. * u.Unit('arcsec'))[0]
        if len(mt) > 1:
            # Use BOSS if you have it
            mmt = np.where(spec_catalog[mt]['instrument'] == 'BOSS')[0]
            if len(mmt) > 0:
                mt = mt[mmt[0]]
            else:
                mt = mt[0]
        elif len(mt) == 0:
            xdb.set_trace()
        else:
            mt = mt[0]

        # Grab spectra
        spec_hdus = SDSS.get_spectra(matches=Table(spec_catalog[mt]))

        tbl[idx]['INSTRUMENT'] = spec_catalog[mt]['instrument']
        spec = spec_hdus[0][1].data
        npp = len(spec.flux)
        tbl[idx]['FLUX'][0:npp] = spec.flux
        sig = np.zeros(npp)
        gdi = np.where(spec.ivar > 0.)[0]
        if len(gdi) > 0:
            sig[gdi] = np.sqrt(1. / spec.ivar[gdi])
        tbl[idx]['SIG'][0:npp] = sig
        tbl[idx]['WAVE'][0:npp] = 10.**spec.loglam

        # Redshifts
        meta = spec_hdus[0][2].data
        for attrib in ['Z', 'Z_ERR']:
            tbl[idx][attrib] = meta[attrib]

        if debug:
            sep_to_qso = cgal[idx].separation(cC).to('arcmin')
            print('z = {:g}, Separation = {:g}'.format(tbl[idx].Z, sep_to_qso))
            xdb.set_trace()

        # Fill in rest
        tbl[idx].SDSS_MAG = np.array([obj[phot] for phot in mags])
        tbl[idx].SDSS_MAGERR = np.array([obj[phot] for phot in magsErr])

    # Clip on redshift to excise stars/quasars
    if zmin is not None:
        gd = np.where(tbl['Z'] > zmin)[0]
        tbl = tbl[gd]

    # Write to FITS file
    if outfil is not None:
        prihdr = fits.Header()
        prihdr['COMMENT'] = 'SDSS Spectra'
        prihdu = fits.PrimaryHDU(header=prihdr)

        tbhdu = fits.BinTableHDU(tbl)

        thdulist = fits.HDUList([prihdu, tbhdu])
        thdulist.writeto(outfil, clobber=True)

        print('Wrote SDSS table to {:s}'.format(outfil))
    return tbl
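A hedged usage sketch; the coordinates and filename are hypothetical, and radec should carry angular units for SkyCoord:

import astropy.units as u
tbl = grab_sdss_spectra((187.0 * u.deg, 12.0 * u.deg), radius=0.05 * u.deg,
                        outfil='field_spectra.fits', zmin=0.005)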
Code example #50
File: armed.py Project: EdwardBetts/PYPIT
def ARMED(argflag, spect, fitsdict, reuseMaster=False):
    """
    Automatic Reduction and Modeling of Echelle Data

    Parameters
    ----------
    argflag : dict
      Arguments and flags used for reduction
    spect : dict
      Properties of the spectrograph.
    fitsdict : dict
      Contains relevant information from fits header files
    reuseMaster : bool
      If True, a master frame that will be used for another science frame
      will not be regenerated after it is first made.
      This setting comes at a price: if a large number of science frames are
      being reduced, it may be more efficient to simply regenerate the master
      calibrations on the fly.

    Returns
    -------
    status : int
      Status of the reduction procedure
      0 = Successful execution
      1 = ...
    """
    status = 0

    # Create a list of science exposure classes
    sciexp = armbase.SetupScience(argflag, spect, fitsdict)
    numsci = len(sciexp)

    # Create a list of master calibration frames
    masters = armasters.MasterFrames(spect['mosaic']['ndet'])

    # Start reducing the data
    for sc in range(numsci):
        slf = sciexp[sc]
        scidx = slf._idx_sci[0]
        msgs.info("Reducing file {0:s}, target {1:s}".format(fitsdict['filename'][scidx], slf._target_name))
        # Loop on Detectors
        for kk in range(slf._spect['mosaic']['ndet']):  # range: xrange is Python 2-only
            det = kk + 1  # Detectors indexed from 1
            ###############
            # Get amplifier sections
            fitsdict = arproc.get_ampsec_trimmed(slf, fitsdict, det, scidx)
            ###############
            # Generate master bias frame
            update = slf.MasterBias(fitsdict, det)
            if update and reuseMaster:
                armbase.UpdateMasters(sciexp, sc, det, ftype="bias")
            ###############
            # Generate a bad pixel mask (should not repeat)
            update = slf.BadPixelMask(det)
            if update and reuseMaster:
                armbase.UpdateMasters(sciexp, sc, det, ftype="arc")
            ###############
            # Estimate gain and readout noise for the amplifiers
            msgs.work("Estimate Gain and Readout noise from the raw frames...")
            ###############
            # Generate a master arc frame
            update = slf.MasterArc(fitsdict, det)
            if update and reuseMaster:
                armbase.UpdateMasters(sciexp, sc, det, ftype="arc")
            ###############
            # Determine the dispersion direction (and transpose if necessary)
            slf.GetDispersionDirection(fitsdict, det, scidx)
            if slf._bpix[det-1] is None:
                slf.SetFrame(slf._bpix, np.zeros((slf._nspec[det-1], slf._nspat[det-1])), det)
            ###############
            # Generate a master trace frame
            update = slf.MasterTrace(fitsdict, det)
            if update and reuseMaster:
                armbase.UpdateMasters(sciexp, sc, det, ftype="flat", chktype="trace")
            ###############
            # Generate an array that provides the physical pixel locations on the detector
            slf.GetPixelLocations(det)
            ###############
            # Determine the edges of the spectrum (spatial)
            #set_trace()  # leftover debugging stop; commented out so the pipeline can run
            lordloc, rordloc, extord = artrace.trace_orders(slf, slf._mstrace[det-1], det, pcadesc="PCA trace of the slit edges")
            slf.SetFrame(slf._lordloc, lordloc, det)
            slf.SetFrame(slf._rordloc, rordloc, det)


    return status
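ARMED is not normally called directly; a sketch of how the PYPIT driver would invoke it, assuming argflag, spect, and fitsdict were built by the setup stage:

status = ARMED(argflag, spect, fitsdict, reuseMaster=True)
print(status)  # 0 = successful execution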
Code example #51
File: fit_boss_qsos.py Project: profxj/desisim
def failed_parallel():
    '''
    Collision with np.dot
    Might fix with  OPENBLAS_NUM_THREADS=1
    '''
    flg = 0 # 0=BOSS, 1=SDSS

    ## ############################
    # Parallel
    if flg == 0:
        boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
        bcat_hdu = fits.open(boss_cat_fil)
        t_boss = bcat_hdu[1].data
        nqso = len(t_boss)
    elif flg == 1:
        sdss_cat_fil = os.environ.get('SDSSPATH')+'/DR7_QSO/dr7_qso.fits.gz'
        scat_hdu = fits.open(sdss_cat_fil)
        t_sdss = scat_hdu[1].data
        nqso = len(t_sdss)
        outfil = 'SDSS_DR7Lya_PCA_values_nocut.fits'

    nqso = 40  # Testing

    #do_boss_lya_parallel(0,nqso, False, None,debug=False)

    output = mp.Queue()
    processes = []
    nproc = 1
    nsub = nqso // nproc
    
    cut_Lya = False

    # Setup the Processes
    for ii in range(nproc):
        # Generate
        istrt = ii * nsub
        if ii == (nproc-1):
            iend = nqso
        else:
            iend = (ii+1)*nsub
        #xdb.set_trace()
        if flg == 0:
            process = mp.Process(target=do_boss_lya_parallel,
                                args=(istrt,iend,cut_Lya, output))
        elif flg == 1:
            process = mp.Process(target=do_sdss_lya_parallel,
                                args=(istrt,iend,cut_Lya, output))
        processes.append(process)

    # Run processes
    for p in processes:
        p.start()

    print('Grabbing Output')
    results = [output.get() for p in processes]

    # Get process results from the output queue
    # Exit the completed processes
    print('Joining')
    for p in processes:
        p.join()


    xdb.set_trace()
    # Bring together
    #sorted(results, key=lambda result: result[0])
    #all_is = [ir[0] for ir in results]
    pca_val = np.zeros((nqso, 4))
    for ir in results:
        pca_val[ir[0]:ir[1],:] = ir[2]

    # Write to disk as a binary FITS table
    col0 = fits.Column(name='PCA0',format='E',array=pca_val[:,0])
    col1 = fits.Column(name='PCA1',format='E',array=pca_val[:,1])
    col2 = fits.Column(name='PCA2',format='E',array=pca_val[:,2])
    col3 = fits.Column(name='PCA3',format='E',array=pca_val[:,3])
    cols = fits.ColDefs([col0, col1, col2, col3])
    tbhdu = fits.BinTableHDU.from_columns(cols)

    prihdr = fits.Header()
    prihdr['OBSERVER'] = 'Edwin Hubble'
    prihdr['COMMENT'] = "Here's some commentary about this FITS file."
    prihdu = fits.PrimaryHDU(header=prihdr)

    thdulist = fits.HDUList([prihdu, tbhdu])
    if 'outfil' not in locals():
        if cut_Lya is False:
            outfil = 'BOSS_DR10Lya_PCA_values_nocut.fits'
        else:
            outfil = 'BOSS_DR10Lya_PCA_values.fits'
    thdulist.writeto(outfil, overwrite=True)  # 'clobber' is the deprecated spelling of 'overwrite'

    # Done
    #xdb.set_trace()
    print('All done')
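As the docstring notes, the np.dot collision can likely be avoided by limiting OpenBLAS threading; the environment variable must be set before numpy is imported:

import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'  # then import numpy and launch the mp.Process workers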
Code example #52
File: abs_line.py Project: astronomeara/xastropy-old
def mk_line_list_fits_table(outfil=None,XIDL=False):
    from barak import absorb as ba

    if XIDL is True:
        lindat =  os.getenv('XIDL_DIR')+'/Spec/Lines/Lists/grb.lst'
        finedat = os.getenv('XIDL_DIR')+'/Spec/Lines/Lists/fine_strct.lst'
    else:
        lindat = 'grb.lst'  # This pulls from xastropy/data/spec_lines first
        finedat = os.getenv('XIDL_DIR')+'/Spec/Lines/Lists/fine_strct.lst'
  
    # Read XIDL line list
    llist = Abs_Line_List(lindat)
    ndata = len(llist.data)

    # Add columns
    from astropy.table import Column
    gamma = Column(np.zeros(ndata),name='gamma')
    A = Column(np.zeros(ndata),name='A') # Einstein coefficient
    j = Column(np.zeros(ndata),name='j') # Tot ang mom (z projection)
    Ex = Column(np.zeros(ndata),name='Ex') # Excitation energy (cm^-1)
    Elow = Column(np.zeros(ndata),name='Elow') # Energy of lower level
    Eup = Column(np.zeros(ndata),name='Eup') # Energy of upper level
    Z = Column(np.zeros(ndata,dtype='int'),name='Z') # Atomic number
    ion = Column(np.zeros(ndata,dtype='int'),name='ion') # Ionic state

    llist.data.add_columns([gamma,A,j,Ex,Elow,Eup,Z,ion])

    # Z,ion
    for ii in range(ndata):
        nm = llist.data['name'][ii]
        # Z
        if nm[1] == 'I' or nm[1] == 'V': 
            ielm = 1
        else:
            ielm = 2
        elm = nm[:ielm]
        try:
            Zv = ELEMENTS[elm].number
        except KeyError:
            if elm in ['CO','CC','HH']: # Molecules
                Zv = 999
            elif elm in ['D']: # Deuterium
                Zv = 1
            else:
                xdb.set_trace()
        llist.data['Z'][ii] = Zv
        # ion
        ispc = nm.find(' ')
        cion = nm[ielm:ispc].strip('*')
        if len(cion) == 0:
            ionv = 0
        else:
            ionv = roman.fromRoman(cion)
        llist.data['ion'][ii] = ionv

    # #######
    # Fill in matches
    
    # Read atom.dat using barak
    atom, atomflat = ba.readatom(flat=True)
    #pdb.set_trace()
    llist.sources.append('atom.dat') # I wish I could pull this from ba.readatom

    # Fine structure
    fdata = ascii.read(finedat)
    llist.sources.append(finedat)

    # Loop
    for ii in range(ndata):
        # Atom.dat
        mt = np.where(np.fabs(llist.data['wrest'][ii]-atomflat['wa']) < 1e-3)[0]
        if len(mt) > 0: llist.data['gamma'][ii] = atomflat['gam'][mt[0]] # Takes the first match

        # Fine structure
        mt = np.where(np.fabs(llist.data['wrest'][ii]-fdata['wrest']) < 1e-3)[0]
        if len(mt) > 0:
            llist.data['A'][ii] = fdata['A'][mt[0]] # Takes the first match
    
    # Output file
    if outfil is None:
        outfil = xa_path+'/data/atomic/spec_atomic_lines.fits'

    # Header
    '''
    prihdr = fits.Header()
    prihdr['COMMENT'] = "Above are the data sources"
    for ii in range(len(llist.sources)):
        card = 'SOURCE'+str(ii+1)
        prihdr[card] = llist.sources[ii]
    prihdu = fits.PrimaryHDU(header=prihdr)

    # Table
    table_hdu = fits.BinTableHDU.from_columns(np.array(llist.data.filled()))

    '''
    # Write
    llist.data.write(outfil, overwrite=True, format='fits')
    print('mk_line_list: Wrote {:s}'.format(outfil))
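Typical invocation; the output path is hypothetical, and note that even with XIDL=False the fine-structure list still requires the XIDL_DIR environment variable:

mk_line_list_fits_table(outfil='spec_atomic_lines.fits', XIDL=False)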
Code example #53
def ew_teff_lyman(ilambda,
                  zem,
                  fN_model,
                  NHI_MIN=11.5,
                  NHI_MAX=22.0,
                  N_eval=5000,
                  EW_spline=None,
                  bval=24.,
                  fNz=False,
                  cosmo=None,
                  debug=False,
                  cumul=None,
                  verbose=False):
    """ tau effective (follows ew_teff_lyman.pro from XIDL)
       teff = ew_teff_lyman(3400., 2.4)

    Parameters:
    -------------
      ilambda: float
        Observed wavelength 
      zem: float 
        Emission redshift of the source [sets which Lyman lines are included]
      bval: float
         -- Characteristic Doppler parameter for the Lya forest
         -- [Options: 24, 35 km/s]
      NHI_MIN: float
         -- Minimum log HI column for integration [default = 11.5]
      NHI_MAX: float
         -- Maximum log HI column for integration [default = 22.0]
      fNz: Boolean (False)
         -- Inputs f(N,z) instead of f(N,X)
      cosmo: astropy.cosmology (None)
         -- Cosmological model to adopt (as needed)
      cumul: List of cumulative sums
         -- Recorded only if cumul is not None

    Returns:
      teff: 
        Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop

    JXP 07 Nov 2014
    """
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError('igm.tau_eff: ilambda must be a float for now')
    Lambda = ilambda
    if not isinstance(Lambda, u.quantity.Quantity):
        Lambda = Lambda * u.AA  # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = xa_path + '/igm/EW_SPLINE_b24.p'
        elif int(bval) == 35:
            EW_FIL = os.environ.get('XIDL_DIR') + '/IGM/EW_SPLINE_b35.fits'
        else:
            raise ValueError('igm.tau_eff: Not ready for this bvalue %g' %
                             bval)
        EW_spline = pickle.load(open(EW_FIL, "rb"))

    # Lines
    wrest = tau_eff_llist()

    # Find the lines
    gd_Lyman = wrest[(Lambda / (1 + zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print('igm.tau_eff: No Lyman lines covered at this wavelength')
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX - NHI_MIN) * np.arange(N_eval) / (N_eval - 1)  # Base 10
    dlgN = lgNval[1] - lgNval[0]
    Nval = 10.**lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines
    for qq, line in enumerate(gd_Lyman):
        # Would be great to do this in parallel... (can pack together and should)
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.:
            teff_lyman[qq] = 0.
            continue
        # Cosmology
        if fNz is False:
            if cosmo is None:  # 'cosmo not in locals()' never did what was intended
                cosmo = FlatLambdaCDM(H0=70, Om0=0.3)  # Vanilla
            #dxdz = (np.fabs(xigmu.cosm_xz(zeval-0.1, cosmo=cosmo)-
            #            xigmu.cosm_xz(zeval+0.1,cosmo=cosmo)) / 0.2 )
            #xdb.set_trace()
            dxdz = xigmu.cosm_xz(zeval, cosmo=cosmo, flg=1)
        else:
            dxdz = 1.  # Code is using f(N,z)
        #print('dxdz = %g' % dxdz)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline['wrest'] == line)[0]
        if len(idx) != 1:
            raise ValueError(
                'tau_eff: Line %g not included or over included?!' % line)
        restEW = interpolate.splev(lgNval, EW_spline['tck'][idx], der=0)

        # dz
        dz = ((restEW * u.AA) * (1 + zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.eval(lgNval, zeval).flatten()
        #xdb.set_trace()

        # Sum
        intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.))
        #xdb.set_trace()

        # Debug
        if debug:
            xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
            #x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #writecol, 'debug_file'+strtrim(qq,2)+'.dat',  lgNval, restEW, log_fnX
            xdb.set_trace()

    #xdb.set_trace()
    return np.sum(teff_lyman)
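Following the docstring's example call, a sketch assuming an f(N,X) model instance (fN_model) has been built elsewhere:

# effective opacity at an observed wavelength of 3400 Ang for a zem = 2.4 source
teff = ew_teff_lyman(3400., 2.4, fN_model)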
Code example #54
File: model.py Project: jsribaud/xastropy
    def calc_lox(self, z, NHI_min, NHI_max=None, neval=10000, cumul=False):
        """ Calculate l(X) over an N_HI interval

        Parameters:
        z: float
          Redshift for evaluation
        NHI_min: float
          minimum NHI value
        NHI_max: float (Infinity)
          maximum NHI value for evaluation
        neval: int (10000)
          Discretization parameter
        cumul: boolean (False)
          Return a cumulative array?

        Returns:
        lX: float
          l(X) value

        JXP 10 Nov 2014
        """
        # Initial
        if NHI_max is None:
            NHI_max = 23.
            infinity = True
        else:
            infinity = False

        try:
            nz = len(z)
        except TypeError:  # scalar z
            nz = 1
            z = np.array([z])

        # Brute force (should be good to ~0.5%)
        lgNHI = NHI_min + (NHI_max-NHI_min)*np.arange(neval)/(neval-1.)
        dlgN = lgNHI[1]-lgNHI[0]

        # Evaluate f(N,X)
        lgfNX = self.eval(lgNHI, z)
        #xdb.set_trace()

        # Sum
        lX = np.zeros(nz)
        for ii in range(nz): 
            lX[ii] = np.sum(10.**(lgfNX[:,ii]+lgNHI)) * dlgN * np.log(10.)
        if cumul:
            if nz > 1: #; Have not modified this yet
                raise ValueError('fN.model: Not ready for this model type %s' % self.fN_mtype)
            cum_sum = np.cumsum(10.**(lgfNX[:,ii]+lgNHI)) * dlgN * np.log(10.)
        #xdb.set_trace()

        # Infinity?
        if infinity is True:
            # This is risky...
            # Best to cut it off
            xdb.set_trace()
            neval2 = 1000  # the Python 2 'L' long suffix is a syntax error in Python 3
            lgNHI2 = NHI_max + (99.-NHI_max)*np.arange(neval2)/(neval2-1.)
            dlgN = lgNHI2[1]-lgNHI2[0]
            lgfNX = np.zeros((neval2,nz))
            lX2 = np.zeros(nz)
            for ii in range(nz):
                lgfNX[:,ii] = self.eval(lgNHI2, z[ii]).flatten()
                lX2[ii] = np.sum(10.**(lgfNX[:,ii]+lgNHI2)) * dlgN * np.log(10.)
                xdb.set_trace()
            # 
            lX = lX + lX2

        # Return
        if nz == 1:
            lX = lX[0]
        if cumul:
            return lX, cum_sum, lgNHI
        else:
            return lX
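A usage sketch, assuming fN_model is an instance of this class:

# l(X) of absorbers with 17.2 <= log NHI <= 20.3 at z = 2.5
lX = fN_model.calc_lox(2.5, 17.2, NHI_max=20.3)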
Code example #55
File: abskin.py Project: LiuFang816/SALSTM_py_data
def generate_stau(velo, flux, sig, kbin=22. * u.km / u.s, debug=False):
    """ Generate the smoothed tau array for kinematic tests

    Parameters
    ----------
    velo : Quantity array (usually km/s)
    flux : Quantity array (flux)
    sig :  Quantity array (sig)
    kbin : Quantity (velocity), optional
      Kernel size for Gaussian smoothing of optical depth array
    debug : bool, optional

    Returns
    -------
    stau : array
       Smoothed tau array
    """
    # Velocity array
    npix = len(velo)
    pix = np.arange(npix).astype(int)

    # Calculate dv
    dv = np.abs(np.median(velo - np.roll(velo, 1)))

    # Test for bad pixels
    badzero = np.where((flux == 0) | (sig <= 0))[0]
    if len(badzero) > 0:
        if np.max(badzero) - np.min(badzero) >= 5:
            raise ValueError(
                'orig_kin: too many or too large sections of bad data')

        flux[badzero] = np.mean(
            np.array([flux[np.min(badzero) - 1], flux[np.max(badzero) + 1]]))
        pdb.set_trace()  # Should add sig too

    # Generate the tau array
    tau = np.zeros(npix)
    gd = np.where((flux > sig / 2.) & (sig > 0.))
    if len(gd) == 0:
        raise ValueError('orig_kin: Profile too saturated.')

    tau[gd] = np.log(1. / flux[gd])
    sat = (pix == pix)
    sat[gd] = False
    tau[sat] = np.log(2. / sig[sat])

    # Smooth
    nbin = (np.round(kbin / dv)).value
    try:
        kernel = Box1DKernel(nbin, mode='center')
    except Exception:  # avoid a bare except that would also swallow KeyboardInterrupt
        pdb.set_trace()
    stau = convolve(tau, kernel, boundary='fill', fill_value=0.)
    if debug is True:
        try:
            from xastropy.xutils import xdebug as xdb
        except ImportError:
            pdb.set_trace()
        else:
            xdb.xplot(velo, tau, stau)
            xdb.set_trace()

    # Return
    return stau
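A self-contained sketch on a synthetic Gaussian absorption profile (the arrays below are made up for illustration):

import numpy as np
import astropy.units as u
velo = np.linspace(-300., 300., 601) * u.km / u.s
flux = 1. - 0.8 * np.exp(-0.5 * (velo.value / 50.) ** 2)  # synthetic profile
sig = 0.02 * np.ones(velo.size)
stau = generate_stau(velo, flux, sig)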
Code example #56
File: readwrite.py Project: profxj/old_xastropy
def readspec(specfil,
             inflg=None,
             efil=None,
             outfil=None,
             show_plot=0,
             use_barak=False,
             verbose=False,
             flux_tags=None,
             sig_tags=None,
             multi_ivar=False):
    ''' 
    specfil: string or Table
    multi_ivar: Bool (False)
      BOSS format of flux, ivar, log10(wave) in multi-extension FITS
    '''
    from xastropy.files import general as xfg
    #from xastropy.plotting import x_guis as xpxg
    from astropy.table import Table
    from astropy.table import Column

    raise ValueError('USE LINETOOLS.spectra.io INSTEAD!!')

    # Initialize
    dat = None
    if inflg is None:
        inflg = 0

    # Check specfil type
    if type(specfil) is Table:
        datfil = 'None'
        # Dummy hdulist
        hdulist = [fits.PrimaryHDU(), specfil]
    else:
        # Read header
        datfil, chk = xfg.chk_for_gz(specfil)
        if chk == 0:
            print('xastropy.spec.readwrite: File does not exist ', specfil)
            return -1
        hdulist = fits.open(os.path.expanduser(datfil))

    head0 = hdulist[0].header

    ## #################
    # Binary FITS table?
    if head0['NAXIS'] == 0:
        # Flux
        if flux_tags is None:
            flux_tags = [
                'SPEC', 'FLUX', 'FLAM', 'FX', 'FLUXSTIS', 'FLUX_OPT', 'fl'
            ]
        fx, fx_tag = get_table_column(flux_tags, hdulist)
        #xdb.set_trace()
        if fx is None:
            print('spec.readwrite: Binary FITS Table but no Flux tag')
            return
        # Error
        if sig_tags is None:
            sig_tags = [
                'ERROR', 'ERR', 'SIGMA_FLUX', 'FLAM_SIG', 'SIGMA_UP',
                'ERRSTIS', 'FLUXERR', 'er'
            ]
        sig, sig_tag = get_table_column(sig_tags, hdulist)
        if sig is None:
            ivar_tags = ['IVAR', 'IVAR_OPT']
            ivar, ivar_tag = get_table_column(ivar_tags, hdulist)
            if ivar is None:
                print('spec.readwrite: Binary FITS Table but no error tags')
                return
            else:
                sig = np.zeros(ivar.size)
                gdi = np.where(ivar > 0.)[0]
                sig[gdi] = np.sqrt(1. / ivar[gdi])
        # Wavelength
        wave_tags = [
            'WAVE', 'WAVELENGTH', 'LAMBDA', 'LOGLAM', 'WAVESTIS', 'WAVE_OPT',
            'wa'
        ]
        wave, wave_tag = get_table_column(wave_tags, hdulist)
        if wave_tag == 'LOGLAM':
            wave = 10.**wave
        if wave is None:
            print('spec.readwrite: Binary FITS Table but no wavelength tag')
            return
    elif head0['NAXIS'] == 1:  # Data in the zero extension
        # How many entries?
        if len(hdulist) == 1:  # Old school (one file per flux, error)
            # Error
            if efil is None:
                ipos = max(specfil.find('F.fits'), specfil.find('f.fits'))
                if ipos < 0:  # No error array
                    efil = None
                    #sig = np.zeros(fx.size)
                else:
                    if specfil.find('F.fits') > 0:
                        efil, chk = xfg.chk_for_gz(specfil[0:ipos] + 'E.fits')
                    else:
                        efil, chk = xfg.chk_for_gz(specfil[0:ipos] + 'e.fits')
                if efil is not None:
                    efil = os.path.expanduser(efil)
            # Generate Spectrum1D
            spec1d = spec_read_fits.read_fits_spectrum1d(
                os.path.expanduser(datfil), dispersion_unit='AA', efil=efil)
            xspec1d = XSpectrum1D.from_spec1d(spec1d)

            #spec1d = spec_read_fits.read_fits_spectrum1d(os.path.expanduser(datfil))

        elif len(hdulist) == 2:  # NEW SCHOOL (one file per flux, error)
            spec1d = spec_read_fits.read_fits_spectrum1d(
                os.path.expanduser(datfil), dispersion_unit='AA')
            # Error array
            sig = hdulist[1].data
            spec1d.uncertainty = StdDevUncertainty(sig)
            #
            xspec1d = XSpectrum1D.from_spec1d(spec1d)

        else:  # ASSUMING MULTI-EXTENSION
            if len(hdulist) <= 2:
                print(
                    'spec.readwrite: No wavelength info but only 2 extensions!'
                )
                return
            fx = hdulist[0].data.flatten()
            sig = hdulist[1].data.flatten()
            wave = hdulist[2].data.flatten()
            # BOSS/SDSS?
            try:
                multi_ivar = head0['TELESCOP'][0:4] in ['SDSS']
            except KeyError:
                pass
            #
            if multi_ivar is True:
                tmpsig = np.zeros(len(sig))
                gdp = np.where(sig > 0.)[0]
                tmpsig[gdp] = np.sqrt(1. / sig[gdp])
                sig = tmpsig
                wave = 10.**wave
    else:  # Should not be here
        print('spec.readwrite: Looks like an image')
        return dat

    # Generate, as needed
    if 'xspec1d' not in locals():
        # Give Ang as default
        if not hasattr(wave, 'unit'):
            uwave = u.Quantity(wave, unit=u.AA)
        else:
            if wave.unit is None:
                uwave = u.Quantity(wave, unit=u.AA)
            else:
                uwave = u.Quantity(wave)
        xspec1d = XSpectrum1D.from_array(uwave,
                                         u.Quantity(fx),
                                         uncertainty=StdDevUncertainty(sig))

    xspec1d.filename = specfil

    # Continuum?
    try:
        # Hypothetical continuum-file convention; the original referenced an
        # undefined 'name' variable and always fell through to the except
        co = fits.getdata(str(specfil).replace('.fits', '_c.fits'))
    except Exception:
        try:
            npix = len(fx)
        except UnboundLocalError:
            npix = len(xspec1d.flux)
        co = np.nan * np.ones(npix)
    '''
    # Generate a Barak Spectrum Class?
    hd = hdulist[0].header
    if use_barak is True:
        # Barak
        raise ValueError('Avoid!')
        from barak import spec as bs
        spec1d = bs.Spectrum(wa=wave, fl=fx, er=sig, co=co, filename=specfil)
        spec1d.header = hd
    '''

    # Plot?
    if show_plot:
        xpxg.plot_1d_arrays(wave, fx, sig, co)

    # Write to disk? Unlikely
    if outfil is not None:
        if use_barak is True:
            spec1d.fits_write(outfil, overwrite=True)
        else:
            xdb.set_trace()  # Not ready

    # Add in the header
    xspec1d.head = head0

    # Return
    return xspec1d
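As the guard at the top of this function insists, the reader now lives in linetools; the replacement call is simply:

from linetools.spectra.io import readspec
spec = readspec('spectrum.fits')  # hypothetical filename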
Code example #57
File: tau_eff.py Project: nhmc/xastropy
def ew_teff_lyman(
    ilambda,
    zem,
    fN_model,
    NHI_MIN=11.5,
    NHI_MAX=22.0,
    N_eval=5000,
    EW_spline=None,
    bval=24.0,
    fNz=False,
    cosmo=None,
    debug=False,
    cumul=None,
    verbose=False,
):
    """ tau effective (follows ew_teff_lyman.pro from XIDL)
       teff = ew_teff_lyman(3400., 2.4)

    Parameters:
    -------------
      ilambda: float
        Observed wavelength 
      zem: float 
        Emission redshift of the source [sets which Lyman lines are included]
      bval: float
         -- Characteristic Doppler parameter for the Lya forest
         -- [Options: 24, 35 km/s]
      NHI_MIN: float
         -- Minimum log HI column for integration [default = 11.5]
      NHI_MAX: float
         -- Maximum log HI column for integration [default = 22.0]
      fNz: Boolean (False)
         -- Inputs f(N,z) instead of f(N,X)
      cosmo: astropy.cosmology (None)
         -- Cosmological model to adopt (as needed)
      cumul: List of cumulative sums
         -- Recorded only if cumul is not None

    Returns:
      teff: 
        Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop

    JXP 07 Nov 2014
    """
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError("igm.tau_eff: ilambda must be a float for now")
    Lambda = ilambda
    if not isinstance(Lambda, u.quantity.Quantity):
        Lambda = Lambda * u.AA  # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = xa_path + "/igm/EW_SPLINE_b24.p"
        elif int(bval) == 35:
            EW_FIL = os.environ.get("XIDL_DIR") + "/IGM/EW_SPLINE_b35.fits"
        else:
            raise ValueError("igm.tau_eff: Not ready for this bvalue %g" % bval)
        EW_spline = pickle.load(open(EW_FIL, "rb"))

    # Lines
    wrest = tau_eff_llist()

    # Find the lines
    gd_Lyman = wrest[(Lambda / (1 + zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print("igm.tau_eff: No Lyman lines covered at this wavelength")
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX - NHI_MIN) * np.arange(N_eval) / (N_eval - 1)  # Base 10
    dlgN = lgNval[1] - lgNval[0]
    Nval = 10.0 ** lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines
    for qq, line in enumerate(gd_Lyman):  # Would be great to do this in parallel...
        # (Can pack together and should)
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.0:
            teff_lyman[qq] = 0.0
            continue
        # Cosmology
        if fNz is False:
            if cosmo is None:  # 'cosmo not in locals()' never did what was intended
                cosmo = FlatLambdaCDM(H0=70, Om0=0.3)  # Vanilla
            # dxdz = (np.fabs(xigmu.cosm_xz(zeval-0.1, cosmo=cosmo)-
            #            xigmu.cosm_xz(zeval+0.1,cosmo=cosmo)) / 0.2 )
            # xdb.set_trace()
            dxdz = xigmu.cosm_xz(zeval, cosmo=cosmo, flg=1)
        else:
            dxdz = 1.0  # Code is using f(N,z)
        # print('dxdz = %g' % dxdz)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline["wrest"] == line)[0]
        if len(idx) != 1:
            raise ValueError("tau_eff: Line %g not included or over included?!" % line)
        restEW = interpolate.splev(lgNval, EW_spline["tck"][idx], der=0)

        # dz
        dz = ((restEW * u.AA) * (1 + zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.eval(lgNval, zeval).flatten()
        # xdb.set_trace()

        # Sum
        intgrnd = 10.0 ** (log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.0)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.0))
        # xdb.set_trace()

        # Debug
        if debug:
            xdb.xplot(lgNval, np.log10(10.0 ** (log_fnX) * dxdz * dz * Nval))
            # x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            # printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            # writecol, 'debug_file'+strtrim(qq,2)+'.dat',  lgNval, restEW, log_fnX
            xdb.set_trace()

    # xdb.set_trace()
    return np.sum(teff_lyman)
Code example #58
    if (flg_test % 2**3) >= 2**2:
        print('-------------------------')
        tmp1.fill_ions()
        ion,vpeak = tmp1.get_zpeak()
        print('zpeak = {:g}'.format(tmp1.zpeak))

    # Write .dat
    if (flg_test % 2**4) >= 2**3:
        tmp1.write_dat_file()

    # Read and Write AbsID
    if (flg_test % 2**5) >= 2**4:
        abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
        lls = LLS_System.from_absid_fil(abs_fil)
        tmpfil= '/Users/xavier/Desktop/tmp.fits'
        xdb.set_trace()
        lls.write_absid_file(tmpfil)
        lls = LLS_System.from_absid_fil(tmpfil)
        xdb.set_trace()

    # #############################
    # LLS Survey
    if (flg_test % 2**10) >= 2**9:
        print('-------------------------')
        lls = LLS_Survey('Lists/lls_metals.lst', tree=os.environ.get('LLSTREE'))
        xdb.xhist(lls.NHI, binsz=0.30)

    # LLS Survey ions
    if (flg_test % 2**11) >= 2**10:
        lls = LLS_Survey('Lists/lls_metals.lst', tree=os.environ.get('LLSTREE'))
        lls.fill_ions()