Example #1
def specpolextract(infilelist, logfile='salt.log', debug=False):
    """Produce a 1-D extract spectra for the O and E beams

    This also cleans the 2-D spectra of a number of artifacts, removes the background, accounts for small 
        spatial shifts in the observation, and resamples the data into a wavelength grid

    Parameters
    ----------
    infile_list: list
        List of filenames that include an extracted spectra

    logfile: str
        Name of file for logging


    """

#set up the files
    obsdate=os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict=obslog(infilelist)
    # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip()=='ARC'): del infilelist[i]            
        infiles = len(infilelist)

    # contiguous images of the same object and config are grouped together as an observation
        obs_dict=obslog(infilelist)
        confno_i,confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array([objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)],dtype=int)
        obs_i = np.zeros((infiles),dtype=int)
        obs_i[1:] = ((objno_i[1:] != objno_i[:-1]) | (confno_i[1:] != confno_i[:-1]) ).cumsum()
        dum,iarg_b =  np.unique(obs_i,return_index=True)    # gives i for beginning of each obs
        obss = iarg_b.shape[0]
        obscount_b = np.zeros((obss),dtype=int)
        oclist_b = np.array([[objno_i[iarg_b[b]], confno_i[iarg_b[b]]] for b in range(obss)])        
        if obss>1:
            for b in range(1,obss): 
                obscount_b[b] = (oclist_b[b]==oclist_b[0:b]).all(axis=1).sum()
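        # e.g. objno_i=[0,0,1,1,0] with a single config gives obs_i=[0,0,1,1,2]
        # (three observations); obscount_b counts earlier repeats of each
        # (object,config) pair so repeat observations get distinct names below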

        for b in range(obss):
            ilist = np.where(obs_i==b)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            obs_dict=obslog(outfilelist)
            imagenolist = [int(os.path.basename(infilelist[i]).split('.')[0][-4:]) for i in ilist]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+'  Grating %s  Grang %6.2f  Artic %6.2f' % \
               confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message('  Images: '+outfiles*'%i ' % tuple(imagenolist), with_header=False)
            hdu0 =  pyfits.open(outfilelist[0])       
            rows,cols = hdu0['SCI'].data.shape[1:3]
            cbin,rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

        # special version for lamp data
            object = obs_dict["OBJECT"][0].strip().upper()
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if ((object != "ARC") & (lampid != "NONE")) :
                specpollampextract(outfilelist, logfile=logfile)           
                continue

        # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0: 
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc
                    var_orc = pyfits.open(outfilelist[i])['VAR'].data*count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc).astype(int)
                    var_orc += pyfits.open(outfilelist[i])['VAR'].data*(~badbin_orc).astype(int)
                count += 1
            if count ==0:
                print 'No valid images'
                continue
            image_orc[count_orc>0] /= count_orc[count_orc>0]
            badbinall_orc = (count_orc==0) | (image_orc==0)             # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc==0)        # bin is bad in at least one image
            var_orc[count_orc>0] /= (count_orc[count_orc>0])**2
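        # image_orc is now the mean over images of the good bins, and var_orc the
        # variance of that mean:  mean = sum(SCI*good)/N_good,  var = sum(VAR*good)/N_good**2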

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            okwav_oc = ~((wav_orc == 0).all(axis=1))
            if slitid[0] =="P": slitwidth = float(slitid[2:5])/10.
            else: slitwidth = float(slitid) 

            obsname = objectlist[oclist_b[b][0]]+"_c"+str(oclist_b[b][1])+"_"+str(obscount_b[b])
            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)   
            hdusum = pyfits.HDUList(hdusum)
            hdusum[0].header.update('OBJECT',obsname)     
            header=hdu0['SCI'].header.copy()       
            hdusum.append(pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(pyfits.ImageHDU(data=badbinall_orc.astype('uint8'), header=header, name='BPM'))
            hdusum.append(pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))

            if debug: hdusum.writeto(obsname+".fits",clobber=True)

            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile,debug=debug)

            maprow_ocd = maprow_od[:,None,:] + np.zeros((2,cols,4)) 
            maprow_ocd[okwav_oc] += drow_oc[okwav_oc,None]      

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = okwav_oc[:,None,:] & (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])
                                   
            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc
            hdusum['BPM'].data = badbinnew_orc.astype('uint8')

            if debug: 
#                hdusum.writeto(obsname+".fits",clobber=True)
               pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto(obsname+'_psf_orc.fits',clobber=True) 
#               pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)   
#               pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)  
#               pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)  

        # set up wavelength binning
            wbin = wav_orc[0,rows/2,cols/2]-wav_orc[0,rows/2,cols/2-1] 
            wbin = 2.**(np.rint(np.log2(wbin)))         # bin to nearest power of 2 angstroms
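            # e.g. a raw dispersion of ~3.2 Ang/column rounds to a 4 Ang output bin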
            wmin = (wav_orc.max(axis=1)[okwav_oc].reshape((2,-1))).min(axis=1).max()
            wmax = wav_orc.max()
            for o in (0,1): 
                colmax = np.where((wav_orc[o] > 0.).any(axis=0))[0][-1]
                row_r = np.where(wav_orc[o,:,colmax] > 0.)[0]
                wmax = min(wmax,wav_orc[o,row_r,colmax].min())
            wedgemin = wbin*int(wmin/wbin+0.5) + wbin/2.
            wedgemax = wbin*int(wmax/wbin-0.5) + wbin/2.
            wedge_w = np.arange(wedgemin,wedgemax+wbin,wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2,rows,wavs+1))
            specrow_or = (maprow_od[:,1:3].mean(axis=1)[:,None] + np.arange(-rows/4,rows/4)).astype(int)

        # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
        # psf is normalized so its integral over row is 1.
            psfnormmin = 0.70    # wavelengths with less than this flux in good bins are marked bad
            psf_orw = np.zeros((2,rows,wavs))

            for o in (0,1):
                for r in specrow_or[o]:
                    binedge_orw[o,r] = \
                        interp1d(wav_orc[o,r,okwav_oc[o]],np.arange(cols)[okwav_oc[o]], \
                                   kind='linear',bounds_error=False)(wedge_w)
                    psf_orw[o,r] = scrunch1d(psf_orc[o,r],binedge_orw[o,r])

            if debug: 
                pyfits.PrimaryHDU(binedge_orw.astype('float32')).writeto(obsname+'_binedge_orw.fits',clobber=True)
                pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto(obsname+'_psf_orw.fits',clobber=True)

            psf_orw /= psf_orw.sum(axis=1)[:,None,:]

        # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
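            # colshift.txt (optional) has two columns, image number and column shift
            # in bins, e.g. (hypothetical values):
            #   25   1.50
            #   26  -0.75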
            if docolshift:
                img_I,dcol_I = np.loadtxt(colshiftfilename,dtype=float,unpack=True,usecols=(0,1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images '+shifts*'%5i ' % tuple(img_I), with_header=False)                 
                log.message(' Bins    '+shifts*'%5.2f ' % tuple(dcol_I), with_header=False)                 
               
        # background-subtract and extract spectra
            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:]
                badbin_orc = (hdulist['BPM'].data > 0)
                badbinbkg_orc = (badbin_orc | badbinnew_orc | isedge_orc | istarget_orc)
                if debug:
                    pyfits.PrimaryHDU(isedge_orc.astype('uint8')).writeto('isedge_orc_'+tnum+'.fits',clobber=True)
                    pyfits.PrimaryHDU(istarget_orc.astype('uint8')).writeto('istarget_orc_'+tnum+'.fits',clobber=True) 
                    pyfits.PrimaryHDU(badbinbkg_orc.astype('uint8')).writeto('badbinbkg_orc_'+tnum+'.fits',clobber=True)
                target_orc = bkgsub(hdulist,badbinbkg_orc,isbkgcont_orc,skyflat_orc,maprow_ocd,tnum,debug=debug)
                target_orc *= (~badbin_orc).astype(int)             
                if debug:
                    pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits',clobber=True)
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc
            # extract spectrum optimally (Horne, PASP 1986)
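            # weighting used below (Horne 1986 optimal extraction):
            #   f_w = sum_r(P_rw*D_rw/V_rw) / sum_r(P_rw**2/V_rw)
            #   var(f_w) = 1 / sum_r(P_rw**2/V_rw)
            # with P the normalized (row-shifted) psf, D the background-subtracted
            # data, and V the variance, summed over good bins only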
                target_orw = np.zeros((2,rows,wavs))   
                var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2,rows,wavs),dtype='bool')   
                wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(img_I==int(tnum))]    # table has observed shift
                for o in (0,1):
                    for r in specrow_or[o]:
                        target_orw[o,r] = scrunch1d(target_orc[o,r],binedge_orw[o,r]+dcol)
                        var_orw[o,r] = scrunch1d(var_orc[o,r],binedge_orw[o,r]+dcol)
                        badbin_orw[o,r] = scrunch1d(badbin_orc[o,r].astype(float),binedge_orw[o,r]+dcol) > 0.001 
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw*(~badbin_orw)).sum(axis=1)[:,None,:] < psfnormmin)
#                pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
#                pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True)
  
            # use master psf shifted in row to allow for guide errors
                pwidth = 2*int(1./psf_orw.max())
                ok_w = ((psf_orw*badbin_orw).sum(axis=1) < 0.03/float(pwidth/2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:,s:s-pwidth]*target_orw[:,pwidth/2:-pwidth/2]*ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax-pwidth/4,smax-pwidth/4+pwidth/2+1)
                polycof = la.lstsq(np.vstack((s_S**2,s_S,np.ones_like(s_S))).T,crosscor_s[s_S])[0]
                pshift = -(-0.5*polycof[1]/polycof[0] - pwidth/2)
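                # the fitted parabola peaks at s* = -polycof[1]/(2*polycof[0]);
                # pshift = pwidth/2 - s* is the sub-row shift applied to the psf below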
                s = int(pshift+pwidth)-pwidth
                sfrac = pshift-s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(max(0,s+1),rows-(1+int(abs(pshift)))+max(0,s+1))
                psfsh_orw[:,outrow] = (1.-sfrac)*psf_orw[:,outrow-s] + sfrac*psf_orw[:,outrow-s-1]
#                pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw]/var_orw[~badbin_orw]
                var_ow = (psfsh_orw*wt_orw*(~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1./var_ow[~badbin_ow]
#                pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
#                pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
#                pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw*wt_orw).sum(axis=1)*var_ow

                badlim = 0.20
                psfbadfrac_ow = (psfsh_orw*badbin_orw.astype(int)).sum(axis=1)/psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_ow > badlim)

                cdebug = 83
                if debug: np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \
                    wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e")

            # write O,E spectrum, prefix "s". VAR, BPM for each spectrum. y dim is virtual (length 1)
            # for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)    
                hduout = pyfits.HDUList(hduout)
                header=hdulist['SCI'].header.copy()
                header.update('VAREXT',2)
                header.update('BPMEXT',3)
                header.update('CRVAL1',wedge_w[0]+wbin/2.)
                header.update('CRVAL2',0)
                header.update('CDELT1',wbin)
                header.update('CTYPE1','Angstroms')
            
                hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2,1,wavs)), header=header, name='SCI'))
                header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
                hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2,1,wavs)), header=header, name='VAR'))
                hduout.append(pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape((2,1,wavs)), header=header, name='BPM'))            
            
                hduout.writeto('e'+outfilelist[i],clobber=True,output_verify='warn')
                log.message('Output file '+'e'+outfilelist[i] , with_header=False)
    return
Example #2
def specpolcombine(infilelist,debug_output=False):
    """combine stokes files

    Parameters
    ----------
    infilelist: list
       One or more _stokes.fits filenames

    """
    """
    _b observations
    _w wavelengths in individual observations
    _W wavelengths in combined grid

    """
    obss = len(infilelist)
    obsdict=obslog(infilelist)

 #  construct common wavelength grid _W
    grating_b = obsdict['GRATING']
    grang_b = obsdict['GR-ANGLE']
    artic_b = obsdict['CAMANG']
    dwav_b = np.empty(obss)
    wav0_b = np.empty(obss)
    wavs_b = np.empty(obss,dtype=int)
    stokeslist_sw = []
    varlist_sw = []      
    oklist_sw = []
    for b in range(obss):
        hdul = pyfits.open(infilelist[b])
        dwav_b[b] = float(hdul['SCI'].header['CDELT1'])
        wav0_b[b] = float(hdul['SCI'].header['CRVAL1'])
        wavs_b[b] = int(hdul['SCI'].header['NAXIS1'])
        stokeslist_sw.append(hdul['SCI'].data[:,0,:])
        varlist_sw.append(hdul['VAR'].data[:,0,:])     
        oklist_sw.append(hdul['BPM'].data[:,0,:] == 0)

    dWav = dwav_b.max()
    Wav0 = dWav*(wav0_b.min()//dWav) 
    Wavs = int((dWav*((wav0_b + dwav_b*wavs_b).max()//dWav) - Wav0)/dWav)
    wav_W = np.arange(Wav0,Wav0+dWav*Wavs,dWav)
    stokess = stokeslist_sw[0].shape[0]
    vars = varlist_sw[0].shape[0]

    stokes_bsW = np.zeros((obss,stokess,Wavs))
    var_bsW = np.zeros((obss,vars,Wavs))       
    ok_bsW = np.zeros((obss,stokess,Wavs)).astype(bool)

 # get data and put on common grid, combining bins if necessary
    for b in range(obss):
        if dwav_b[b] == dWav:
            W0 = int((wav0_b[b] - Wav0)/dWav)
            stokes_bsW[b,:,W0:W0+wavs_b[b]] = stokeslist_sw[b]
            var_bsW[b,:,W0:W0+wavs_b[b]] = varlist_sw[b]
            ok_bsW[b,:,W0:W0+wavs_b[b]] = oklist_sw[b]
        else:
            wbinedge_W = (wav_W - dWav/2. - (wav0_b[b] - dwav_b[b]/2.))/dwav_b[b]
            for s in range(stokess): 
                stokes_bsW[b,s] = scrunch1d(stokeslist_sw[b][s],wbinedge_W)
                var_bsW[b,s] = scrunch1d(varlist_sw[b][s],wbinedge_W) 
                ok_bsW[b,s] = (scrunch1d((oklist_sw[b][s]).astype(int),wbinedge_W) > 0)
            for v in range(stokess,vars):       # any extra variance planes (e.g. covariance)
                var_bsW[b,v] = scrunch1d(varlist_sw[b][v],wbinedge_W)

    if debug_output:    
        np.savetxt("stokes_bsW.txt",np.vstack((wav_W,stokes_bsW.reshape((6,Wavs)))).T,fmt="%10.3f")

 # correct (unfluxed) intensity for grating efficiency to match observations together
    for b in range(obss):
        greff_W = greff(grating_b[b],grang_b[b],artic_b[b],wav_W) 
        ok_W = (ok_bsW[b].all(axis=0) & (greff_W > 0.))
        stokes_bsW[b][:,ok_W] /= greff_W[ok_W]
        var_bsW[b][:,ok_W] /= greff_W[ok_W]**2

 # normalize at matching wavelengths _w
 # compute ratios at each wavelength, then error-weighted mean of ratio
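 # weighting implemented below:
 #   normint_b = sum_w(R_bw/varR_bw) / sum_w(1/varR_bw),  R_bw = I_bw/mean_b(I_bw)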
    ismatch_W = ok_bsW.all(axis=0).all(axis=0)
    normint_bw = stokes_bsW[:,0,ismatch_W]/stokes_bsW[:,0,ismatch_W].mean(axis=0)
    varnorm_bw = var_bsW[:,0,ismatch_W]/stokes_bsW[:,0,ismatch_W].mean(axis=0)**2
    normint_b = (normint_bw/varnorm_bw).sum(axis=1)/(1./varnorm_bw).sum(axis=1)
    print normint_b
    stokes_bsW /= normint_b[:,None,None]
    var_bsW /= normint_b[:,None,None]**2

 # Do error weighted combine of observations
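 #   S_W = (sum_b S_bW/V_bW) / (sum_b 1/V_bW),  V_W = 1/(sum_b 1/V_bW)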
    stokes_sW = np.zeros((stokess,Wavs))
    var_sW = np.zeros((vars,Wavs))

    for b in range(obss):
        ok_W = ok_bsW[b].any(axis=0)
        stokes_sW[:,ok_W] += stokes_bsW[b][:,ok_W]/var_bsW[b][:stokess,ok_W]
        var_sW[:,ok_W] += 1./var_bsW[b][:,ok_W]
    ok_W = (var_sW != 0).all(axis=0)
    ok_sW = np.tile(ok_W,(stokess,1))
    var_sW[ok_sW] = 1./var_sW[ok_sW]
    stokes_sW[:,ok_W] *= var_sW[:stokess,ok_W]

 # Save result, name formed from unique elements of '_'-separated parts of names
    namepartlist = []
    parts = 100
    for file in infilelist:
        partlist = os.path.basename(file).split('.')[0].split('_')
        parts = min(parts,len(partlist)) 
        namepartlist.append(partlist)
    outfile = ''     
    for part in range(parts):
        outfile+='-'.join(sorted(set(zip(*namepartlist)[part])))+'_'   

    outfile = outfile[:-1]+'.fits'
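    # e.g. (hypothetical) NGC2992_c0_1_stokes.fits + NGC2992_c0_2_stokes.fits
    #   -> NGC2992_c0_1-2_stokes.fits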
    print "\n",outfile,"\n"

    hduout = hdul
    for ext in ('SCI','VAR','BPM'):
        hduout[ext].header.update('CDELT1',dWav)
        hduout[ext].header.update('CRVAL1',Wav0)
    hduout['SCI'].data = stokes_sW.astype('float32').reshape((stokess,1,-1))
    hduout['VAR'].data = var_sW.astype('float32').reshape((vars,1,-1))
    hduout['BPM'].data = (~ok_sW).astype('uint8').reshape((stokess,1,-1))
    hduout[0].header.add_history('POLCOMBINE: '+' '.join(infilelist))

    hduout.writeto(outfile,clobber=True,output_verify='warn')
    
    return
Example #3
def specpollampextract(infilelist, logfile='salt.log', debug=False):

    obsdate=os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        log.message('Extraction of Lamp Images' , with_header=False)
        obsdict=obslog(infilelist)

        hdu0 =  pyfits.open(infilelist[0])       
        rows,cols = hdu0['SCI'].data.shape[1:3]
        cbin,rbin = np.array(obsdict["CCDSUM"][0].split(" ")).astype(int)
        slitid = obsdict["MASKID"][0]
        lampid = obsdict["LAMPID"][0].strip().upper()
        lam_c = hdu0['WAV'].data[0,rows/2]
        files = len(infilelist)
        outfilelist = infilelist

# sum spectra to find target
        count = 0
        for i in range(files):
            badbin_orc = pyfits.open(outfilelist[i])['BPM'].data.astype(bool)
            if count == 0: 
                count_orc = (~badbin_orc).astype(int)
                image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc
                var_orc = pyfits.open(outfilelist[i])['VAR'].data
            else:
                count_orc += (~badbin_orc).astype(int)
                image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc)
                var_orc += pyfits.open(outfilelist[i])['VAR'].data
            count += 1
        if count ==0:
            print 'No valid images'
            exit()

        image_orc[count_orc>0] /= count_orc[count_orc>0]
        badbin_orc = (count_orc==0) | (image_orc==0)
        okbinpol_orc = (count_orc == count) & (image_orc != 0)    # conservative bpm for pol extraction
        var_orc[count_orc>0] /= count_orc[count_orc>0]**2
        wav_orc = pyfits.open(outfilelist[0])['WAV'].data
#        pyfits.PrimaryHDU(image_orc.astype('float32')).writeto('lampsum_orc.fits',clobber=True)            

        lam_m = np.loadtxt(datadir+"wollaston.txt",dtype=float,usecols=(0,))
        rpix_om = np.loadtxt(datadir+"wollaston.txt",dtype=float,unpack=True,usecols=(1,2))

    # trace spectrum, compute spatial profile 
        profile_orc = np.zeros_like(image_orc)
        drow_oc = np.zeros((2,cols))
        expectrow_oc = np.zeros((2,cols),dtype='float32')
        maxrow_oc = np.zeros((2,cols),dtype=int)
        maxval_oc = np.zeros((2,cols),dtype='float32')
        cross_orC = np.zeros((2,rows,2))
        col_cr,row_cr = np.indices(image_orc[0].T.shape)
    # sample cross-dispersion at center and on right (_C) to get offset and tilt
        Collist = [cols/2, int(0.8*cols)]
        for C in (0,1): cross_orC[:,:,C] = np.sum(image_orc[:,:,Collist[C]-cols/16:Collist[C]+cols/16],axis=2)

        drow_oC = np.zeros((2,2))
        trow_o = np.zeros((2),dtype='int')
        okprof_oc = np.zeros((2,cols),dtype='bool')       
        okprof_orc = np.zeros((2,rows,cols),dtype='bool')
        norm_orc = np.zeros((2,rows,cols))
        sig_c = np.zeros((cols))
        sigmin = 20.; drowmax = 8.

        # find spectrum offset and tilt roughly from max of two cross-dispersion samples
        for o in (0,1):
            expectrow_oc[o] = (1-o)*rows + interp1d(lam_m,rpix_om[o],kind='cubic')(lam_c)/rbin
            for C in (0,1):    
                crossmaxval = np.max(cross_orC[o,   \
                            expectrow_oc[o,Collist[C]]-100/rbin:expectrow_oc[o,Collist[C]]+100/rbin,C])
                drow_oC[o,C] = np.where(cross_orC[o,:,C]==crossmaxval)[0][0] - expectrow_oc[o,Collist[C]]
        drow_o = drow_oC[:,0]
        rowtilt = (drow_oC[:,1]-drow_oC[:,0]).mean()/(Collist[1]-Collist[0])
        expectrow_oc += drow_o[:,None] + rowtilt*np.arange(-cols/2,cols/2)
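        # drow_o is the row offset of each beam at the central sample; rowtilt is the
        # change in offset per column between the two samples (logged in arcmin below)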

        # get trace by finding max in narrow curved aperture and smoothing it
        for o in (0,1):
            row_c = expectrow_oc[o].astype(int)
            aperture_cr = ((row_cr-row_c[:,None])>=-20/rbin) & ((row_cr-row_c[:,None])<=20/rbin)            
            maxrow_oc[o] = np.argmax(image_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1) + row_c - 20/rbin
            maxval_oc[o] = image_orc[o,maxrow_oc[o]].diagonal()
            trow_o[o] = maxrow_oc[o,cols/2]

        # mark as bad where signal drops too low or position is off
            median_c = np.median(image_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1)
            var_c = np.mean(var_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1)
            sig_c[var_c>0] = (maxval_oc[o] - median_c)[var_c>0]/np.sqrt(var_c[var_c>0])
            drow1_c = maxrow_oc[o] -expectrow_oc[o]
            okprof_oc[o] = (sig_c > sigmin) & (abs(drow1_c - np.median(drow1_c)) < drowmax)

        # divide out spectrum (allowing for spectral curvature and tilt) to make spatial profile
            drow2_c = np.polyval(np.polyfit(np.where(okprof_oc[o])[0],drow1_c[okprof_oc[o]],3),(range(cols)))
            okprof_orc[o] = (np.abs(drow2_c - drow1_c) < 3) & okprof_oc[o][None,:]
            drow_oc[o] = -(expectrow_oc[o] - expectrow_oc[o,cols/2] + drow2_c -drow2_c[cols/2])
            for r in range(rows):
                norm_orc[o,r] = interp1d(wav_orc[o,trow_o[o],okprof_oc[o]],maxval_oc[o,okprof_oc[o]], \
                    bounds_error = False, fill_value=0.)(wav_orc[o,r])

        log.message('Image tilt: %8.1f arcmin' % (60.*np.degrees(rowtilt*rbin/cbin)), with_header=False)        
        log.message('Target offset:     O    %4i     E    %4i' % tuple(drow_o), with_header=False)
        log.message('Target center row: O    %4i     E    %4i' % tuple(trow_o), with_header=False)

        okprof_orc &= (norm_orc != 0.)
        profile_orc[okprof_orc] = image_orc[okprof_orc]/norm_orc[okprof_orc]
        var_orc[okprof_orc] = var_orc[okprof_orc]/norm_orc[okprof_orc]**2
#        pyfits.PrimaryHDU(norm_rc.astype('float32')).writeto('norm_rc.fits',clobber=True)     
#        pyfits.PrimaryHDU(okprof_oc.astype('uint8')).writeto('okprof_oc.fits',clobber=True) 
        okprof_c = okprof_oc.all(axis=0)

    # Sample the normalized row profile at 5 places (_C)
        Cols = 5
        dcols = 64/cbin
        Collist = [np.argmax(okprof_c)+dcols, 0, cols/2, 0, cols-np.argmax(okprof_c[::-1])-dcols]
        for C in (1,3): Collist[C] = 0.5*(Collist[C-1] + Collist[C+1])
        Collist = map(int,Collist)
        profile_Cor = np.zeros((Cols,2,rows))

    # Using profile at center, find, mask off fov edge, including possible beam overlap
        edgerow_do = np.zeros((2,2),dtype=int)
        badrow_or = np.zeros((2,rows),dtype=bool)
        axisrow_o = np.zeros(2)
        maxoverlaprows = 34/rbin
        profile_Cor[Cols/2] = np.median(profile_orc[:,:,cols/2-dcols:cols/2+dcols],axis=2)                       
        for d,o in np.ndindex(2,2):                         # _d = (0,1) = (bottom,top)
            row_y = np.where((d==1) ^ (np.arange(rows) < trow_o[o]))[0][::2*d-1]
            edgeval = np.median(profile_Cor[Cols/2,o,row_y],axis=-1)        
            hist,bin = np.histogram(profile_Cor[Cols/2,o,row_y],bins=32,range=(0,edgeval))
            histarg = 32 - np.argmax(hist[::-1]<3)      # edge: <3 in hist in decreasing dirn
            edgeval = bin[histarg]
            edgerow_do[d,o] = trow_o[o] + (2*d-1)*(np.argmax(profile_Cor[Cols/2,o,row_y] <= edgeval))
            axisrow_o[o] += edgerow_do[d,o]
            edgerow_do[d,o] = np.clip(edgerow_do[d,o],maxoverlaprows,rows-maxoverlaprows)
            badrow_or[o] |= ((d==1) ^ (np.arange(rows) < (edgerow_do[d,o]+d)))       
        axisrow_o /= 2.

    # Row profile sample, now background subtracted
        profile_orc[okprof_orc] = ((image_orc-np.median(image_orc,axis=1)[:,None,:])[okprof_orc]) \
                                    /(norm_orc-np.median(image_orc,axis=1)[:,None,:])[okprof_orc]
#        pyfits.PrimaryHDU(profile_orc.astype('float32')).writeto('profile_orc.fits',clobber=True)
        for C in range(Cols): 
            okcol_c = (profile_orc.sum(axis=0).sum(axis=0)>0) & \
                    (np.abs(np.arange(cols)-Collist[C])<dcols)
            Collist[C] = np.where(okcol_c)[0].mean()
            profile_Cor[C] = np.median(profile_orc[:,:,okcol_c],axis=2)
#        print 5*"%7.1f " % tuple(Collist)
#        pyfits.PrimaryHDU(okprof_orc.astype('uint8')).writeto('okprof_orc.fits',clobber=True) 
        np.savetxt("profile_oCr.txt",profile_Cor.transpose((1,0,2)).reshape((2*Cols,-1)).T,fmt="%10.6f")

    # find edge of target slit, and neighboring slits, if multiple slits
    # background picked small enough to miss neighbors in all samples, but matched E and O
        isneighbor_d = np.zeros((2),dtype='bool')
        edgeoff_doC = np.zeros((2,2,Cols))
        for o in (0,1):
            plim = 0.35                         # slit finder
            bkgsafe = 0.90                      # avoiding next slit
            for C in range(Cols):
                leftrow_s = np.flatnonzero((profile_Cor[C,o,:-1] < plim) & (profile_Cor[C,o,1:] > plim))
                rightrow_s = np.flatnonzero((profile_Cor[C,o,leftrow_s[0]:-1] > plim) \
                            & (profile_Cor[C,o,leftrow_s[0]+1:] < plim)) + leftrow_s[0]
                slits = rightrow_s.shape[0]     # eliminate spikes:
                slitrow_s = 0.5*(rightrow_s + leftrow_s[:slits])[(rightrow_s-leftrow_s[:slits]) > 2]
                slits = slitrow_s.shape[0]
                targetslit = np.where(abs(maxrow_oc[o,Collist[C]] - slitrow_s) < 6)[0][0]
                if targetslit > 0:
                    edgeoff_doC[0,o,C] = maxrow_oc[o,Collist[C]] - slitrow_s[targetslit-1:targetslit+1].mean()
                    isneighbor_d[0] |= True
                if targetslit < slits-1:
                    edgeoff_doC[1,o,C] = slitrow_s[targetslit:targetslit+2].mean() - maxrow_oc[o,Collist[C]]
                    isneighbor_d[1] |= True

        for d in (0,1):
            if isneighbor_d[d]: edgerow_do[d] = trow_o + bkgsafe*(2*d-1)*edgeoff_doC[d].min()

        edgerow_doc = (edgerow_do[:,:,None] - drow_oc[None,:,:]).astype(int)
        bkgrows_do = ((trow_o - edgerow_do)/2.).astype(int)
        bkgrow_doc = edgerow_doc + bkgrows_do[:,:,None]/2
        isbkg_dorc = (((np.arange(rows)[:,None] - edgerow_doc[:,:,None,:]) * \
              (np.arange(rows)[:,None] - edgerow_doc[:,:,None,:] - bkgrows_do[:,:,None,None])) < 0)
        istarg_orc = ((np.arange(rows)[:,None] - edgerow_doc[:,:,None,:]).prod(axis=0) < 0)
        istarg_orc &= ~isbkg_dorc.any(axis=0)
        okbinpol_orc &= okprof_oc[:,None,:]

#        pyfits.PrimaryHDU(image_orc*(isbkg_dorc.sum(axis=0)).astype('float32')).writeto('lampbkg_orc.fits',clobber=True)  
#        pyfits.PrimaryHDU(istarg_orc.astype('uint8')).writeto('istarg_orc.fits',clobber=True)

        log.message('Bottom, top row:   O %4i %4i   E %4i %4i \n' \
                % tuple(edgerow_do.T.flatten()), with_header=False)

    # background-subtract and extract spectra

    # set up scrunch table and badpixels in wavelength space
        wbin = wav_orc[0,rows/2,cols/2]-wav_orc[0,rows/2,cols/2-1] 
        wbin = float(int(wbin/0.75))
        wmin,wmax = wav_orc.min(axis=2).max(),wav_orc.max(axis=2).min()
        wedgemin = wbin*int(wmin/wbin+0.5) + wbin/2.
        wedgemax = wbin*int(wmax/wbin-0.5) + wbin/2.
        wedge_w = np.arange(wedgemin,wedgemax+wbin,wbin)
        wavs = wedge_w.shape[0] - 1
        badbin_orc = ~okbinpol_orc 
        binedge_orw = np.zeros((2,rows,wavs+1))
        badbin_orw = np.ones((2,rows,wavs),dtype=bool); nottarg_orw = np.ones_like(badbin_orw)
        for o in (0,1):
            for r in range(edgerow_doc[0,o].min(),edgerow_doc[1,o].max()):
                binedge_orw[o,r] = interp1d(wav_orc[o,r],np.arange(cols))(wedge_w)
                badbin_orw[o,r] = (scrunch1d(badbin_orc[o,r].astype(int),binedge_orw[o,r]) > 0.)
                nottarg_orw[o,r] = (scrunch1d((~istarg_orc[o,r]).astype(int),binedge_orw[o,r]) > 0.)                
        okbin_orw = ~badbin_orw
        istarg_orw = ~nottarg_orw

    # wavelengths with bad pixels in targ area are flagged as bad
        badcol_ow = (istarg_orw & ~okbin_orw).any(axis=1)            
        for o in (0,1): okbin_orw[o] &= ~badcol_ow[o]

        for i in range(files):
            imageno = int(os.path.basename(outfilelist[i]).split('.')[0][-4:])
            hdulist = pyfits.open(outfilelist[i])
            sci_orc = hdulist['sci'].data
            var_orc = hdulist['var'].data

        # make background continuum image, linearly interpolated in row
            bkg_doc = np.zeros((2,2,cols))
            for d,o in np.ndindex(2,2):
                bkg_doc[d,o] = np.median(sci_orc[o].T[isbkg_dorc[d,o].T].reshape((cols,-1)),axis=1)         
            bkgslp_oc = (bkg_doc[1] - bkg_doc[0])/(bkgrow_doc[1] - bkgrow_doc[0])
            bkgbase_oc = (bkg_doc[1] + bkg_doc[0])/2. - bkgslp_oc*(bkgrow_doc[1] + bkgrow_doc[0])/2.
            bkg_orc = bkgbase_oc[:,None,:] + bkgslp_oc[:,None,:]*np.arange(rows)[:,None]
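            # background model, linear in row for each column:
            #   bkg(r,c) = bkgbase_oc[c] + bkgslp_oc[c]*r, with slope and intercept set
            #   by the median levels of the lower and upper background strips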
            target_orc = sci_orc-bkg_orc             
#            np.savetxt('bkg.txt',np.vstack((bkg_doc.reshape((4,-1)),bkgslp_oc,bkgbase_oc)).T,fmt="%11.4f")
#            pyfits.PrimaryHDU(bkg_orc.astype('float32')).writeto('bkg_orc_'+str(imageno)+'.fits',clobber=True)   
#            pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_orc_'+str(imageno)+'.fits',clobber=True)

        # extract spectrum 
            target_orw = np.zeros((2,rows,wavs));   var_orw = np.zeros_like(target_orw)
            for o in (0,1):
                for r in range(edgerow_doc[0,o].min(),edgerow_doc[1,o].max()):
                    target_orw[o,r] = scrunch1d(target_orc[o,r],binedge_orw[o,r])
                    var_orw[o,r] = scrunch1d(var_orc[o,r],binedge_orw[o,r])
  
        # columns with negative extracted intensity are marked as bad
            sci_ow = (target_orw*okbin_orw).sum(axis=1)
#            pyfits.PrimaryHDU((target_orw*okbin_orw).astype('float32')).writeto('sci_orw.fits',clobber=True)
            var_ow = (var_orw*okbin_orw).sum(axis=1)    
            okbin_ow = (okbin_orw.any(axis=1) & (sci_ow > 0.))
            bpm_ow = (~okbin_ow).astype('uint8')

        # write O,E spectrum, prefix "s". VAR, BPM for each spectrum. y dim is virtual (length 1)
        # for consistency with other modes
            hduout = pyfits.PrimaryHDU(header=hdulist[0].header)    
            hduout = pyfits.HDUList(hduout)
            hduout[0].header.update('OBJECT',lampid)
            header=hdulist['SCI'].header.copy()
            header.update('VAREXT',2)
            header.update('BPMEXT',3)
            header.update('CRVAL1',wedge_w[0]+wbin/2.)
            header.update('CRVAL2',0)
            header.update('CDELT1',wbin)
            header.update('CTYPE1','Angstroms')
            
            hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2,1,wavs)).astype('float32'), header=header, name='SCI'))
            header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
            hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2,1,wavs)).astype('float32'), header=header, name='VAR'))
            hduout.append(pyfits.ImageHDU(data=bpm_ow.reshape((2,1,wavs)), header=header, name='BPM'))            
            
            hduout.writeto('e'+outfilelist[i],clobber=True,output_verify='warn')
            log.message('Output file '+'e'+outfilelist[i] , with_header=False)
      
    return
Example #4
def specpolflux(infilelist, logfile='salt.log', debug=False):
    """Finds/ produces fluxcal tables, and applies to listed _stokes.fits

    Parameters
    ----------
    infilelist: list
        Filename or list of final Stokes filenames

    logfile: str
        Name of file for logging

    """
    # Info on CAL_SPST files:
    calspstname_s,calspstfile_s=np.loadtxt(datadir+"spst_filenames.txt",    \
        dtype=str,usecols=(0,1),unpack=True)
    namelen = max(map(len, calspstname_s))

    confitemlist = ['GRATING', 'GR-ANGLE', 'CAMANG']

    # Find fluxdb files already in this directory
    fluxdbtab = Table(names=['no','OBJECT']+confitemlist,        \
               dtype=[int,'S'+str(namelen),'S6',float,float])
    fluxdblist = sorted(glob.glob('fluxdb*.txt'))
    olddbentries = len(fluxdblist)
    for e, dbfile in enumerate(fluxdblist):
        confdat_d = np.genfromtxt(dbfile,usecols=2,comments='?', \
                max_rows=4,dtype=str)
        fluxdbtab.add_row(np.insert(confdat_d, 0, e + 1))
    if olddbentries:
        printstdlog('\n    Existing Fluxdb:\n' + str(fluxdbtab), logfile)

# Create new fluxdb files if new data present
    eclist = sorted(glob.glob('ec*.fits'))
    obs_i, config_i, obstab, configtab = configmap(eclist, confitemlist)
    obss = len(obstab)

    for obs in range(obss):
        object, config = obstab[obs]

        # Find spst standards
        i_j = np.where(obs_i == obs)[0]
        if object not in calspstname_s: continue

        newfluxdbtab = Table( \
            names=['no','OBJECT']+confitemlist,dtype=[int,'S'+str(namelen),'S6',float,float])
        newfluxdbtab.add_row([len(fluxdbtab) + 1, object] +
                             list(configtab[obs]))
        if Table(newfluxdbtab.columns[1:]) in Table(fluxdbtab.columns[1:]):
            continue

        # It's a new flux standard, process it
        s = np.where(object == calspstname_s)[0][0]

        spstfile = iraf.osfn("pysalt$data/standards/spectroscopic/" +
                             calspstfile_s[s])
        wav_f, ABmag_f = np.loadtxt(spstfile, usecols=(0, 1), unpack=True)
        flam_f = 10.**(-0.4 * (ABmag_f + 2.402)) / (wav_f)**2
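        # AB mag -> f_lambda: f_nu = 10**(-0.4*(AB+48.60)) erg/s/cm^2/Hz and
        # f_lambda = f_nu*c/lambda**2; with c in Ang/s and lambda in Ang the zero
        # point collapses to the ~2.40 constant used here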

        wbinedge_f = (wav_f[1:] + wav_f[:-1]) / 2.
        wbinedge_f = np.insert(wbinedge_f, 0, 2. * wav_f[0] - wbinedge_f[0])
        wbinedge_f = np.append(wbinedge_f, 2. * wav_f[-1] - wbinedge_f[-1])

        # average multiple samples,E,O
        hdul = pyfits.open(eclist[i_j[0]])
        grating, grang, artic = configtab[config]
        wav0 = hdul['SCI'].header['CRVAL1']
        dwav = hdul['SCI'].header['CDELT1']
        wavs = hdul['SCI'].data.shape[-1]
        phot_w = np.zeros(wavs)
        count_w = np.zeros(wavs)
        exptime = 0.
        samples = i_j.shape[0]
        for j in range(samples):
            hdul = pyfits.open(eclist[i_j[j]])
            phot_w += hdul['SCI'].data.reshape((2, -1)).sum(axis=0)
            count_w += (hdul['BPM'].data.reshape((2, -1)) == 0).sum(axis=0)
            exptime += hdul['SCI'].header['EXPTIME']
        int_w = phot_w / (2 * samples * exptime)
        ok_w = (count_w == 2 * samples)
        # scrunch onto flux star grid
        wav_w = np.arange(wav0, wav0 + wavs * dwav, dwav)
        binedge_f = (wbinedge_f - (wav_w[0] - dwav / 2.)) / dwav
        int_f = scrunch1d(int_w, binedge_f)
        ok_f = (scrunch1d((~ok_w).astype(int),
                          binedge_f) == 0)  # good flux bins have no bad wavs
        ok_f &= ((wav_f > wav_w[0]) & (wav_f < wav_w[-1]))

        # save flux/intensity mean, extrapolate to edge
        fluxcal_F = flam_f[ok_f] / int_f[ok_f]
        wav_F = wav_f[ok_f]
        fluxcalslope_F = (fluxcal_F[1:] - fluxcal_F[:-1]) / (wav_F[1:] -
                                                             wav_F[:-1])
        wav_F = np.insert(wav_F, 0, wav_w[0])
        wav_F = np.append(wav_F, wav_w[-1])
        fluxcal_F = np.insert(
            fluxcal_F, 0,
            fluxcal_F[0] - fluxcalslope_F[0] * (wav_F[1] - wav_F[0]))
        fluxcal_F = np.append(
            fluxcal_F,
            fluxcal_F[-1] + fluxcalslope_F[-1] * (wav_F[-1] - wav_F[-2]))
        fluxdbfile = 'fluxdb_' + calspstname_s[s] + '_c' + str(config) + '.txt'
        hdr = ("OBJECT: "+object+"\nGRATING: %s \nARTIC: %s \nGRANG: %s" \
            % (grating,grang,artic))
        np.savetxt(fluxdbfile,
                   np.vstack((wav_F, fluxcal_F)).T,
                   fmt="%8.2f %12.3e ",
                   header=hdr)
        fluxdbtab.add_row(list(newfluxdbtab[0]))
        fluxdblist.append(fluxdbfile)
    dbentries = len(fluxdbtab)
    if (dbentries > olddbentries):
        printstdlog(
            '\n    New Fluxdb entries:\n' + str(fluxdbtab[olddbentries:]),
            logfile)

# do fluxcal on listed stokes.fits files
    if len(fluxdbtab) == 0:
        printstdlog('\n    No fluxdb data available', logfile)
        return

    if type(infilelist) is str: infilelist = [infilelist]

    obs_i, config_i, obstab, configtab = configmap(infilelist, confitemlist)
    obss = len(obstab)
    fluxdbconftab = fluxdbtab[confitemlist]

    cunitfluxed = 'erg/s/cm^2/Ang'  # header keyword CUNIT3 if data is already fluxed
    for obs in range(obss):
        iobs = np.where(obs_i == obs)[0][0]
        hdul = pyfits.open(infilelist[iobs])
        if 'CUNIT3' in hdul['SCI'].header:
            if hdul['SCI'].header['CUNIT3'].replace(' ', '') == cunitfluxed:
                printstdlog(
                    ('\n    %s already flux calibrated' % infilelist[iobs]),
                    logfile)
                continue
        fluxdbentry_e = np.where(configtab[obs] == fluxdbconftab)[0]
        if fluxdbentry_e.shape[0] == 0:
            printstdlog(('\n    No flux calibration available for  %s' %
                         infilelist[iobs]), logfile)
            continue

        wav0 = hdul['SCI'].header['CRVAL1']
        dwav = hdul['SCI'].header['CDELT1']
        wavs = hdul['SCI'].data.shape[-1]
        wav_w = np.arange(wav0, wav0 + wavs * dwav, dwav)
        fluxcal_w = np.zeros(wavs)

        # average all applicable fluxdb entries after interpolation onto wavelength grid
        fluxcalhistory = "FluxCal: "
        fluxcallog = fluxcalhistory
        for e in fluxdbentry_e:
            wav_F,fluxcal_F = np.loadtxt(fluxdblist[e],skiprows=4,comments='?', \
                dtype=float,unpack=True)
            fluxcal_w += interp1d(wav_F, fluxcal_F, bounds_error=False)(wav_w)
            fluxcalhistory += " " + fluxdblist[e]
            fluxcallog += "  " + str(e + 1) + " " + fluxdblist[e]
        fluxcal_w /= fluxdbentry_e.shape[0]
        fluxcal_w = (np.nan_to_num(fluxcal_w))
        hdul['SCI'].data *= fluxcal_w
        hdul['SCI'].header['CUNIT3'] = cunitfluxed
        hdul['VAR'].data *= fluxcal_w**2
        hdul['VAR'].header['CUNIT3'] = cunitfluxed
        hdul[0].header.add_history(fluxcalhistory)
        hdul.writeto(infilelist[iobs], clobber=True)

        printstdlog((('\n    %s ' + fluxcallog) % infilelist[iobs]), logfile)

    return
Example #5
def specpolflux(infilelist, logfile='salt.log', debug=False):
    """Finds/ produces fluxcal tables, and applies to listed _stokes.fits

    Parameters
    ----------
    infilelist: list
        Filename or list of final Stokes filenames

    logfile: str
        Name of file for logging

    """
    if len(glob.glob('specpol*.log')): logfile = glob.glob('specpol*.log')[0]
    fluxcal_w = np.zeros(0)
    # Info on CAL_SPST files:
    calspstname_s,calspstfile_s=np.loadtxt(datadir+"spst_filenames.txt",    \
        dtype=str,usecols=(0,1),unpack=True)
    namelen = max(map(len, calspstname_s))

    confitemList = ['DATE-OBS', 'GRATING', 'GR-ANGLE', 'CAMANG']

    # Find fluxdb files already in this directory
    fluxdbtab = Table(names=['no','OBJECT']+confitemList,        \
               dtype=[int,'S'+str(namelen),'S10','S6',float,float])
    fluxdblist = sorted(glob.glob('fluxdb*.txt'))
    goodfluxdblist = copy.copy(fluxdblist)

    for e, dbfile in enumerate(fluxdblist):
        if (open(dbfile).read().count("#") < 5):
            printstdlog('\n    Invalid flux file ' + dbfile + ', not used',
                        logfile)
            goodfluxdblist.remove(dbfile)
            continue
        confdat_d = np.genfromtxt(dbfile,usecols=2,comments='?', \
                max_rows=5,dtype=str)
        fluxdbtab.add_row(np.insert(confdat_d, 0, e + 1))

    fluxdblist = goodfluxdblist
    olddbentries = len(fluxdblist)
    if olddbentries:
        printstdlog('\n    Existing Fluxdb:\n' + str(fluxdbtab), logfile)

# Create new fluxdb files if new data present
    eclist = sorted(glob.glob('ec*.fits'))
    obss = 0
    if len(eclist):
        obs_i, config_i, obstab, configtab = configmap(eclist, confitemList)
        obss = len(obstab)

    for obs in range(obss):
        object, config = obstab[obs]

        # Find spst standards
        i_j = np.where(obs_i == obs)[0]
        if object not in calspstname_s: continue

        newfluxdbtab = Table( \
            names=['no','OBJECT']+confitemList,dtype=[int,'S'+str(namelen),'S10','S6',float,float])
        newfluxdbtab.add_row([len(fluxdbtab) + 1, object] +
                             list(configtab[obs]))
        if Table(newfluxdbtab.columns[1:]) in Table(fluxdbtab.columns[1:]):
            continue

        # It's a new flux standard, process it
        printstdlog('\n    New Fluxdb entry:\n' + str(newfluxdbtab), logfile)
        s = np.where(object == calspstname_s)[0][0]
        spstfile = iraf.osfn("pysalt$data/standards/spectroscopic/" +
                             calspstfile_s[s])
        wav_f, ABmag_f = np.loadtxt(spstfile, usecols=(0, 1), unpack=True)
        wbinedge_f = (wav_f[1:] + wav_f[:-1]) / 2.
        wbinedge_f = np.insert(wbinedge_f, 0, 2. * wav_f[0] - wbinedge_f[0])
        wbinedge_f = np.append(wbinedge_f, 2. * wav_f[-1] - wbinedge_f[-1])
        flam_f = 10.**(-0.4 * (ABmag_f + 2.402)) / (wav_f)**2

        # for HST standards, scrunch down to 50A bins
        if (wav_f[0] < 3000.):
            wbinedge_F = np.arange(3000., 11000., 50.)
            binedge_F = interp1d(wbinedge_f,
                                 np.arange(wbinedge_f.shape[0]))(wbinedge_F)
            flam_F = scrunch1d(flam_f, binedge_F) / np.diff(
                binedge_F)  # mean over new flux bin
            wav_f = (wbinedge_F[:-1] + wbinedge_F[1:]) / 2.
            flam_f = flam_F
            wbinedge_f = wbinedge_F

    # average multiple samples,E,O
        hdul = pyfits.open(eclist[i_j[0]])
        dateobs, grating, grang, artic = configtab[config]
        wav0 = hdul['SCI'].header['CRVAL1']
        dwav = hdul['SCI'].header['CDELT1']
        wavs = hdul['SCI'].data.shape[-1]
        phot_w = np.zeros(wavs)
        count_w = np.zeros(wavs)
        exptime = 0.
        samples = i_j.shape[0]
        for j in range(samples):
            hdul = pyfits.open(eclist[i_j[j]])
            phot_w += hdul['SCI'].data.reshape((2, -1)).sum(axis=0)
            count_w += (hdul['BPM'].data.reshape((2, -1)) == 0).sum(axis=0)
            exptime += hdul['SCI'].header['EXPTIME']
        int_w = phot_w / exptime  # phot/sec/bin, E+O sum
        ok_w = (count_w == 2 * samples)

        # check for gain corrections. BPM==2 marks internal ccd amp intersections
        aw_pA = np.array(
            np.where(hdul['BPM'].data.reshape((2, -1)) == 2))[1].reshape(
                (2, -1))
        awmin_A, aw_A, awmax_A = (aw_pA.min(axis=0), aw_pA.mean(axis=0),
                                  aw_pA.max(axis=0))
        wloList = [
            0, awmax_A[0] + 1, aw_A[[0, 1]].mean(), awmax_A[1] + 1,
            aw_A[[1, 2]].mean(), awmax_A[2] + 1
        ]
        whiList = [
            awmin_A[0] - 1, aw_A[[0, 1]].mean(), awmin_A[1] - 1,
            aw_A[[1, 2]].mean(), awmin_A[2] - 1, wavs
        ]
        wList = [
            0, aw_A[0], aw_A[[0, 1]].mean(), aw_A[1], aw_A[[1, 2]].mean(),
            aw_A[2], wavs
        ]
        photedge_da = np.zeros((2, 6))
        for d, a in np.ndindex(2, 6):
            w1 = wloList[a] + d * (whiList[a] - wloList[a]) * 2 / 3
            w2 = whiList[a] - (1 - d) * (whiList[a] - wloList[a]) * 2 / 3
            use_w = (ok_w & (np.arange(wavs) >= w1) & (np.arange(wavs) <= w2))
            cof_c = np.polyfit(np.arange(wavs)[use_w], phot_w[use_w], 1)
            if debug: print d, a, ('%8.2f %8.0f' % tuple(cof_c))
            photedge_da[d, a] = np.polyval(cof_c, wList[a + d])
        photrat_A = photedge_da[0, 1:] / photedge_da[1, :-1]
        photrat_a = np.insert(np.cumprod(photrat_A), 0, 1.)
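        # gain check: linear fits to the photon spectrum on either side of each amp
        # boundary are extrapolated to the boundary wavelength and ratioed; the
        # cumulative product photrat_a is each amp segment's level relative to the first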
        historyDict = dict(
            [line.split(' ', 1) for line in hdul['SCI'].header['HISTORY']])
        if historyDict.has_key('GainCorrection:'):
            printstdlog(
                ('\n    Gain cors : ' + historyDict['GainCorrection:']),
                logfile)
        else:
            printstdlog(('\n    no gain correction'), logfile)
        printstdlog(
            ('    Gain Ratio:          ' + 6 * '%6.4f ' % tuple(photrat_a)),
            logfile)

        # scrunch onto flux star grid
        wav_w = np.arange(wav0, wav0 + wavs * dwav, dwav)
        binedge_f = (wbinedge_f - (wav_w[0] - dwav / 2.)) / dwav
        int_f = scrunch1d(int_w, binedge_f) / np.diff(
            binedge_f)  # mean of int_w over flux bin

        ok_f = (scrunch1d((~ok_w).astype(int),
                          binedge_f) == 0)  # good flux bins have no bad wavs
        ok_f &= ((wav_f > wav_w[0]) & (wav_f < wav_w[-1]))

        # save flux/intensity mean over flux standard bin, extrapolate to edge
        fluxcal_F = flam_f[ok_f] / int_f[ok_f]
        wav_F = wav_f[ok_f]
        fluxcalslope_F = (fluxcal_F[1:] - fluxcal_F[:-1]) / (wav_F[1:] -
                                                             wav_F[:-1])
        wav_F = np.insert(wav_F, 0, wav_w[0])
        wav_F = np.append(wav_F, wav_w[-1])
        fluxcal_F = np.insert(
            fluxcal_F, 0,
            fluxcal_F[0] - fluxcalslope_F[0] * (wav_F[1] - wav_F[0]))
        fluxcal_F = np.append(
            fluxcal_F,
            fluxcal_F[-1] + fluxcalslope_F[-1] * (wav_F[-1] - wav_F[-2]))
        fluxdbfile = 'fluxdb_' + calspstname_s[s] + '_c' + str(config) + '.txt'
        hdr = ("OBJECT: "+object+"\nDATEOBS: %s \nGRATING: %s \nARTIC: %s \nGRANG: %s" \
            % (dateobs,grating,grang,artic))
        np.savetxt(fluxdbfile,
                   np.vstack((wav_F, fluxcal_F)).T,
                   fmt="%8.2f %12.3e ",
                   header=hdr)
        fluxdbtab.add_row(list(newfluxdbtab[0]))
        fluxdblist.append(fluxdbfile)
    dbentries = len(fluxdbtab)

    # do fluxcal on listed stokes.fits files
    if len(fluxdbtab) == 0:
        printstdlog('\n    No fluxdb data available', logfile)
        return fluxcal_w

    if type(infilelist) is str: infilelist = [infilelist]
    if len(infilelist) == 0:
        print "No files to calibrate"
        exit()

    obs_i, config_i, obstab, configtab = configmap(infilelist, confitemList)
    obss = len(obstab)
    fluxdbconftab = fluxdbtab[confitemList]

    cunitfluxed = 'erg/s/cm^2/Ang'  # header keyword CUNIT3 if data is already fluxed
    for obs in range(obss):
        iobs = np.where(obs_i == obs)[0][0]
        hdul = pyfits.open(infilelist[iobs])
        if 'CUNIT3' in hdul['SCI'].header:
            if hdul['SCI'].header['CUNIT3'].replace(' ', '') == cunitfluxed:
                printstdlog(
                    ('\n    %s already flux calibrated' % infilelist[iobs]),
                    logfile)
                continue

        fluxdbentry_e = []
        for e in range(len(fluxdbconftab)):
            if ((fluxdbconftab[e]['GRATING']==configtab[obs]['GRATING']) &  \
                (fluxdbconftab[e]['CAMANG']==configtab[obs]['CAMANG'])  &  \
                (abs(fluxdbconftab[e]['GR-ANGLE']-configtab[obs]['GR-ANGLE']) < 0.1)):
                fluxdbentry_e.append(e)
        if len(fluxdbentry_e) == 0:
            printstdlog(('\n    No flux calibration available for  %s' %
                         infilelist[iobs]), logfile)
            continue

        wav0 = hdul['SCI'].header['CRVAL1']
        dwav = hdul['SCI'].header['CDELT1']
        wavs = hdul['SCI'].data.shape[-1]
        exptime = hdul['SCI'].header['EXPTIME']
        wav_w = np.arange(wav0, wav0 + wavs * dwav, dwav)
        fluxcal_w = np.zeros(wavs)

        # average all applicable fluxdb entries after interpolation onto wavelength grid
        # if necessary, block average onto ~50Ang grid, then average onto 50 Ang grid
        # interpolate onto configuration for fluxcal
        fluxcallog = ''
        for e in fluxdbentry_e:
            wav_F,fluxcal_F = np.loadtxt(fluxdblist[e],skiprows=5,comments='?', \
                dtype=float,unpack=True)
            fluxcal_w += interp1d(wav_F, fluxcal_F, bounds_error=False)(wav_w)
            hdul[0].header.add_history("FluxCal: " +
                                       fluxdbconftab[e]["DATE-OBS"] + ' ' +
                                       fluxdblist[e])
            fluxcallog += ('\n    ' + str(e + 1) + ' ' +
                           fluxdbconftab[e]["DATE-OBS"] + ' ' + fluxdblist[e])
        fluxcal_w /= len(fluxdbentry_e)
        fluxcal_w = (np.nan_to_num(fluxcal_w)) / exptime
        hdul['SCI'].data *= fluxcal_w
        hdul['SCI'].header['CUNIT3'] = cunitfluxed
        hdul['VAR'].data *= fluxcal_w**2
        hdul['VAR'].header['CUNIT3'] = cunitfluxed
        hdul['COV'].data *= fluxcal_w**2
        hdul['COV'].header['CUNIT3'] = cunitfluxed
        hdul['BPM'].data = ((hdul['BPM'].data > 0) |
                            (fluxcal_w == 0.)).astype('uint8')
        hdul.writeto(infilelist[iobs], overwrite=True)

        printstdlog((('\n    %s Fluxcal:' + fluxcallog) % infilelist[iobs]),
                    logfile)

    return fluxcal_w
Example #6
def specpolextract(infilelist, logfile='salt.log', debug=False):
    """Produce a 1-D extract spectra for the O and E beams

    This also cleans the 2-D spectra of a number of artifacts, removes the background, accounts for small 
        spatial shifts in the observation, and resamples the data into a wavelength grid

    Parameters
    ----------
    infile_list: list
        List of filenames that include an extracted spectra

    logfile: str
        Name of file for logging


    """

    with logging(logfile, debug) as log:

        config_dict = list_configurations(infilelist, log)
        config_count = 0

        for config in config_dict:
            outfilelist = config_dict[config]['object']
            outfiles = len(outfilelist)
            obs_dict = obslog(outfilelist)
            hdu0 = pyfits.open(outfilelist[0])
            rows, cols = hdu0['SCI'].data.shape[1:3]
            cbin, rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)
            object_name = hdu0[0].header['OBJECT']
            log.message(
                '\nExtract: {3}  Grating {0} Grang {1:6.2f}  Artic {2:6.2f}'.
                format(config[0], config[1], config[2], object_name))
            log.message(
                ' Images: ' +
                ' '.join([str(image_number(img)) for img in outfilelist]),
                with_header=False)

            # special version for lamp data
            # this is now removed and will not be part of this code
            #object = obs_dict["OBJECT"][0].strip().upper()
            #ampid = obs_dict["LAMPID"][0].strip().upper()
            #f ((object != "ARC") & (lampid != "NONE")) :
            #   specpollampextract(outfilelist, logfile=logfile)
            #   continue

            # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(
                        outfilelist[i])['SCI'].data * count_orc
                    var_orc = pyfits.open(
                        outfilelist[i])['VAR'].data * count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(
                        outfilelist[i])['SCI'].data * (~badbin_orc).astype(int)
                    var_orc += pyfits.open(
                        outfilelist[i])['VAR'].data * (~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue
            image_orc[count_orc > 0] /= count_orc[count_orc > 0]
            badbinall_orc = (count_orc == 0) | (image_orc == 0)    # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc == 0) # bin is bad in at least one image
            var_orc[count_orc > 0] /= (count_orc[count_orc > 0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            okwav_oc = ~((wav_orc == 0).all(axis=1))

            obsname = object_name + "_c" + str(config_count) + "_" + str(
                outfiles)
            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)
            hdusum = pyfits.HDUList(hdusum)
            hdusum[0].header['OBJECT'] = obsname
            header = hdu0['SCI'].header.copy()
            hdusum.append(
                pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(
                pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(
                pyfits.ImageHDU(data=badbinall_orc.astype('uint8'),
                                header=header,
                                name='BPM'))
            hdusum.append(
                pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))

            if debug: hdusum.writeto(obsname + ".fits", clobber=True)

            # run specpolsignalmap on image
            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile,debug=debug)

            maprow_ocd = maprow_od[:, None, :] + np.zeros((2, cols, 4))
            maprow_ocd[okwav_oc] += drow_oc[okwav_oc, None]

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = okwav_oc[:,None,:] & (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])

            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc
            hdusum['BPM'].data = badbinnew_orc.astype('uint8')
            psf_orc *= istarget_orc.astype(int)

            if debug:
                #                hdusum.writeto(obsname+".fits",clobber=True)
                pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto(
                    obsname + '_psf_orc.fits', clobber=True)
#               pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)
#               pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)
#               pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)

            # set up wavelength binning
            wbin = wav_orc[0, rows / 2, cols / 2] - wav_orc[0, rows / 2, cols / 2 - 1]
            wbin = 2.**(np.rint(np.log2(wbin)))  # bin to nearest power of 2 angstroms
            wmin = (wav_orc.max(axis=1)[okwav_oc].reshape(
                (2, -1))).min(axis=1).max()
            wmax = wav_orc.max()
            for o in (0, 1):
                colmax = np.where((wav_orc[o] > 0.).any(axis=0))[0][-1]
                row_r = np.where(wav_orc[o, :, colmax] > 0.)[0]
                wmax = min(wmax, wav_orc[o, row_r, colmax].min())
            wedgemin = wbin * int(wmin / wbin + 0.5) + wbin / 2.
            wedgemax = wbin * int(wmax / wbin - 0.5) + wbin / 2.
            wedge_w = np.arange(wedgemin, wedgemax + wbin, wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2, rows, wavs + 1))
            specrow_or = (maprow_od[:, 1:3].mean(axis=1)[:, None] +
                          np.arange(-rows / 4, rows / 4)).astype(int)

            # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
            # psf is normalized so its integral over row is 1.
            psfnormmin = 0.70  # wavelengths with less than this flux in good bins are marked bad
            psf_orw = np.zeros((2, rows, wavs))

            for o in (0, 1):
                for r in specrow_or[o]:
                    binedge_orw[o,r] = \
                        interp1d(wav_orc[o,r,okwav_oc[o]],np.arange(cols)[okwav_oc[o]], \
                                   kind='linear',bounds_error=False)(wedge_w)
                    psf_orw[o, r] = scrunch1d(psf_orc[o, r], binedge_orw[o, r])

            if debug:
                pyfits.PrimaryHDU(binedge_orw.astype('float32')).writeto(
                    obsname + '_binedge_orw.fits', clobber=True)
                pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto(
                    obsname + '_psf_orw.fits', clobber=True)

            psfnorm_orw = np.repeat(psf_orw.sum(axis=1), rows,
                                    axis=0).reshape(2, rows, -1)
            psf_orw[psfnorm_orw > 0.] /= psfnorm_orw[psfnorm_orw > 0.]
            pmax = np.minimum(
                1.,
                np.median(psf_orw[psfnorm_orw > 0.].reshape(
                    (2, rows, -1)).max(axis=1)))

            log.message('Stellar profile width: %8.2f arcsec' %
                        ((1. / pmax) * rbin / 8.),
                        with_header=False)
            pwidth = int(1. / pmax)

            if debug:
                pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto(
                    obsname + '_psfnormed_orw.fits', clobber=True)

            # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
            if docolshift:
                img_I, dcol_I = np.loadtxt(colshiftfilename,
                                           dtype=float,
                                           unpack=True,
                                           usecols=(0, 1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images ' +
                            shifts * '%5i ' % tuple(img_I),
                            with_header=False)
                log.message(' Bins    ' + shifts * '%5.2f ' % tuple(dcol_I),
                            with_header=False)

            log.message('\nArcsec offset     Output File', with_header=False)

            # background-subtract and extract spectra
            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                tnum = image_number(outfilelist[i])
                badbin_orc = (hdulist['BPM'].data > 0)
                badbinbkg_orc = (badbin_orc | badbinnew_orc | isedge_orc
                                 | istarget_orc)
                if debug:
                    pyfits.PrimaryHDU(isedge_orc.astype('uint8')).writeto(
                        'isedge_orc_' + tnum + '.fits', clobber=True)
                    pyfits.PrimaryHDU(istarget_orc.astype('uint8')).writeto(
                        'istarget_orc_' + tnum + '.fits', clobber=True)
                    pyfits.PrimaryHDU(badbinbkg_orc.astype('uint8')).writeto(
                        'badbinbkg_orc_' + tnum + '.fits', clobber=True)
                target_orc = bkgsub(hdulist,
                                    badbinbkg_orc,
                                    isbkgcont_orc,
                                    skyflat_orc,
                                    maprow_ocd,
                                    tnum,
                                    debug=debug)
                target_orc *= (~badbin_orc).astype(int)
                if debug:
                    pyfits.PrimaryHDU(target_orc.astype('float32')).writeto(
                        'target_' + tnum + '_orc.fits', clobber=True)
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc

                # extract spectrum optimally (Horne, PASP 1986)
                target_orw = np.zeros((2, rows, wavs))
                var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2, rows, wavs), dtype='bool')
                wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(
                            img_I == int(tnum))]  # table has observed shift
                for o in (0, 1):
                    for r in specrow_or[o]:
                        target_orw[o, r] = scrunch1d(target_orc[o, r],
                                                     binedge_orw[o, r] + dcol)
                        var_orw[o, r] = scrunch1d(var_orc[o, r],
                                                  binedge_orw[o, r] + dcol)
                        badbin_orw[o, r] = scrunch1d(
                            badbin_orc[o, r].astype(float),
                            binedge_orw[o, r] + dcol) > 0.001
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw *
                                (~badbin_orw)).sum(axis=1)[:, None, :] <
                               psfnormmin)
                if debug:
                    #                   pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
                    pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto(
                        'badbin_' + tnum + '_orw.fits', clobber=True)

                # use master psf shifted in row to allow for guide errors
                ok_w = ((psf_orw * badbin_orw).sum(axis=1) <
                        0.03 / float(pwidth / 2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:, s:s - pwidth] *
                                     target_orw[:, pwidth / 2:-pwidth / 2] *
                                     ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax - pwidth / 4,
                                smax - pwidth / 4 + pwidth / 2 + 1)
                polycof = la.lstsq(
                    np.vstack((s_S**2, s_S, np.ones_like(s_S))).T,
                    crosscor_s[s_S])[0]
                pshift = -(-0.5 * polycof[1] / polycof[0] - pwidth / 2)
                s = int(pshift + pwidth) - pwidth
                sfrac = pshift - s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(
                    max(0, s + 1),
                    rows - (1 + int(abs(pshift))) + max(0, s + 1))
                psfsh_orw[:, outrow] = (
                    1. - sfrac
                ) * psf_orw[:, outrow - s] + sfrac * psf_orw[:, outrow - s - 1]
                #                pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw] / var_orw[
                    ~badbin_orw]
                var_ow = (psfsh_orw * wt_orw * (~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1. / var_ow[~badbin_ow]
                #                pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
                #                pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
                #                pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw * wt_orw).sum(axis=1) * var_ow

                badlim = 0.20
                psfbadfrac_ow = (psfsh_orw * badbin_orw.astype(int)).sum(
                    axis=1) / psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_ow > badlim)

                cdebug = 83
                if debug:
                    np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \
                        wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e")

                # write O,E spectrum, prefix "e". VAR, BPM for each spectrum. y dim is virtual (length 1)
                # for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
                hduout = pyfits.HDUList(hduout)
                header = hdulist['SCI'].header.copy()
                header.update('VAREXT', 2)
                header.update('BPMEXT', 3)
                header.update('CRVAL1', wedge_w[0] + wbin / 2.)
                header.update('CRVAL2', 0)
                header.update('CDELT1', wbin)
                header.update('CTYPE1', 'Angstroms')

                hduout.append(
                    pyfits.ImageHDU(data=sci_ow.reshape((2, 1, wavs)),
                                    header=header,
                                    name='SCI'))
                header.update('SCIEXT',
                              1,
                              'Extension for Science Frame',
                              before='VAREXT')
                hduout.append(
                    pyfits.ImageHDU(data=var_ow.reshape((2, 1, wavs)),
                                    header=header,
                                    name='VAR'))
                hduout.append(
                    pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape(
                        (2, 1, wavs)),
                                    header=header,
                                    name='BPM'))

                hduout.writeto('e' + outfilelist[i],
                               clobber=True,
                               output_verify='warn')
                log.message('  %8.2f   e%s' %
                            (pshift * rbin / 8., outfilelist[i]),
                            with_header=False)

            # increment the config count
            config_count += 1

    return
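
The wt_orw / var_ow / sci_ow lines above are the Horne (PASP 1986) optimal-extraction step: profile-over-variance weights over the good rows, an extracted flux of sum(w*D)/sum(w*P), and an extracted variance of 1/sum(w*P). Below is a minimal, self-contained sketch of that weighting for a single wavelength bin; it assumes only numpy, and horne_extract_column and its arguments are illustrative names, not part of the pipeline.

import numpy as np

def horne_extract_column(data_r, var_r, profile_r, good_r):
    # data_r, var_r: data and variance per row; profile_r: psf normalized to sum 1
    # good_r: boolean mask of usable rows
    wt_r = np.zeros_like(data_r, dtype=float)
    wt_r[good_r] = profile_r[good_r] / var_r[good_r]    # P/V weights, zero in bad rows
    denom = (profile_r * wt_r).sum()                    # sum(w*P)
    if denom == 0:
        return 0.0, 0.0                                 # no usable rows
    flux = (data_r * wt_r).sum() / denom                # sum(w*D)/sum(w*P)
    return flux, 1.0 / denom                            # flux and its variance

# sanity check: a flat profile with unit variance reduces to a straight sum
print(horne_extract_column(np.array([1., 2., 3., 2., 1.]), np.ones(5),
                           np.full(5, 0.2), np.ones(5, bool)))    # ~ (9.0, 5.0)
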
Example #7
def specpolextract(infilelist, logfile='salt.log', debug=False):

    #set up the files
    obsdate = os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict = obslog(infilelist)
        # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip() == 'ARC'):
                del infilelist[i]
        infiles = len(infilelist)

        # contiguous images of the same object and config are grouped together
        obs_dict = obslog(infilelist)
        confno_i, confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array(
            [objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)],
            dtype=int)
        grp_i = np.zeros((infiles), dtype=int)
        grp_i[1:] = ((confno_i[1:] != confno_i[:-1]) |
                     (objno_i[1:] != objno_i[:-1])).cumsum()

        for g in np.unique(grp_i):
            ilist = np.where(grp_i == g)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            imagenolist = [
                int(os.path.basename(infilelist[i]).split('.')[0][-4:])
                for i in ilist
            ]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+'  Grating %s  Grang %6.2f  Artic %6.2f' % \
               confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message('  Images: ' + outfiles * '%i ' % tuple(imagenolist),
                        with_header=False)
            hdu0 = pyfits.open(outfilelist[0])
            rows, cols = hdu0['SCI'].data.shape[1:3]
            cbin, rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

            # special version for lamp data
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if lampid != "NONE":
                specpollampextract(outfilelist, logfile=logfile)
                continue

        # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(
                        outfilelist[i])['SCI'].data * count_orc
                    var_orc = pyfits.open(
                        outfilelist[i])['VAR'].data * count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(
                        outfilelist[i])['SCI'].data * (~badbin_orc).astype(int)
                    var_orc += pyfits.open(
                        outfilelist[i])['VAR'].data * (~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue
            image_orc[count_orc > 0] /= count_orc[count_orc > 0]
            badbinall_orc = (count_orc == 0) | (image_orc == 0)  # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc == 0)  # bin is bad in at least one image
            var_orc[count_orc > 0] /= (count_orc[count_orc > 0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            if slitid[0] == "P": slitwidth = float(slitid[2:5]) / 10.
            else: slitwidth = float(slitid)

            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)
            hdusum = pyfits.HDUList(hdusum)
            header = hdu0['SCI'].header.copy()
            hdusum.append(
                pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(
                pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(
                pyfits.ImageHDU(data=badbinall_orc.astype('uint8'),
                                header=header,
                                name='BPM'))
            hdusum.append(
                pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))
            #            hdusum.writeto("groupsum_"+str(g)+".fits",clobber=True)

            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile)

            maprow_ocd = maprow_od[:, None, :] + np.zeros((2, cols, 4))
            maprow_ocd[:, :, [1, 2]] -= drow_oc[:, :, None]  # edge is straight, target curved

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])
            isskycont_orc = (((np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]+rows/16) |  \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3]-rows/16)) & ~isedge_orc)
            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc

            #            pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc1.fits',clobber=True)
            #            pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)
            #            pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)
            #            pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)

            # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
            psfnormmin = 0.70  # wavelengths with less than this flux in good bins are marked bad
            wbin = wav_orc[0, rows / 2, cols / 2] - wav_orc[0, rows / 2,
                                                            cols / 2 - 1]
            wbin = float(int(wbin / 0.75))
            wmin, wmax = wav_orc.min(axis=2).max(), wav_orc.max(axis=2).min()
            wedgemin = wbin * int(wmin / wbin + 0.5) + wbin / 2.
            wedgemax = wbin * int(wmax / wbin - 0.5) + wbin / 2.
            wedge_w = np.arange(wedgemin, wedgemax + wbin, wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2, rows, wavs + 1))
            psf_orw = np.zeros((2, rows, wavs))
            specrow_or = (maprow_od[:, 1:3].mean(axis=1)[:, None] +
                          np.arange(-rows / 4, rows / 4)).astype(int)
            #            pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc2.fits',clobber=True)
            for o in (0, 1):
                for r in specrow_or[o]:
                    binedge_orw[o, r] = interp1d(wav_orc[o, r],
                                                 np.arange(cols))(wedge_w)
                    psf_orw[o, r] = scrunch1d(psf_orc[o, r], binedge_orw[o, r])
            psf_orw /= psf_orw.sum(axis=1)[:, None, :]

            #            np.savetxt("psfnorm_ow.txt",(psf_orw*okbin_orw).sum(axis=1).T,fmt="%10.4f")
            #            pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto('psf_orw.fits',clobber=True)
            #            pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_orw.fits',clobber=True)

            # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
            if docolshift:
                img_I, dcol_I = np.loadtxt(colshiftfilename,
                                           dtype=float,
                                           unpack=True,
                                           usecols=(0, 1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images ' +
                            shifts * '%5i ' % tuple(img_I),
                            with_header=False)
                log.message(' Bins    ' + shifts * '%5.2f ' % tuple(dcol_I),
                            with_header=False)

        # background-subtract and extract spectra
            psfbadfrac_iow = np.zeros((outfiles, 2, wavs))

            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                sci_orc = hdulist['sci'].data
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc
                tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:]

                # make background continuum image, smoothed over resolution element
                rblk, cblk = int(1.5 * 8. / rbin), int(slitwidth * 8. / cbin)
                target_orc = np.zeros_like(sci_orc)

                for o in (0, 1):
                    bkgcont_rc = blksmooth2d(sci_orc[o],
                                             isbkgcont_orc[o],
                                             rblk,
                                             cblk,
                                             0.25,
                                             mode="mean")

                    # remove sky continuum: ends of bkg continuum * skyflat
                    skycont_c = (bkgcont_rc.T[isskycont_orc[o].T]/skyflat_orc[o].T[isskycont_orc[o].T])  \
                            .reshape((cols,-1)).mean(axis=1)
                    skycont_rc = skycont_c * skyflat_orc[o]

                    # remove sky lines: image - bkg cont run through 2d sky averaging
                    obj_data = ((sci_orc[o] - bkgcont_rc) / skyflat_orc)[o]
                    obj_data[(badbin_orc | isedge_orc
                              | istarget_orc)[o]] = np.nan
                    #                    pyfits.PrimaryHDU(obj_data.astype('float32')).writeto('obj_data.fits',clobber=True)
                    skylines_rc = make_2d_skyspectrum(obj_data, wav_orc[o],
                                                      np.array([
                                                          [0, rows],
                                                      ])) * skyflat_orc[o]
                    target_orc[o] = sci_orc[o] - skycont_rc - skylines_rc
#                    pyfits.PrimaryHDU(skylines_rc.astype('float32')).writeto('skylines_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
#                    pyfits.PrimaryHDU(skycont_rc.astype('float32')).writeto('skycont_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
                target_orc *= (~badbin_orc).astype(int)
                #                pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits',clobber=True)

                # extract spectrum optimally (Horne, PASP 1986)
                target_orw = np.zeros((2, rows, wavs))
                var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2, rows, wavs), dtype='bool')
                wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(
                            img_I == int(tnum))]  # table has observed shift
                for o in (0, 1):
                    for r in specrow_or[o]:
                        target_orw[o, r] = scrunch1d(target_orc[o, r],
                                                     binedge_orw[o, r] + dcol)
                        var_orw[o, r] = scrunch1d(var_orc[o, r],
                                                  binedge_orw[o, r] + dcol)
                        badbin_orw[o, r] = scrunch1d(
                            badbin_orc[o, r].astype(float),
                            binedge_orw[o, r] + dcol) > 0.001
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw *
                                (~badbin_orw)).sum(axis=1)[:, None, :] <
                               psfnormmin)

                #                pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
                #                pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True)

                # use master psf shifted in row to allow for guide errors
                pwidth = 2 * int(1. / psf_orw.max())
                ok_w = ((psf_orw * badbin_orw).sum(axis=1) <
                        0.03 / float(pwidth / 2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:, s:s - pwidth] *
                                     target_orw[:, pwidth / 2:-pwidth / 2] *
                                     ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax - pwidth / 4,
                                smax - pwidth / 4 + pwidth / 2 + 1)
                polycof = la.lstsq(
                    np.vstack((s_S**2, s_S, np.ones_like(s_S))).T,
                    crosscor_s[s_S])[0]
                pshift = -(-0.5 * polycof[1] / polycof[0] - pwidth / 2)
                s = int(pshift + pwidth) - pwidth
                sfrac = pshift - s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(
                    max(0, s + 1),
                    rows - (1 + int(abs(pshift))) + max(0, s + 1))
                psfsh_orw[:, outrow] = (
                    1. - sfrac
                ) * psf_orw[:, outrow - s] + sfrac * psf_orw[:, outrow - s - 1]
                #                pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw] / var_orw[
                    ~badbin_orw]
                var_ow = (psfsh_orw * wt_orw * (~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1. / var_ow[~badbin_ow]

                #                pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
                #                pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
                #                pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw * wt_orw).sum(axis=1) * var_ow

                badlim = 0.20
                psfbadfrac_iow[i] = (psfsh_orw * badbin_orw.astype(int)).sum(
                    axis=1) / psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_iow[i] > badlim)

                #                cdebug = 39
                #                np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \
                #                 wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e")

                # write O,E spectrum, prefix "e". VAR, BPM for each spectrum. y dim is virtual (length 1)
                # for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
                hduout = pyfits.HDUList(hduout)
                header = hdulist['SCI'].header.copy()
                header.update('VAREXT', 2)
                header.update('BPMEXT', 3)
                header.update('CRVAL1', wedge_w[0] + wbin / 2.)
                header.update('CRVAL2', 0)
                header.update('CDELT1', wbin)
                header.update('CTYPE1', 'Angstroms')

                hduout.append(
                    pyfits.ImageHDU(data=sci_ow.reshape((2, 1, wavs)),
                                    header=header,
                                    name='SCI'))
                header.update('SCIEXT',
                              1,
                              'Extension for Science Frame',
                              before='VAREXT')
                hduout.append(
                    pyfits.ImageHDU(data=var_ow.reshape((2, 1, wavs)),
                                    header=header,
                                    name='VAR'))
                hduout.append(
                    pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape(
                        (2, 1, wavs)),
                                    header=header,
                                    name='BPM'))

                hduout.writeto('e' + outfilelist[i],
                               clobber=True,
                               output_verify='warn')
                log.message('Output file ' + 'e' + outfilelist[i],
                            with_header=False)

#            np.savetxt("psfbadfrac_iow.txt",psfbadfrac_iow.reshape((-1,wavs)).T,fmt="%8.5f")
    return
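
The guide-error correction above finds the psf row shift by cross-correlating the master psf against each image over pwidth trial offsets, then refining the argmax with a least-squares quadratic fit (la.lstsq) and taking the parabola vertex at -b/(2a). Below is a minimal sketch of just that sub-bin refinement, assuming only numpy; parabolic_peak and halfwidth are illustrative names, not part of the pipeline.

import numpy as np

def parabolic_peak(crosscor_s, halfwidth=2):
    # fit y = a*s**2 + b*s + c through the points around the argmax and
    # return the fractional peak position at the vertex, -b/(2a)
    smax = int(np.argmax(crosscor_s))
    lo = max(0, smax - halfwidth)
    hi = min(len(crosscor_s), smax + halfwidth + 1)
    s_S = np.arange(lo, hi, dtype=float)
    a, b, _ = np.linalg.lstsq(np.vstack((s_S**2, s_S, np.ones_like(s_S))).T,
                              crosscor_s[lo:hi])[0]
    return -0.5 * b / a

# a noiseless parabola peaking at s = 3.25 is recovered exactly
s = np.arange(8.)
print(parabolic_peak(-(s - 3.25)**2))    # -> 3.25
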
Example #8
def specpolextract(infilelist, logfile='salt.log', debug=False):

#set up the files
    obsdate=os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict=obslog(infilelist)
    # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip()=='ARC'): del infilelist[i]            
        infiles = len(infilelist)

    # contiguous images of the same object and config are grouped together
        obs_dict=obslog(infilelist)
        confno_i,confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array([objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)],dtype=int)
        grp_i = np.zeros((infiles),dtype=int)
        grp_i[1:] = ((confno_i[1:] != confno_i[:-1]) | (objno_i[1:] != objno_i[:-1])).cumsum()

        for g in np.unique(grp_i):
            ilist = np.where(grp_i==g)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            imagenolist = [int(os.path.basename(infilelist[i]).split('.')[0][-4:]) for i in ilist]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+'  Grating %s  Grang %6.2f  Artic %6.2f' % \
               confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message('  Images: '+outfiles*'%i ' % tuple(imagenolist), with_header=False)
            hdu0 =  pyfits.open(outfilelist[0])       
            rows,cols = hdu0['SCI'].data.shape[1:3]
            cbin,rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

        # special version for lamp data
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if lampid!="NONE":
                specpollampextract(outfilelist, logfile=logfile)           
                continue

        # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0: 
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc
                    var_orc = pyfits.open(outfilelist[i])['VAR'].data*count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc).astype(int)
                    var_orc += pyfits.open(outfilelist[i])['VAR'].data*(~badbin_orc).astype(int)
                count += 1
            if count ==0:
                print 'No valid images'
                continue
            image_orc[count_orc>0] /= count_orc[count_orc>0]
            badbinall_orc = (count_orc==0) | (image_orc==0)             # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc==0)        # bin is bad in at least one image
            var_orc[count_orc>0] /= (count_orc[count_orc>0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            if slitid[0] =="P": slitwidth = float(slitid[2:5])/10.
            else: slitwidth = float(slitid) 

            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)    
            hdusum = pyfits.HDUList(hdusum)
            header=hdu0['SCI'].header.copy()           
            hdusum.append(pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(pyfits.ImageHDU(data=badbinall_orc.astype('uint8'), header=header, name='BPM'))
            hdusum.append(pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))
#            hdusum.writeto("groupsum_"+str(g)+".fits",clobber=True)

            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile)

            maprow_ocd = maprow_od[:,None,:] + np.zeros((2,cols,4)) 
            maprow_ocd[:,:,[1,2]] -= drow_oc[:,:,None]      # edge is straight, target curved

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])
            isskycont_orc = (((np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]+rows/16) |  \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3]-rows/16)) & ~isedge_orc)                                     
            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc

#            pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc1.fits',clobber=True) 
#            pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)   
#            pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)  
#            pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)  

        # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
            psfnormmin = 0.70    # wavelengths with less than this flux in good bins are marked bad
            wbin = wav_orc[0,rows/2,cols/2]-wav_orc[0,rows/2,cols/2-1] 
            wbin = float(int(wbin/0.75))
            wmin,wmax = wav_orc.min(axis=2).max(),wav_orc.max(axis=2).min()
            wedgemin = wbin*int(wmin/wbin+0.5) + wbin/2.
            wedgemax = wbin*int(wmax/wbin-0.5) + wbin/2.
            wedge_w = np.arange(wedgemin,wedgemax+wbin,wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2,rows,wavs+1))
            psf_orw = np.zeros((2,rows,wavs))
            specrow_or = (maprow_od[:,1:3].mean(axis=1)[:,None] + np.arange(-rows/4,rows/4)).astype(int)
#            pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc2.fits',clobber=True)  
            for o in (0,1):
                for r in specrow_or[o]:
                    binedge_orw[o,r] = interp1d(wav_orc[o,r],np.arange(cols))(wedge_w)
                    psf_orw[o,r] = scrunch1d(psf_orc[o,r],binedge_orw[o,r])
            psf_orw /= psf_orw.sum(axis=1)[:,None,:]

#            np.savetxt("psfnorm_ow.txt",(psf_orw*okbin_orw).sum(axis=1).T,fmt="%10.4f") 
#            pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto('psf_orw.fits',clobber=True)
#            pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_orw.fits',clobber=True) 

        # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
            if docolshift:
                img_I,dcol_I = np.loadtxt(colshiftfilename,dtype=float,unpack=True,usecols=(0,1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images '+shifts*'%5i ' % tuple(img_I), with_header=False)                 
                log.message(' Bins    '+shifts*'%5.2f ' % tuple(dcol_I), with_header=False)                 
               
        # background-subtract and extract spectra                
            psfbadfrac_iow = np.zeros((outfiles,2,wavs))

            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                sci_orc = hdulist['sci'].data
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc
                tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:]

            # make background continuum image, smoothed over resolution element
                rblk,cblk = int(1.5*8./rbin), int(slitwidth*8./cbin)
                target_orc = np.zeros_like(sci_orc)

                for o in (0,1):
                    bkgcont_rc = blksmooth2d(sci_orc[o],isbkgcont_orc[o],rblk,cblk,0.25,mode="mean")
           
            # remove sky continuum: ends of bkg continuum * skyflat
                    skycont_c = (bkgcont_rc.T[isskycont_orc[o].T]/skyflat_orc[o].T[isskycont_orc[o].T])  \
                            .reshape((cols,-1)).mean(axis=1)
                    skycont_rc = skycont_c*skyflat_orc[o]
        
            # remove sky lines: image - bkg cont run through 2d sky averaging
                    obj_data = ((sci_orc[o] - bkgcont_rc)/skyflat_orc)[o]
                    obj_data[(badbin_orc | isedge_orc | istarget_orc)[o]] = np.nan
#                    pyfits.PrimaryHDU(obj_data.astype('float32')).writeto('obj_data.fits',clobber=True)
                    skylines_rc = make_2d_skyspectrum(obj_data,wav_orc[o],np.array([[0,rows],]))*skyflat_orc[o]
                    target_orc[o] = sci_orc[o] - skycont_rc - skylines_rc
#                    pyfits.PrimaryHDU(skylines_rc.astype('float32')).writeto('skylines_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
#                    pyfits.PrimaryHDU(skycont_rc.astype('float32')).writeto('skycont_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
                target_orc *= (~badbin_orc).astype(int)             
#                pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits',clobber=True)

            # extract spectrum optimally (Horne, PASP 1986)
                target_orw = np.zeros((2,rows,wavs));   var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2,rows,wavs),dtype='bool');   wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(img_I==int(tnum))]    # table has observed shift
                for o in (0,1):
                    for r in specrow_or[o]:
                        target_orw[o,r] = scrunch1d(target_orc[o,r],binedge_orw[o,r]+dcol)
                        var_orw[o,r] = scrunch1d(var_orc[o,r],binedge_orw[o,r]+dcol)
                        badbin_orw[o,r] = scrunch1d(badbin_orc[o,r].astype(float),binedge_orw[o,r]+dcol) > 0.001 
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw*(~badbin_orw)).sum(axis=1)[:,None,:] < psfnormmin)


#                pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
#                pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True)
  
            # use master psf shifted in row to allow for guide errors
                pwidth = 2*int(1./psf_orw.max())
                ok_w = ((psf_orw*badbin_orw).sum(axis=1) < 0.03/float(pwidth/2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:,s:s-pwidth]*target_orw[:,pwidth/2:-pwidth/2]*ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax-pwidth/4,smax-pwidth/4+pwidth/2+1)
                polycof = la.lstsq(np.vstack((s_S**2,s_S,np.ones_like(s_S))).T,crosscor_s[s_S])[0]
                pshift = -(-0.5*polycof[1]/polycof[0] - pwidth/2)
                s = int(pshift+pwidth)-pwidth
                sfrac = pshift-s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(max(0,s+1),rows-(1+int(abs(pshift)))+max(0,s+1))
                psfsh_orw[:,outrow] = (1.-sfrac)*psf_orw[:,outrow-s] + sfrac*psf_orw[:,outrow-s-1]
#                pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw]/var_orw[~badbin_orw]
                var_ow = (psfsh_orw*wt_orw*(~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1./var_ow[~badbin_ow]

#                pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
#                pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
#                pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw*wt_orw).sum(axis=1)*var_ow

                badlim = 0.20
                psfbadfrac_iow[i] = (psfsh_orw*badbin_orw.astype(int)).sum(axis=1)/psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_iow[i] > badlim)

#                cdebug = 39
#                np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \
#                 wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e")

            # write O,E spectrum, prefix "e". VAR, BPM for each spectrum. y dim is virtual (length 1)
            # for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)    
                hduout = pyfits.HDUList(hduout)
                header=hdulist['SCI'].header.copy()
                header.update('VAREXT',2)
                header.update('BPMEXT',3)
                header.update('CRVAL1',wedge_w[0]+wbin/2.)
                header.update('CRVAL2',0)
                header.update('CDELT1',wbin)
                header.update('CTYPE1','Angstroms')
            
                hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2,1,wavs)), header=header, name='SCI'))
                header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
                hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2,1,wavs)), header=header, name='VAR'))
                hduout.append(pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape((2,1,wavs)), header=header, name='BPM'))            
            
                hduout.writeto('e'+outfilelist[i],clobber=True,output_verify='warn')
                log.message('Output file '+'e'+outfilelist[i] , with_header=False)

#            np.savetxt("psfbadfrac_iow.txt",psfbadfrac_iow.reshape((-1,wavs)).T,fmt="%8.5f")
    return
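
Throughout these routines the curved wavelength map is resampled onto a uniform grid by converting each output bin edge to a fractional column position (binedge_orw, via interp1d) and then rebinning with scrunch1d. Below is a minimal flux-conserving stand-in for that rebinning step, assuming only numpy; rebin_flux is an illustrative name, not the pipeline's scrunch1d.

import numpy as np

def rebin_flux(flux_c, edges_w):
    # flux per input column, redistributed into output bins whose boundaries
    # are given as (fractional) input-pixel positions; total flux is conserved
    cols = len(flux_c)
    cum = np.concatenate(([0.], np.cumsum(flux_c)))            # cumulative flux at pixel edges
    cum_at = np.interp(np.clip(edges_w, 0., cols),
                       np.arange(cols + 1, dtype=float), cum)  # cumulative flux at bin edges
    return np.diff(cum_at)                                     # flux inside each output bin

# constant flux of 1 per pixel rebinned onto 2-pixel-wide bins gives 2 per bin
print(rebin_flux(np.ones(10), np.array([0., 2., 4., 6., 8., 10.])))
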