Example #1
    def _medFilterMaskedWgtIm(self, imname, medfiltsize=5):
        """ Method to median filter an image, intended to be the
        detection weight image.  Default filter size is 5 pix.
        It first makes a mask from the input image so that no input
        zero pixels (which are specially flagged as having no weight)
        become nonzero after filtering.
        """
        tmpMed  = '_tmpMED.fits'
        tmpMask = '_tmpMask.fits'
        iraf.flpr()
        iraf.flpr(iraf.median)
        iraf.flpr(iraf.imcalc)

        iraf.unlearn(iraf.median)
        iraf.median.input   = imname
        iraf.median.output  = tmpMed
        iraf.median.xwindow = medfiltsize
        iraf.median.ywindow = medfiltsize
        iraf.median.mode = 'h'
        iraf.median()

        iraf.unlearn(iraf.imcalc)
        iraf.imcalc(imname, tmpMask, "if im1 .eq. 0 then 0 else 1")
        _instring = tmpMed+','+tmpMask
        iraf.imcalc(_instring, tmpMed, "im1 * im2")

        os.rename(tmpMed,imname)
        os.remove(tmpMask)
        iraf.flpr(iraf.median)
        iraf.flpr(iraf.imcalc)
        iraf.flpr()

        return
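
A rough non-IRAF sketch of the same mask-preserving median filter, assuming astropy and scipy are available (the function and file handling below are illustrative, not part of the original class):

from astropy.io import fits
from scipy.ndimage import median_filter

def med_filter_masked_weight(imname, medfiltsize=5):
    # Median filter a weight image in place while keeping zero-weight pixels at zero.
    with fits.open(imname, mode='update') as hdul:
        data = hdul[0].data
        filtered = median_filter(data, size=medfiltsize)
        # Pixels that were exactly zero (flagged as having no weight) must stay zero.
        hdul[0].data = filtered * (data != 0)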
Example #2
def bootstrapZP(image,cat='zp.cat',refcat='ref.cat',refzp=1.0):

    (ra,dec,mag,star)=np.loadtxt(cat,usecols=(0,1,2,3),unpack=True,
                                     dtype=np.dtype([('ra','<f10'),('dec','<f10'),('mag','<f10'),
                                                     ('star','<f10')]))

    (rra,rdec,rmag,rstar)=np.loadtxt(refcat,usecols=(0,1,2,3),unpack=True,
                                            dtype=np.dtype([('rra','<f10'),('rdec','<f10'),('rmag','<f10'),
                                                            ('rstar','<f10')]))

    #Grab only stars from the reference catalog
    refgood=np.where((rstar >= 0.98) & (rmag != 99.0) & (rmag > 17.0) & (rmag < 22.5))
    refcat=SkyCoord(ra=rra[refgood]*u.degree,dec=rdec[refgood]*u.degree)

    #Sort through and remove anything that is not a star or is not isolated
    #from other sources in the input catalog
    catgood=np.where((star >= 0.98) & (mag != 99.0))
    cat=SkyCoord(ra=ra[catgood]*u.degree,dec=dec[catgood]*u.degree)
    idx,d2d,_=cat.match_to_catalog_sky(refcat)
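    #The second argument (2) asks for the 2nd-nearest neighbor, i.e. the closest
    #*other* source when matching the catalog against itself (isolation check below).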
    _,d2d2,_=cat.match_to_catalog_sky(cat,2)
    final=np.where((d2d.arcsec < 0.5) & (d2d2.arcsec >= 5.0))
    
    diff=rmag[refgood][idx][final]-mag[catgood][final]
    imgZP=np.mean(diff)

    print '\n\tUsing '+str(len(diff))+' stars to calculate ZP...'
    print '\tMean ZP: '+str(round(imgZP,3))+' mag\n'

    scaleFactor=10.0**(0.4*(refzp-imgZP))

    iraf.unlearn('imcalc')
    iraf.imcalc(image,image[:-5]+'_scaled.fits','im1*'+str(scaleFactor))
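
The scale factor above comes from the magnitude/flux relation: a zeropoint (magnitude) difference of dm corresponds to a multiplicative flux factor of 10**(0.4*dm). A tiny worked example with illustrative numbers:

refzp, imgZP = 25.0, 24.5                  # illustrative zeropoints
scaleFactor = 10.0**(0.4*(refzp - imgZP))  # 10**0.2, roughly 1.585
# Multiplying the image counts by this factor shifts its photometric scale by
# refzp - imgZP = 0.5 mag, i.e. onto the reference zeropoint.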
Example #3
def mkScaleImage(image):

    hdr=pf.open(image)[0].header
    (mult,add)=(hdr['MSCSCALE'],hdr['MSCZERO'])
    iraf.unlearn('imcalc')
    if os.path.exists(image[:-5]+'_scale.fits'):
        os.remove(image[:-5]+'_scale.fits')
    iraf.imcalc(image,image[:-5]+'_scale.fits','(im1+'+str(add)+')*'+str(mult))
Example #4
def mkWeightMap(image,search=''):

    if search != '':
        imname=glob.glob(search)[0]
    else:
        imname=image
        
    iraf.unlearn('imcalc')
    iraf.imcalc(imname,'weight.fits','if im1 .eq. 0 then 0.0 else 1.0')
Example #5
	def doImcalc(self, input, output, operation):
		'''
		Simple interface for IRAF task imcalc. Can be used for simple
		arithmetic operations between images. This could be rewritten with
		PyFits and NumPy to give broader selection of operations.
		'''
		self.input = input
		self.output = output
		self.operation = operation

		I.imcalc(self.input, self.output, self.operation)
Example #6
    def doImcalc(self, input, output, operation):
        '''
        Simple interface for IRAF task imcalc. Can be used for simple
        arithmetic operations between images. This could be rewritten with
        PyFits and NumPy to give broader selection of operations.
        '''
        self.input = input
        self.output = output
        self.operation = operation

        I.imcalc(self.input, self.output, self.operation)
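
As the docstring suggests, the same simple image arithmetic can be done without IRAF. A minimal NumPy/astropy sketch (the function and its names are illustrative, not part of the original class):

from astropy.io import fits

def np_imcalc(inputs, output, func):
    # Apply `func` to the data arrays of the input FITS images and write the result,
    # reusing the header of the first input.
    arrays = [fits.getdata(f) for f in inputs]
    header = fits.getheader(inputs[0])
    fits.writeto(output, func(*arrays), header=header, overwrite=True)

# e.g. np_imcalc(['a.fits', 'b.fits'], 'diff.fits', lambda a, b: a - b)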
Example #7
def make_covar_map(idmap, catalog):
    """
   Given the IDMAP image, replace the ID by the covariance index of the object,
   in order to visualize the distribution of covariance indices in the image.
   """
    c = sextractor(catalog)
    cvdict = {}
    for i in range(len(c)):
        cvdict[c.objectid[i]] = c.maxcvratio[i]

    # Now make covariance id map
    covar_idmap = 'covar_' + idmap
    if os.path.exists(covar_idmap):
        os.remove(covar_idmap)
    iraf.imcalc(idmap, covar_idmap, 'im1*1.0', pixtype='double')
    #os.system('cp %s %s' % (idmap, covar_idmap))
    h = pyfits.open(covar_idmap, mode='update')
    data = h[0].data
    datacopy = np.copy(data)
    for k, v in cvdict.iteritems():
        datacopy[data == k] = v
    h[0].data = datacopy
    h.flush()
    h.close()
Example #8
def scaleZP(inputList,reference,logfile='mosaic_log'):

    refZP=pf.open(reference)[0].header['ZEROPT']
    refZPerr=pf.open(reference)[0].header['ZEROERR']

    print '\nNormalizing all images in stack to common zeropoint...'

    newStack=[]
    for x in inputList:
        copy=x[:-5]+'_tmp.fits'
        hdu=pf.open(x,mode='update')
        imgZP=hdu[0].header['ZEROPT']
        hdu[0].header['ZEROPT']=refZP
        hdu[0].header['ZEROERR']=refZPerr
        hdu.flush()
        hdu.close()
        
        scaleFactor=10.0**(-0.4*(refZP-imgZP))
        iraf.unlearn('imcalc')
        iraf.imcalc(x,copy,'im1*'+str(scaleFactor),verbose='yes')

        newStack.append(copy)

    return (newStack,refZP,refZPerr)
Example #9
    def _do_driz_cr(self, simplefits, mef, Ncombed):
        """ find cosmic rays using iraf.dither.driz_cr """
        iraf.unlearn(iraf.driz_cr)
        inputfits = '_dz_' + simplefits  # WZ
        #os.rename(simplefits,'temp.fits') #WZ
        #os.rename(inputfits,simplefits)
        iraf.driz_cr.inlist = simplefits
        iraf.driz_cr.group = 0

        #pdb.set_trace()
        if self.crlower:
            iraf.driz_cr.SNR = "4.4 2.0"
            iraf.driz_cr.scale = "0.6 0.4"
        else:
            # iraf.driz_cr.SNR    = "5.0 3.0"
            # iraf.driz_cr.SNR    = "4.8 3.0"    #<-- changed from 5.0 to 4.8
            # iraf.driz_cr.SNR    = "4.7 2.7"

            #iraf.driz_cr.SNR    = "4.7 2.6"
            #iraf.driz_cr.SNR    = "4.7 2.4"

            # iraf.driz_cr.scale  = "0.9 0.5"
            # 15/Apr/2002, jpb: changing yet again - does this help?
            # iraf.driz_cr.scale  = "1.0 0.6"
            # iraf.driz_cr.scale  = "1.2 0.8"
            # 18/Jun/2002 now that the blot rotation problem is fixed...
            # iraf.driz_cr.scale  = "1.0 0.6"
            # iraf.driz_cr.scale  = "1.2 0.6"
            #iraf.driz_cr.scale  = "1.3 0.6"

            # ARGH, still problems with centers of some stars
            # has to do with the smooth of the derivative in driz/blotting
            # going with this 14/Nov/2002:
            # iraf.driz_cr.SNR    = "4.5 2.4"   tweak again 18/March/2003
            iraf.driz_cr.SNR = "4.5 2.1"
            iraf.driz_cr.scale = "1.7 0.7"

        if self.verbose:
            print "Using driz_cr S/N rejection thresholds: ", iraf.driz_cr.SNR
            print "and derivative scales: ", iraf.driz_cr.scale
        self.logfile.write(' pyblot driz_cr rej thresh: ' + iraf.driz_cr.SNR +
                           '   deriv scales: ' + iraf.driz_cr.scale)

        iraf.driz_cr.backg = self.skyKey

        # get gain, readnoise from image header
        _tmp_hdr_gain_, _tmp_single_rn = _gain_rn(mef, self.logfile, ext=0)
        # increase readnoise by sqrt of # of constituent raw images
        iraf.driz_cr.rn = math.sqrt(1.0 * Ncombed) * _tmp_single_rn

        #  only use the header gain if hdrGain has been set
        if self.hdrGain:
            iraf.driz_cr.gain = _tmp_hdr_gain_
        else:
            iraf.driz_cr.gain = 1.0
        del _tmp_hdr_gain_, _tmp_single_rn
        iraf.driz_cr.mode = 'h'
        print '\n ***> driz_cr using gain,rn_tot = ', iraf.driz_cr.gain, iraf.driz_cr.rn
        print ' ***> (NCOMBINE = ' + str(Ncombed) + ')'

        plmask = string.split(simplefits, '.')[0] + '_cr.pl'
        fitsmask = string.split(simplefits, '.')[0] + '_cr.fits'
        try:
            os.remove(plmask)
        except:
            pass
        try:
            os.remove(fitsmask)
        except:
            pass

        if self.verbose:
            # print ' running driz_cr() on ',simplefits
            print ' running driz_cr() on ', inputfits  #WZ
            print '   output: ', plmask
        #pdb.set_trace()
        iraf.driz_cr()

        #os.rename(simplefits,inputfile) # WZ
        #os.rename('temp.fits',simplefits)

        # convert mask from pixel list to short integer fits
        iraf.unlearn(iraf.imcalc)
        iraf.imcalc.pixtype = 'short'

        # following line is not right -- drizzle uses 1=good, 0=bad
        #iraf.imcalc(plmask, fitsmask, "if im1 .gt. 0 then 0 else 1")
        if self.verbose:
            print ' converting', plmask, 'to', fitsmask
        iraf.imcalc(plmask, fitsmask, "if im1 .eq. 0 then 0 else 1")
        # the pixel-list version can be removed immediately
        if self.clean_up:
            os.remove(plmask)

        # finally, save the mask name in a dictionary
        nzero = numMaskZeros(fitsmask)
        self.crmasks[simplefits] = (fitsmask, nzero)
        if self.verbose:
            print 'Masked ' + str(nzero) + ' CR pixels in ' + simplefits
        return fitsmask
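
The imcalc expression used here simply turns the pixel-list mask into a 0/1 image (1 where the input is non-zero). For reference, the equivalent operation on an in-memory array (hypothetical variable names; the .pl pixel-list file itself is an IRAF format, not FITS):

import numpy as np
fits_mask = (mask_data != 0).astype(np.int16)  # mask_data: the mask as a NumPy array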
Example #10
    def _medDriz(self, parList, usemask=1):
        """median-combined separate drizzle images.  there is an iraf dependant kludge
       in here involving the inputString which the a string of input files for imcombine.
       iraf cannot apparently handle a string over a certain size (guessing 512 char).
       So, we now write out a temp file, imcombine_input, which is just a list of the
       input files imcombine is use.  We use the iraf idiom input = "@file" to get this
       task to run.  So far it seems to work.
       """
        drizList = []
        maskList = []
        iraf.flpr('imcombine')
        iraf.unlearn(iraf.imcalc)
        iraf.imcalc.pixtype = 'short'
        self.logfile.write("Entered _medDriz for asn number %d." %
                           (self.runNum))
        for ii in range(len(parList)):
            for jj in range(ii + 1, len(parList)):
                if parList[ii]['outdata'] == parList[jj]['outdata']:
                    raise KeyError, "requested to median stack same images!"
            drizList.append(parList[ii]['outdata'])
            if usemask:
                plmask = string.split(parList[ii]['outcontext'],
                                      '.')[0] + '.pl'
                try:
                    os.remove(plmask)
                except:
                    pass
                if self.verbose:
                    print 'making', plmask, 'from', parList[ii][
                        'outcontext'], '...'
                iraf.imcalc(parList[ii]['outcontext'], plmask,
                            "if im1 .eq. 0 then 0 else 1")
                maskList.append(plmask)
                self.removeList.append(plmask)

        # construct input list and add masks info to the headers
        inputString = drizList[0]

        # ***> NOTE!  If list too big, imcombine crashes!
        # ***> NOTE!  define 76 as a safe maximum, or 80...
        MAX_IM = 80
        NumTot = len(drizList)
        NumIm = min(NumTot, MAX_IM)
        for ii in range(NumIm):
            if ii > 0:
                inputString = inputString + ',' + drizList[ii]
            if usemask:
                fUtil.fixHeader(drizList[ii], [('BPM', maskList[ii])])
        if self.verbose:
            print 'median stacking: ', inputString
            if usemask: print '  with masks: ', maskList

        #if that all checks out, go ahead and median
        iraf.unlearn(iraf.imcombine)

        # 15/Apr/2002, jpb: want to keep all the medriz's around for CR-rej debugging
        outfile = 'medriz_' + str(self.runNum) + '.fits'
        # self.removeList.append(outfile)
        try:
            os.remove(outfile)
        except:
            pass

        # temp file for iraf input because the list might be too big.
        filekludge = open("imcombine_input", "w")
        newinputList = inputString.split(',')

        if (NumIm != len(newinputList)):
            errtxt = "ERROR! Error:  NumIm != len(newinputList) in _medDriz ?!"
            print errtxt
            self.logfile.write(errtxt)

        for item in newinputList:
            filekludge.write(item + "\n")
        filekludge.close()

        #iraf.imcombine.input = inputString         # this is what we used to do.
        iraf.imcombine.input = "@imcombine_input"
        iraf.imcombine.output = outfile
        # iraf.imcombine.plfile = ''
        iraf.imcombine.sigma = ''
        iraf.imcombine.combine = 'median'
        iraf.imcombine.reject = 'minmax'
        iraf.imcombine.outtype = 'real'
        iraf.imcombine.offsets = 'none'
        if usemask:
            iraf.imcombine.masktype = 'badvalue'
        else:
            iraf.imcombine.masktype = 'none'
        iraf.imcombine.maskvalue = 0.
        iraf.imcombine.scale = 'exposure'
        iraf.imcombine.expname = 'EXPTIME'
        iraf.imcombine.nkeep = 1

        # paradoxically, this is not what we want
        # NumIm = len(drizList)/self.imNsci
        # imcombine considers the total number of images being
        # stacked, not the number at any given point!

        # NOTE: nhigh must be >= NumIm/2 if cr rejection to be done everywhere
        if NumIm == 1:  #1,2
            iraf.imcombine.nlow = 0
            iraf.imcombine.nhigh = 0
        elif NumIm == 2:  # 2
            iraf.imcombine.nlow = 0
            iraf.imcombine.nhigh = 1
        elif NumIm == 3:  # 3
            iraf.imcombine.nlow = 0
            iraf.imcombine.nhigh = 2
        elif NumIm == 4:  # 4 HRC;   2 WFC
            iraf.imcombine.nlow = 0
            iraf.imcombine.nhigh = 3
        elif NumIm == 5:  # 5 HRC
            iraf.imcombine.nlow = 1
            iraf.imcombine.nhigh = 3
        elif NumIm == 6:  # 6 HRC;   3 WFC
            iraf.imcombine.nlow = 1
            iraf.imcombine.nhigh = 4
        elif NumIm == 7:  # 7 HRC
            iraf.imcombine.nlow = 1
            iraf.imcombine.nhigh = 5
        elif NumIm < 10:  # 8,9 HRC;  4 WFC
            iraf.imcombine.nlow = 1
            iraf.imcombine.nhigh = 6
        elif NumIm < 12:  # 10,11 HRC;   5 WFC
            iraf.imcombine.nlow = 2
            iraf.imcombine.nhigh = 7
        # next added/changed
        else:
            iraf.imcombine.nlow = (NumIm + 2) / 4 - 1
            iraf.imcombine.nhigh = 3 * NumIm / 4

        iraf.imcombine.mode = 'h'
        self.logfile.write(self.modName+' calling imcombine. come in imcombine.  NumIm/nlo/nhi: '+\
                           str(NumIm)+' '+str(iraf.imcombine.nlow)+' '+str(iraf.imcombine.nhigh)+\
                           (' [NumTot=%d]'%(NumTot)))
        iraf.imcombine()
        if self.verbose:
            print 'NumIm = %d  nlow,high:  %d %d' % (
                NumIm, iraf.imcombine.nlow, iraf.imcombine.nhigh)
            print 'median image', outfile, 'created'
        self.logfile.write('median image ' + outfile +
                           ' created.  Removing imcombine_input temp file.')
        print "removing imcombine_input temp file."
        try:
            os.remove("imcombine_input")
        except:
            pass
        return outfile
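
For reference, the "@file" idiom described in the docstring in its simplest form (file names are illustrative; this assumes the pyraf iraf module is loaded):

with open('imcombine_input', 'w') as f:
    for name in ['im1.fits', 'im2.fits', 'im3.fits']:
        f.write(name + '\n')
iraf.imcombine(input='@imcombine_input', output='stack.fits', combine='median')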
Example #11
                 process="no")

# creating master flat
unlearn(iraf.flatcombine)
print "> creating master flat image"
iraf.flatcombine("Flat*.fit",
                 output="master_flat_temp.fits",
                 combine="median",
                 reject="crreject",
                 ccdtype="flat",
                 process="no",
                 delete="no",
                 scale="median")
# replacing zeros and negative values with a standard value (-9.0)
iraf.imcalc("master_flat_temp.fits",
            output="master_flat.fits",
            equals="if (im1.le.0.0) then -9.0 else im1",
            verbose="no")

# creating the necessary lists for dark and flat processing
rd = open('rawdata.irafl', 'w')
cd = open('cordata.irafl', 'w')

for im in data:
    #	print im
    name, ext = os.path.splitext(im)
    # 	print name, ext
    rd.write(name + ext + "\n")
    cd.write(name + '_cor' + ext + '\n')
#	print name+'_cor'+ext+'\n'

rd.close()
Example #12
def stacking(cllist,zpofflist,ref,zprefoff=0.0,stackname='stack',shiftsize=400):

    """
    """

    #Reset the IRAF tasks used in this routine.
    iraf.unlearn('imcalc')
    iraf.unlearn('imcombine')
    iraf.unlearn('imreplace')
    iraf.unlearn('xyxymatch')
    iraf.unlearn('geomap')
    iraf.unlearn('geotran')
    iraf.unlearn('imcopy')

    #Find reference image in reference directory. Check to make
    #sure that it is actually the image and not the mask file!
    #Grab the mask for adding to the mask list now.
    (refimg,refmask,expmap)=classify(ref+'/tu*.fits')
    zpref=pf.open(refimg)[0].header['MAGZERO']
#    zprefoff=NewfirmZPoffset[ref.split('/')[-1]]
    zprefoff=float(zprefoff)

    #Get 2MASS PSC positions for reference cluster image.
    catalog=get2masspsc(refimg)
    foo=file_check(ref+'/2mass_ref_stars.cdt',delete=True)
    foo=open(ref+'/2mass_ref_stars.cdt','w')
    for y in catalog:
        data=y.split()
        foo.write(data[6]+'\t'+data[7]+'\n')
    foo.close()

    #Create lists for files to be input into the stacking routine.
    foo=file_check('matchlist',delete=True)
    foo=file_check('scalelist',delete=True)
    foo=file_check('shiftlist',delete=True)
    foo=file_check('masklist',delete=True)
    foo=file_check('shiftmask',delete=True)
    foo=file_check('expmaplist',delete=True)
    (matchlist,scalelist,shiftlist,masklist,
     shiftmask,finalmasks,stacklist,stackmask,
     finalmasks2,expmaplist,shiftexp,expmaplist2)=(open('matchlist','w'),open('scalelist','w'),
                                        open('shiftlist','w'),open('masklist','w'),
                                        open('shiftmask','w'),open('finalmasks','w'),
                                        open('stacklist','w'),open('stackmask','w'),
                                        open('finalmasks2','w'),open('expmaplist','w'),
                                        open('shiftexp','w'),open('expmaplist2','w'))
    (xsize,ysize)=(np.array([]),np.array([]))
    
    
    #Step through all of the input cluster directories.
    i=0
    for x in cllist:
        #Find the image, mask, and exposure map files. Get zeropoints and
        #scale image to the reference image.
        scaleimg=x+'/scaled_to_'+ref.split('/')[-1]+'.fits'
        foo=file_check(scaleimg,delete=True)
        (img,mask,expmap)=classify(x+'/tu*.fits')
        imgzp=pf.open(img)[0].header['MAGZERO']
        (xs,ys)=(pf.open(img)[0].header['NAXIS1'],pf.open(img)[0].header['NAXIS2'])
        (xsize,ysize)=(np.append(xsize,xs),np.append(ysize,ys))

        imgzpoff=float(zpofflist[i])
#        imgzpoff=NewfirmZPoffset[x.split('/')[-1]]
        scale=scalecounts(imgzp+imgzpoff,zpref+zprefoff)
        iraf.imcalc(img,scaleimg,'im1*'+str(scale))

        #Get X,Y pixel positions of 2MASS sources from the 2MASS PSC
        #in the image. Use these to compute shifts relative to the
        #reference image using IRAF task geomap.
        foo=file_check(x+'/2mass_ref_stars.cdt',delete=True)
        foo=open(x+'/2mass_ref_stars.cdt','w')
        catalog=get2masspsc(scaleimg)
        for y in catalog:
            data=y.split()
            foo.write(data[6]+'\t'+data[7]+'\n')
        foo.close()
    
        #Match the 2MASS PSC positions with stars in the reference
        #image using xyxymatch. The matched source list is then fed
        #into geomap to get the X and Y shifts.
        foo=file_check(x+'/2mass_matched.cdt',delete=True)
        iraf.xyxymatch(x+'/2mass_ref_stars.cdt',ref+'/2mass_ref_stars.cdt',
                       x+'/2mass_matched.cdt','200.0',verbose='no')

        #Append all of the names of the files for the input and output filename
        #lists to be passed to IRAF tasks further down the line.
        matchlist.write(x+'/2mass_matched.cdt\n')
        scalelist.write(scaleimg+'\n')
        foo=file_check(x+'/scaled_and_shifted.fits',delete=True)
        shiftlist.write(x+'/scaled_and_shifted.fits['+str(shiftsize)+':'+\
                 str(int(np.max(xsize))+shiftsize)+','+str(shiftsize)+':'+\
                 str(int(np.max(ysize))+shiftsize)+']\n')
        stacklist.write(x+'/scaled_and_shifted.fits\n')
        file_check(x+'/mask_tmp.fits',delete=True)
        file_check(x+'/expmap_tmp.fits',delete=True)
        iraf.imarith(mask+'[1]','*',1000.0,x+'/mask_tmp.fits',pixtype='real')
        iraf.imarith(expmap+'[1]','*',1.0,x+'/expmap_tmp.fits',pixtype='real')
        offset=2.558435
        file_check(x+'/mask_tmp2.fits',delete=True)
        iraf.imcalc(x+'/mask_tmp.fits',x+'/mask_tmp2.fits','im1+'+str(offset))
        os.remove(x+'/mask_tmp.fits')
        masklist.write(x+'/mask_tmp2.fits\n')
        file_check(x+'/mask_shift.fits',delete=True)
        shiftmask.write(x+'/mask_shift.fits['+str(shiftsize)+':'+\
                    str(int(np.max(xsize))+shiftsize)+','+str(shiftsize)+':'+\
                    str(int(np.max(ysize))+shiftsize)+']\n')
        stackmask.write(x+'/mask_shift.fits\n')
        finalmasks.write(x+'/mask_final.fits\n')
        finalmasks2.write(x+'/mask_final.fits[0]\n')
        expmaplist.write(x+'/expmap_tmp.fits[0]\n')
        shiftexp.write(x+'/expmap_shift.fits['+str(shiftsize)+':'+\
                    str(int(np.max(xsize))+shiftsize)+','+str(shiftsize)+':'+\
                    str(int(np.max(ysize))+shiftsize)+']\n')
        expmaplist2.write(x+'/expmap_shift.fits\n')
        i += 1

    #Close all of the input and output filename lists to be passed to IRAF tasks.
    matchlist.close()
    scalelist.close()
    stacklist.close()
    masklist.close()
    shiftmask.close()
    finalmasks.close()
    shiftlist.close()
    stackmask.close()
    finalmasks2.close()
    expmaplist.close()
    expmaplist2.close()
    shiftexp.close()

    #Get the shifts between all input files (including the reference) and the
    #reference image itself.
    foo=file_check('shift.db',delete=True)
    iraf.geomap('@matchlist','shift.db',1.0,np.max(xsize),
                1.0,np.max(ysize),fitgeometry='shift',interactive='no',
                maxiter=2,function='legendre',verbose='no')

    #Shift the input images (including the reference) and associated mask files
    #to a common pixel grid. Add some padding around the individual frames (-99
    #in the images, 1 in the bad pixel masks) to ensure that the images will
    #combine properly.
    (maxx,maxy)=(np.max(xsize)+shiftsize+100.0,np.max(ysize)+shiftsize+100.0)
    iraf.geotran('@scalelist','@shiftlist','shift.db','@matchlist',geometry='linear',
                 boundary='constant',nlines=maxy,ncols=maxx,constant=-99.0)

    iraf.geotran('@masklist','@shiftmask','shift.db','@matchlist',geometry='linear',
                 boundary='constant',nlines=maxy,ncols=maxx,constant=1000.0,
                 nxblock=10000,nyblock=10000)
    
    iraf.geotran('@expmaplist','@shiftexp','shift.db','@matchlist',geometry='linear',
                 boundary='constant',nlines=maxy,ncols=maxx,constant=0.)

    for x in cllist:
        file_check(x+'/mask_final.fits',delete=True)
        shutil.copy(x+'/mask_shift.fits',x+'/mask_final.fits')
        iraf.hedit(x+'/scaled_and_shifted.fits[0]','BPM',x+'/mask_final.fits[0]',
                   add='yes',update='yes',verify='no')
    iraf.imreplace('@finalmasks2',0,upper=offset)
    iraf.imreplace('@finalmasks2',1,lower=offset)

    file_check(stackname,delete=True)
    file_check(stackname[:-5]+'_mask.pl',delete=True)
    file_check(stackname[:-5]+'_expmap.fits',delete=True)
    iraf.imcombine('@stacklist',stackname,bpmasks=stackname[:-5]+'_bpm',
                   masktype='goodval',reject='none',mclip='yes',lthresh='INDEF',hthresh='INDEF',
                   hsigma=10.0,lsigma='INDEF',nrejmasks=stackname[:-5]+'_nrej',
                   sigmas=stackname[:-5]+'_sigma',grow=2.5,nkeep=1,blank=-99.0,gain=8.0,rdnoise=35.0)
    iraf.imcombine('@expmaplist2',stackname[:-5]+'_expmap.fits',combine='sum')
    hdu=pf.open(stackname,mode='update')
    hdu[0].header['BPM']=stackname.split('/')[-1][:-5]+'_mask.pl'
    hdu[0].header['MAGZERO']=zpref+zprefoff
    hdu.close()

    #Fix the WCS information in the stacked image.
    copyhead(stackname,refimg,offset=shiftsize)
    applywcs(stackname,stackname[:-5]+'_wcs.fits')

    trash=['matchlist','scalelist','shiftlist','masklist','shiftmask','finalmasks',
            'shift.db','stacklist','finalmasks2','stackmask','tmp_wcs.fits','expmaplist',
            'expmaplist2','shiftexp']
    for x in trash:
        os.remove(x)
Example #13
def subtract_constant_bkgd(input_image, mask_image, output_image, growsig=0,
                           subreg=None, savemask=True, boxsize=40, 
                           boxcenter=None, fitgauss=True, show_plot=False):
   """
   Subtract a constant (median) background not masked by the mask_image, where 
   mask_image > 0 means the pixels are masked. The mask image should contain
   at least the segmentations of sources, and could also define the subregion
   of an image.

   boxsize: the size of the square box centered around the object within which
            one determines the median background
   """
   #if subreg == None:
   #   img = pyfits.getdata(input_image)
   #   subreg = [1, img.shape[0], 1, img.shape[1]]
   # output_image = os.getcwd() + '/' + os.path.split(output_image)[-1]
   img1 = pyfits.getdata(input_image)
   mask = pyfits.getdata(mask_image)
   if growsig > 0:
      mask = ssb.growmask(mask, growsig=growsig)
      mask = np.where(mask, 1, 0)
      maskout = os.path.splitext(mask_image)[0] + '_out.fits'
      if os.path.exists(maskout):
         os.remove(maskout)
      hdu = pyfits.PrimaryHDU(mask)
      hdu.writeto(maskout)
   if subreg != None:
      xmin, xmax, ymin, ymax = subreg
   elif boxsize != None:
      xc, yc = boxcenter
      xmin = xc - boxsize / 2
      xmax = xc + boxsize / 2
      ymin = yc - boxsize / 2
      ymax = yc + boxsize / 2
   else:
      xmin = 0
      xmax = img1.shape[0]
      ymin = 0
      ymax = img1.shape[1]
   print "Determining median background within the region:"
   print "(xmin, xmax, ymin, ymax) = (%d, %d, %d, %d)" % (xmin, xmax, ymin, ymax)
   mask_subreg = np.ones(img1.shape, 'int')
   mask_subreg[ymin-1:ymax,xmin-1:xmax] = 0
   mask = np.logical_or(mask, mask_subreg)
   mask = np.where(mask==True, 1, 0)
   img1_masked = np.ma.masked_array(img1, mask=mask)
   bgpix = np.compress(mask.ravel()==0, img1.ravel())
   y = np.histogram(bgpix, bins=30)
   # figure out if the best-fit peak is too far away from the peak of the 
   # histogram, which suggests bimodality
   ipeak = np.argsort(y[0])[-1]
   # calculate the bin center of the peak of histogram
   xmode = y[1][ipeak] + (y[1][1] - y[1][0]) / 2.
   if fitgauss:
      ## Determine local background by fitting a Gaussian...
      bkgd_median, bkgd_sig = gauss.fitgauss(bgpix, clip=True, clipsigma=3.0)
      # if np.abs(bkgd_median - xmode) >= 0.5 * bkgd_sig:
      # do something...?
   else:
      ## ... or by just calculating the median & standard deviation
      bkgd_median = np.ma.median(img1_masked)
      bkgd_sig = np.ma.std(img1_masked)
   num_bkgd = np.sum(mask == 0)
   print "Median sky background: %.6f" % bkgd_median
   print "1-sigma of sky background: %.6f" % bkgd_sig
   print "Number of sky pixels for determining background: %d" % num_bkgd
   if os.path.exists(output_image):
      os.remove(output_image)
   iraf.imcalc(input_image, output_image, 'im1 - %f' % bkgd_median)
   h = pyfits.open(output_image, mode='update')
   h[0].header['MEDBKGD'] = bkgd_median
   h[0].header['SIGBKGD'] = bkgd_sig
   h[0].header['NUMBKGD'] = num_bkgd
   h.flush()
   h.close()
   # Show background pixel histogram
   print "Number of background pixels:", len(bgpix)
   fig = plt.figure()
   ax = fig.add_subplot(111)
   y = ax.hist(bgpix, bins=y[1], histtype='step', lw=2.0)
   # also plot the best-fit gaussian
   x = np.linspace(bgpix.min(), bgpix.max(), num=100)
   g = gauss.gauss(x, bkgd_median, bkgd_sig)
   g = g * np.max(y[0]) / g.max()
   ax.plot(x, g, color='red', lw=2.0)
   bkgd_str = "median=%.2e\nsigma=%.2e" % (bkgd_median, bkgd_sig)
   ax.text(0.95, 0.95, bkgd_str, transform=ax.transAxes, ha='right', va='top',
           bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
   ymax = ax.get_ylim()[1]
   ax.plot([bkgd_median, bkgd_median], [0., ymax], ls='--', lw=2.0, color='black')
   plt.title('Background pixel values', size=24)
   if not show_plot:
      plt.close('all')
   if savemask:
      maskname = os.path.splitext(output_image)[0] + '_mask.fits'
      if os.path.exists(maskname):
         os.remove(maskname)
      hdu2 = pyfits.PrimaryHDU(mask)
      hdu2.writeto(maskname)
   return fig
Example #14
def combine(do_cti=False, doreduce=True, doshifts=True):

    if do_cti:
        os.system('stis_cti --crds_update')
    if doreduce:
        # Defringing didn't seem to converge because of the low S/N
        stistools.ocrreject.ocrreject('oc0102070_flc.fits',
                                      'oc0102070_crc.fits')
        iraf.normspflat(inflat='oc0102070_crc.fits',
                        outflat='oc0102070_nsp.fits',
                        do_cal='no')

        iraf.imcalc(input='oc0102070_nsp.fits',
                    output='temp_nsp.fits',
                    equals='if(x .lt. 250) then 1 else im1')
        iraf.imcopy('temp_nsp.fits[1][1:250,*]',
                    'oc0102070_nsp.fits[1][1:250,*]')

        #iraf.defringe('oc0102060_flc.fits', 'oc0102070_nsp.fits', 'oc0102060_dfr.fits')
        #for each image
        for im in ['oc0102050_flc', 'oc0102060_flc']:
            outbase = 'blue'
            if im[:-4] == 'oc0102060':
                outbase = 'red'
            #reset the aperture table to the newer file (we maybe should check this)
            pyfits.setval(im + '.fits',
                          'APERTAB',
                          value='oref$y2r1559to_apt.fits')
            pyfits.setval(im + '.fits',
                          'SPTRCTAB',
                          value='oref$qa31608go_1dt.fits')

            # fixpix any negative values. In principle some of this noise
            # could be real, but I have found that it is often not the case
            hdu = fits.open(im + '.fits')
            mask1 = hdu[1].data < -20
            mask2 = hdu[4].data < -20
            hdu.close()
            fits.writeto(outbase + 'mask1.fits',
                         mask1.astype('i'),
                         clobber=True)
            fits.writeto(outbase + 'mask2.fits',
                         mask2.astype('i'),
                         clobber=True)

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im + '[1]', outbase + 'mask1.fits')

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im + '[4]', outbase + 'mask2.fits')

            # Subtract off the median value
            hdu = fits.open(im + '.fits', mode='update')
            hdu[1].data -= np.median(hdu[1].data)
            hdu[4].data -= np.median(hdu[4].data)

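            # 1.4826 * median(|x|) is the MAD-based estimate of the Gaussian sigma of
            # the (median-subtracted) pixel values, used here as an effective read noise.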
            readnoise1 = 1.4826 * np.median(np.abs(hdu[1].data))
            readnoise2 = 1.4826 * np.median(np.abs(hdu[4].data))

            # Cosmic ray reject both images using astroscrappy
            # Make sure to treat the noise in a sensible way
            crmask1, clean1 = detect_cosmics(hdu[1].data,
                                             readnoise=readnoise1,
                                             sigclip=5,
                                             objlim=5,
                                             sigfrac=0.8,
                                             fsmode='median',
                                             psfmodel='gaussy',
                                             psffwhm=2.,
                                             cleantype='idw')

            crmask2, clean2 = detect_cosmics(hdu[4].data,
                                             readnoise=readnoise2,
                                             sigclip=5,
                                             objlim=5,
                                             sigfrac=0.8,
                                             fsmode='median',
                                             psfmodel='gaussy',
                                             psffwhm=2.,
                                             cleantype='idw')

            hdu.flush()
            hdu.close()

            fits.writeto(outbase + '_crmask1.fits',
                         crmask1.astype('i'),
                         clobber=True)
            fits.writeto(outbase + '_crmask2.fits',
                         crmask2.astype('i'),
                         clobber=True)
            # Run fixpix on the frames
            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im + '[1]', outbase + '_crmask1.fits')

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im + '[4]', outbase + '_crmask2.fits')

            if outbase == 'red':
                iraf.mkfringeflat('oc0102060_flc.fits',
                                  'oc0102070_nsp.fits',
                                  'oc0102070_frr.fits',
                                  beg_scale=0.6,
                                  end_scale=1.5,
                                  scale_step=0.01,
                                  beg_shift=-3.0,
                                  end_shift=3.0,
                                  shift_step=0.05)
                iraf.defringe('oc0102060_flc.fits', 'oc0102070_frr.fits',
                              'oc0102060_dfr.fits')
                #Run x2d on the flt frame
                stistools.x2d.x2d(input='oc0102060_dfr.fits',
                                  output=im[:-4] + 'x2d.fits')
            else:
                stistools.x2d.x2d(input='oc0102050_flc.fits',
                                  output=im[:-4] + 'x2d.fits')

            h = pyfits.open(im[:-4] + 'x2d.fits', mode='update')
            #Replace all of the bad pixels in the image by -10000 based on the DQ array
            #save them to a new file
            #Throw away bad reference file pixels and saturated pixels. None of the other error codes
            #were in the first file so I haven't included them here, but we might want to
            d = h[3].data
            badpix = logical_and(
                bitwise_and(d, 256) == 256,
                bitwise_and(d, 512) == 512)
            h[1].data[badpix] = -10000
            d = h[6].data
            badpix = logical_and(
                bitwise_and(d, 256) == 256,
                bitwise_and(d, 512) == 512)
            h[4].data[badpix] = -10000
            h.flush()

            # Trim the images
            for i in range(1, 7):
                h[i].data = h[i].data[100:-100, 100:-100]
                h[i].header['CRPIX1'] -= 100
                h[i].header['CRPIX2'] -= 100

            h.flush()
            h.close()

            # Combine the images
            iraf.unlearn(iraf.imcombine)
            iraf.imcombine(input=im[:-4] + 'x2d[1],' + im[:-4] + 'x2d[4]',
                           output=outbase + '_com.fits',
                           reject='crreject')

            hdu = pyfits.open(outbase + '_com.fits')
            mask = hdu[0].data == 0.0
            hdu.close()
            fits.writeto(outbase + '_mask.fits',
                         mask.astype('i'),
                         clobber=True)

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(outbase + '_com.fits', outbase + '_mask.fits')

            iraf.unlearn(iraf.apall)
            iraf.apall(input=outbase + '_com',
                       output='13dh_' + outbase,
                       review='no',
                       nsum=-500,
                       b_order=1,
                       b_function='legendre',
                       b_niterate=30,
                       b_naverage=-21,
                       nfind=1,
                       t_order=3,
                       background='median',
                       weights='variance',
                       skybox=100)
            iraf.splot(outbase + '[SCI]')
Example #15
def subBack(image,bkg):

    iraf.unlearn('imcalc')
    if os.path.exists(image[:-5]+'_bkgsub.fits'):
        os.remove(image[:-5]+'_bkgsub.fits')
    iraf.imcalc(image+','+bkg,image[:-5]+'_bkgsub.fits','im1-im2')
Example #16
def sigmap(inlist,
           sigmap,
           expmap='none',
           whtmap='none',
           inpref='',
           ffpref='',
           objmask='none',
           reject='sigclip',
           fscale=False,
           fbase=100,
           fhead='F1',
           gain=5.6):

    # check output image
    if os.access(sigmap, os.R_OK):
        print >> sys.stderr, 'operation would overwrite existing image (%s)' % sigmap
        return 1

    if os.access(expmap, os.R_OK):
        print >> sys.stderr, 'operation would overwrite existing image (%s)' % expmap
        return 1

    if os.access(whtmap, os.R_OK):
        print >> sys.stderr, 'operation would overwrite existing image (%s)' % whtmap
        return 1

    # check input image list
    inimg_arr = check_input(inlist, inpref)
    if isinstance(inimg_arr, int):
        return 1

    # check second input image list (ffpref frames, used for the noise estimate)
    ffimg_arr = check_input2(inlist, ffpref)
    if isinstance(ffimg_arr, int):
        return 1

    # get array size
    im = pyfits.open(inimg_arr[0])
    nx = im[0].header['NAXIS1']
    ny = im[0].header['NAXIS2']
    im.close()

    # check geomap data
    dx = []
    dy = []
    gmp_arr = []
    gmp2_arr = []
    dbs_arr = []
    for i in range(len(inimg_arr)):
        fname, ext = os.path.splitext(inimg_arr[i])
        gmp = fname + '.gmp'
        gmp2 = fname + '.gmp2'
        dbs = fname + '.dbs'

        gmp_arr.append(gmp)
        gmp2_arr.append(gmp2)
        dbs_arr.append(dbs)

        if not os.access(gmp, os.R_OK):
            print >> sys.stderr, 'geomap file (%s) does not exist' % (gmp)
            return 1

        if not os.access(dbs, os.R_OK):
            print >> sys.stderr, 'database file (%s) does not exist' % (dbs)
            return 1

        if not os.access(gmp2, os.R_OK):
            print >> sys.stderr, 'modified geomap file (%s) does not exist' % (
                gmp2)
            return 1

        fgmp = open(gmp)
        nl = 1
        dx_ave = 0.0
        dy_ave = 0.0
        for line in fgmp:
            if not line.startswith('#'):
                param = line[:-1].split()
                if len(param) != 4:
                    print >> sys.stderr, 'Invalid format in line %d of %s: %s' % (
                        nl, gmp, line[:-1])
                    fgmp.close()
                    return 1
                else:
                    if isfloat(param[0]) == False or isfloat(
                            param[1]) == False or isfloat(
                                param[2]) == False or isfloat(
                                    param[3]) == False:
                        print >> sys.stderr, 'failed to decode line %d of %s: %s' % (
                            nl, gmp, line[:-1])
                        fgmp.close()
                        return 1
                    else:
                        dx_ave += float(param[0]) - float(param[2])
                        dy_ave += float(param[1]) - float(param[3])

                nl += 1
        #print inimg_arr[i],nl
        dx.append(dx_ave / (nl - 1))
        dy.append(dy_ave / (nl - 1))

    if len(inimg_arr) != len(dx):
        print >> sys.stderr, 'number of input images does not match the number of offsets'
        return 1

    # check object mask
    if objmask.lower() == 'none':
        objmask = ''
    else:
        objmask_arr = check_inpref(objmask, inimg_arr)
        if isinstance(objmask_arr, int):
            return 1

    # prepare for temporary file
    tmp = tempfile.NamedTemporaryFile(suffix='', prefix='', dir='/tmp')
    tmp_prefix = tmp.name
    tmp.close()

    # get large array size and combined image size
    ret = get_large_region(nx, ny, dx, dy)
    if len(ret) != 6:
        print >> sys.stderr, 'failed to get large array size'
        return 1

    x_size = ret[0]
    y_size = ret[1]
    xcmin = ret[2]
    xcmax = ret[3]
    ycmin = ret[4]
    ycmax = ret[5]

    # calculate image region in the large format
    xmin = int((x_size - nx) / 2) + 1
    xmax = nx + int((x_size - nx) / 2)
    ymin = int((y_size - ny) / 2) + 1
    ymax = ny + int((y_size - ny) / 2)

    # copy image to larger format and shift image #
    iraf.unlearn('geomap')
    iraf.unlearn('geotran')

    # for exposure time weight and flux scaling
    expt_arr = []
    flux_scale_arr = []
    for i in range(len(inimg_arr)):

        # load original frame
        img = pyfits.open(inimg_arr[i])

        # for exposure time weight
        try:
            t = float(img[0].header['EXP1TIME'])
            coadd = float(img[0].header['COADDS'])
            expt_arr.append(t * coadd)
        except KeyError:
            print >> sys.stderr, 'can not read exposure time from the header of %s' % inimg_arr[
                i]
            img.close()
            return 1

        # for flux scaling and weight
        if fscale:
            try:
                flux = float(img[0].header[fhead])
            except KeyError:
                print >> sys.stderr, 'can not read flux keyword (%s) from the header of %s' % (
                    fhead, inimg_arr[i])
                img.close()
                return 1
            flux_scale_arr.append(fbase / flux)
        else:
            flux_scale_arr.append(1.0)
        img.close()

    # preparing weighted variance map for each image
    inverse_var_list = tmp_prefix + '_obj.lst'
    if os.access(inverse_var_list, os.R_OK):
        os.remove(inverse_var_list)
    finverse_var = open(inverse_var_list, 'w')

    for i in range(len(inimg_arr)):

        # mask frame
        msk = np.ones((y_size, x_size))
        msk[ymin - 1:ymax, xmin - 1:xmax] = 0
        hdu = pyfits.PrimaryHDU(msk)
        msk_img = pyfits.HDUList([hdu])
        msk_fits = tmp_prefix + 'mask' + os.path.basename(inimg_arr[i])
        msktr_fits = tmp_prefix + 'masktr' + os.path.basename(inimg_arr[i])
        if os.access(msk_fits, os.R_OK):
            os.remove(msk_fits)
        if os.access(msktr_fits, os.R_OK):
            os.remove(msktr_fits)
        msk_img.writeto(msk_fits)
        msk_img.close()

        # transform mask geometry
        iraf.geotran(msk_fits,
                     msktr_fits,
                     dbs_arr[i],
                     gmp2_arr[i],
                     geometr='linear',
                     boundar='constant',
                     constant=1)
        os.remove(msk_fits)
        convert_maskfits_int(msktr_fits, msktr_fits)

        # load original frame
        ffimg = pyfits.open(ffimg_arr[i])
        img = pyfits.open(inimg_arr[i])
        # object frame
        inverse_var = np.zeros((y_size, x_size))
        #print np.median(ffimg[0].data), ffimg_arr[i], gain, expt_arr[i], flux_scale_arr[i]
        #print np.median(np.sqrt(ffimg[0].data / (gain * expt_arr[i]))), flux_scale_arr[i]
        inverse_var[ymin - 1:ymax, xmin -
                    1:xmax] = (np.sqrt(ffimg[0].data / (gain * expt_arr[i])) *
                               flux_scale_arr[i])**-2
        hdu = pyfits.PrimaryHDU(inverse_var)
        inverse_var_img = pyfits.HDUList([hdu])
        inverse_var_img[0].header = img[0].header
        inverse_var_img[0].header['bpm'] = msktr_fits
        inverse_var_img[0].header.update('EXPTIME', expt_arr[i])
        #inverse_var_img[0].header.update('MASKSCAL', expt_arr[i])
        #inverse_var_img[0].header.update('MASKZERO', expt_arr[i])
        #print 'EXPT = %f' % (expt_arr[i])
        inverse_var_fits = tmp_prefix + 'var' + os.path.basename(inimg_arr[i])
        inverse_vartr_fits = tmp_prefix + 'vartr' + os.path.basename(
            inimg_arr[i])
        if os.access(inverse_var_fits, os.R_OK):
            os.remove(inverse_var_fits)
        if os.access(inverse_vartr_fits, os.R_OK):
            os.remove(inverse_vartr_fits)
        inverse_var_img.writeto(inverse_var_fits)
        inverse_var_img.close()
        iraf.geotran(inverse_var_fits,
                     inverse_vartr_fits,
                     dbs_arr[i],
                     gmp2_arr[i],
                     geometr='linear',
                     boundar='constant',
                     constant=0)
        finverse_var.write('%s\n' % inverse_vartr_fits)
        ffimg.close()

    # close file handlers
    finverse_var.close()

    # sum weighted variance images
    tmp_inverse_var_sum = tmp_prefix + 'inverse_var.fits'
    if os.access(tmp_inverse_var_sum, os.R_OK):
        os.remove(tmp_inverse_var_sum)
    tmp_sigma = tmp_prefix + 'sigma.fits'
    if os.access(tmp_sigma, os.R_OK):
        os.remove(tmp_sigma)
    tmp_exp = tmp_prefix + 'exp.fits'
    if os.access(tmp_exp, os.R_OK):
        os.remove(tmp_exp)

    if expmap != 'none':
        #iraf.hselect('@'+inverse_var_list,"$I,EXPTIME,MASKSCAL,MASKZERO","yes")
        iraf.imcombine('@' + inverse_var_list,
                       tmp_inverse_var_sum,
                       expmasks=tmp_exp,
                       combine='sum',
                       reject=reject,
                       masktype='!BPM',
                       maskvalue=0.0,
                       expname='EXPTIME')
    else:
        iraf.imcombine('@' + inverse_var_list,
                       tmp_inverse_var_sum,
                       combine='sum',
                       reject=reject,
                       masktype='!BPM',
                       maskvalue=0.0)

    # calculate sigma
    iraf.stsdas()
    iraf.imcalc(tmp_inverse_var_sum,
                tmp_sigma,
                'sqrt(1.0/im1)',
                pixtype='double')

    # cut image
    iraf.unlearn('imcopy')
    cut_sig = '%s[%d:%d,%d:%d]' % (tmp_sigma, xcmin, xcmax, ycmin, ycmax)
    iraf.imcopy(cut_sig, sigmap)
    if expmap != 'none':
        cut_exp = '%s[%d:%d,%d:%d]' % (tmp_exp, xcmin, xcmax, ycmin, ycmax)
        iraf.imcopy(cut_exp, expmap)

    # calc weight map
    if whtmap != 'none':
        cut_wht = '%s[%d:%d,%d:%d]' % (tmp_inverse_var_sum, xcmin, xcmax,
                                       ycmin, ycmax)
        iraf.imcopy(cut_wht, whtmap)

    # delete temporary object files
    os.remove(inverse_var_list)

    # remove all temporary files
    remove_temp_all(tmp_prefix)

    return 0
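
The noise model behind the imcalc call above: each frame contributes an inverse-variance image, imcombine sums them, and sqrt(1/sum) gives the final per-pixel sigma. A small pure-NumPy sketch with illustrative numbers:

import numpy as np

gain, exptime, flux_scale = 5.6, 60.0, 1.0                       # illustrative values
ff = np.full((2, 2), 400.0)                                      # flat-fielded counts per pixel
inverse_var = (np.sqrt(ff / (gain * exptime)) * flux_scale)**-2  # per-frame inverse variance
inverse_var_sum = inverse_var + inverse_var                      # imcombine 'sum' of two such frames
sigma = np.sqrt(1.0 / inverse_var_sum)                           # same as the final imcalc step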
Example #17
def imshiftcomb(inlist,
                outimg,
                fitgeom='shift',
                inpref='',
                objmask='none',
                combine='average',
                reject='none',
                fscale=False,
                fbase=100,
                fhead='F1',
                second=False,
                first_pref='sdfr',
                second_pref='sdf2r',
                indep=False,
                sigmap='none',
                expmap='none',
                whtmap='none',
                gain=5.6,
                ffpref=''):

    # check output image
    if os.access(outimg, os.R_OK):
        print >> sys.stderr, 'operation would overwrite existing image (%s)' % outimg
        return 1

    # check input image list
    inimg_arr = check_input(inlist, inpref)
    if isinstance(inimg_arr, int):
        return 1

    # check second input image list (ffpref frames, used for the sigma map)
    if sigmap != 'none':
        ffimg_arr = check_input2(inlist, ffpref)
        if isinstance(ffimg_arr, int):
            return 1

    # check optional output image
    if sigmap != 'none':
        if os.access(sigmap, os.R_OK):
            print >> sys.stderr, 'operation would overwrite existing image (%s)' % sigmap
            return 1
    if expmap != 'none':
        if os.access(expmap, os.R_OK):
            print >> sys.stderr, 'operation would overwrite existing image (%s)' % expmap
            return 1

    if whtmap != 'none':
        if os.access(whtmap, os.R_OK):
            print >> sys.stderr, 'operation would overwrite existing image (%s)' % whtmap
            return 1

    # get array size
    im = pyfits.open(inimg_arr[0])
    nx = im[0].header['NAXIS1']
    ny = im[0].header['NAXIS2']
    im.close()

    # check geomap data
    dx = []
    dy = []
    gmp_arr = []
    gmp2_arr = []
    dbs_arr = []
    for i in range(len(inimg_arr)):
        fname, ext = os.path.splitext(inimg_arr[i])
        gmp = fname + '.gmp'
        gmp2 = fname + '.gmp2'
        dbs = fname + '.dbs'

        gmp_arr.append(gmp)
        gmp2_arr.append(gmp2)
        dbs_arr.append(dbs)

        if not os.access(gmp, os.R_OK):
            print >> sys.stderr, 'geomap file (%s) does not exist' % (gmp)
            return 1

        if os.access(dbs, os.R_OK):
            print >> sys.stderr, 'database file (%s) already exists' % (dbs)
            return 1

        if os.access(gmp2, os.R_OK):
            print >> sys.stderr, 'modified geomap file (%s) already exists' % (
                gmp2)
            return 1

        fgmp = open(gmp)
        nl = 1
        dx_ave = 0.0
        dy_ave = 0.0
        for line in fgmp:
            if not line.startswith('#'):
                param = line[:-1].split()
                if len(param) != 4:
                    print >> sys.stderr, 'Invalid format in line %d of %s: %s' % (
                        nl, gmp, line[:-1])
                    fgmp.close()
                    return 1
                else:
                    if isfloat(param[0]) == False or isfloat(
                            param[1]) == False or isfloat(
                                param[2]) == False or isfloat(
                                    param[3]) == False:
                        print >> sys.stderr, 'failed to decode line %d of %s: %s' % (
                            nl, gmp, line[:-1])
                        fgmp.close()
                        return 1
                    else:
                        dx_ave += float(param[0]) - float(param[2])
                        dy_ave += float(param[1]) - float(param[3])

                nl += 1
        #print inimg_arr[i],nl
        dx.append(dx_ave / (nl - 1))
        dy.append(dy_ave / (nl - 1))

    if len(inimg_arr) != len(dx):
        print >> sys.stderr, 'number of input images does not match the number of offsets'
        return 1

    #print 'debug'
    #print dx, max(dx), min(dx)
    #print dy, max(dy), min(dy)

    # check object mask
    if objmask.lower() == 'none':
        objmask = ''
    else:
        objmask_arr = check_inpref(objmask, inimg_arr)
        if isinstance(objmask_arr, int):
            return 1

    # independent run flag
    if indep:
        second = True

    # prepare for temporary file
    tmp = tempfile.NamedTemporaryFile(suffix='', prefix='', dir='/tmp')
    tmp_prefix = tmp.name
    tmp.close()

    # calculate image median for zero shift
    iraf.unlearn('imstat')
    iraf.unlearn('mimstat')
    bgmed = []
    for i in range(len(inimg_arr)):
        if objmask == '':
            ret = iraf.imstat(inimg_arr[i],
                              format='no',
                              fields='midpt',
                              nclip=50,
                              lsigma=3.,
                              usigma=3.,
                              Stdout=1)
        else:
            if not second:
                ret = iraf.mimstat(inimg_arr[i],
                                   imasks=objmask_arr[i] + '[pl]',
                                   format='no',
                                   fields='midpt',
                                   nclip=50,
                                   lsigma=3.,
                                   usigma=3.,
                                   Stdout=1)
            else:
                ret = iraf.mimstat(inimg_arr[i],
                                   imasks=objmask_arr[i],
                                   format='no',
                                   fields='midpt',
                                   nclip=50,
                                   lsigma=3.,
                                   usigma=3.,
                                   Stdout=1)
        if len(ret) == 1:
            bgmed.append(-1.0 * float(ret[0]))
        else:
            remove_temp_all(tmp_prefix)
            print >> sys.stderr, 'failed to calculate median of the background in %s' % inimg_arr[
                i]
            return 1

    # get large array size and combined image size
    ret = get_large_region(nx, ny, dx, dy)
    if len(ret) != 6:
        print >> sys.stderr, 'failed to get large array size'
        return 1

    x_size = ret[0]
    y_size = ret[1]
    xcmin = ret[2]
    xcmax = ret[3]
    ycmin = ret[4]
    ycmax = ret[5]

    # calculate image region in the large format
    xmin = int((x_size - nx) / 2) + 1
    xmax = nx + int((x_size - nx) / 2)
    ymin = int((y_size - ny) / 2) + 1
    ymax = ny + int((y_size - ny) / 2)

    #print 'debug'
    #print x_size, y_size, xcmin, xcmax, ycmin, ycmax
    #print xmin, xmax, ymin, ymax

    # copy image to larger format and shift image #
    iraf.unlearn('geomap')
    iraf.unlearn('geotran')

    obj_list = tmp_prefix + '_obj.lst'
    if os.access(obj_list, os.R_OK):
        os.remove(obj_list)
    fobj = open(obj_list, 'w')

    # for exposure time weight
    expweight = tmp_prefix + '_exp.lst'
    if os.access(expweight, os.R_OK):
        os.remove(expweight)
    fexp = open(expweight, 'w')

    # for zero offset
    zeroshift = tmp_prefix + '_zeroshift.dat'
    if os.access(zeroshift, os.R_OK):
        os.remove(zeroshift)
    fzero = open(zeroshift, 'w')

    # save the original fit geometry
    fitgeom_org = fitgeom

    # preparing for the sigma list and mask
    if sigmap == 'none':
        tmp_rejmask = ''
    else:
        tmp_rejmask = tmp_prefix + 'rejmask.fits'
        if os.access(tmp_rejmask, os.R_OK):
            os.remove(tmp_rejmask)
        inverse_var_list = tmp_prefix + '_var.lst'
        if os.access(inverse_var_list, os.R_OK):
            os.remove(inverse_var_list)
        finverse_var = open(inverse_var_list, 'w')

    for i in range(len(inimg_arr)):

        # restore the original fit geometry
        fitgeom = fitgeom_org

        # geometry transformation
        fgmp = open(gmp_arr[i])
        fgmp2 = open(gmp2_arr[i], 'w')
        nobj = 0
        for line in fgmp:
            if not line.startswith('#'):
                param = line[:-1].split()
                xref = float(param[0]) + xmin - 1
                yref = float(param[1]) + ymin - 1
                xin = float(param[2]) + xmin - 1
                yin = float(param[3]) + ymin - 1
                fgmp2.write('%.3f %.3f %.3f %.3f\n' % (xref, yref, xin, yin))
                nobj += 1
        fgmp.close()
        fgmp2.close()

        # check number of objects
        if i == 0 and nobj == 1 and fitgeom == 'rotate':
            print 'Warning: Number of reference objects is not enough to measure the rotation'
            print 'Warning: Only shift applied for all images'
            fitgeom = 'shift'
            fitgeom_org = 'shift'

        if nobj == 1 and fitgeom == 'rotate':
            print 'Warning: Number of objects in %s is not enough to measure the rotation' % (
                inimg_arr[i])
            print 'Warning: Only shift applied for %s' % (inimg_arr[i])
            fitgeom = 'shift'

        # mapping geometry
        iraf.geomap(gmp2_arr[i],
                    dbs_arr[i],
                    1,
                    x_size,
                    1,
                    y_size,
                    fitgeom=fitgeom,
                    interac='no')

        # mask frame
        msk = np.ones((y_size, x_size))
        msk[ymin - 1:ymax, xmin - 1:xmax] = 0
        hdu = pyfits.PrimaryHDU(msk)
        msk_img = pyfits.HDUList([hdu])
        msk_fits = tmp_prefix + 'mask' + os.path.basename(inimg_arr[i])
        msktr_fits = tmp_prefix + 'masktr' + os.path.basename(inimg_arr[i])
        if os.access(msk_fits, os.R_OK):
            os.remove(msk_fits)
        if os.access(msktr_fits, os.R_OK):
            os.remove(msktr_fits)
        msk_img.writeto(msk_fits)
        msk_img.close()

        # transform mask geometry
        iraf.geotran(msk_fits,
                     msktr_fits,
                     dbs_arr[i],
                     gmp2_arr[i],
                     geometr='linear',
                     boundar='constant',
                     constant=1)
        os.remove(msk_fits)
        convert_maskfits_int(msktr_fits, msktr_fits)

        # load original frame
        img = pyfits.open(inimg_arr[i])
        if sigmap != 'none':
            ffimg = pyfits.open(ffimg_arr[i])

        # for exposure time weight
        try:
            t = float(img[0].header['EXP1TIME'])
            coadd = float(img[0].header['COADDS'])
            expt = t * coadd
        except KeyError:
            print >> sys.stderr, 'can not read exposure time from the header of %s' % inimg_arr[
                i]
            img.close()
            return 1

        # for flux scaling and weight
        if fscale:
            try:
                flux = float(img[0].header[fhead])
            except KeyError:
                print >> sys.stderr, 'can not read flux keyword (%s) from the header of %s' % (
                    fhead, inimg_arr[i])
                img.close()
                return 1

            flux_scale = fbase / flux
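            # scaling a frame by flux_scale scales its noise by the same factor,
            # so a weight of expt * (1/flux_scale)**2 below approximates
            # inverse-variance weighting for the flux-scaled frames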
            weight = expt * (1.0 / flux_scale)**2

        else:
            flux_scale = 1.0
            weight = expt

        fzero.write('%f\n' % (bgmed[i] * flux_scale))
        fexp.write('%f\n' % weight)

        # object frame
        obj = np.zeros((y_size, x_size))
        obj[ymin - 1:ymax, xmin - 1:xmax] = img[0].data * flux_scale
        hdu = pyfits.PrimaryHDU(obj)
        obj_img = pyfits.HDUList([hdu])
        obj_img[0].header = img[0].header
        obj_img[0].header['bpm'] = msktr_fits
        obj_img[0].header['expmap'] = expt
        obj_fits = tmp_prefix + 'obj' + os.path.basename(inimg_arr[i])
        objtr_fits = tmp_prefix + 'objtr' + os.path.basename(inimg_arr[i])
        if os.access(obj_fits, os.R_OK):
            os.remove(obj_fits)
        obj_img.writeto(obj_fits)
        obj_img.close()
        iraf.geotran(obj_fits,
                     objtr_fits,
                     dbs_arr[i],
                     gmp2_arr[i],
                     geometr='linear',
                     boundar='constant',
                     constant=0)
        fobj.write('%s\n' % objtr_fits)
        img.close()

        if sigmap != 'none':
            inverse_var = np.zeros((y_size, x_size))
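            # Poisson-only noise model: for a count-rate frame, sqrt(flux / (gain * expt))
            # is the 1-sigma noise in the frame's own units; scaling by flux_scale and
            # raising to the -2 power gives the inverse variance of the scaled frame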
            inverse_var[ymin - 1:ymax, xmin -
                        1:xmax] = (np.sqrt(ffimg[0].data / (gain * expt)) *
                                   flux_scale)**-2
            hdu_var = pyfits.PrimaryHDU(inverse_var)
            inverse_var_img = pyfits.HDUList([hdu_var])
            inverse_var_img[0].header = img[0].header
            inverse_var_img[0].header['bpm2'] = '%s[*,*,%d]' % (tmp_rejmask,
                                                                i + 1)
            inverse_var_fits = tmp_prefix + 'var' + os.path.basename(
                inimg_arr[i])
            inverse_vartr_fits = tmp_prefix + 'vartr' + os.path.basename(
                inimg_arr[i])
            if os.access(inverse_var_fits, os.R_OK):
                os.remove(inverse_var_fits)
            if os.access(inverse_vartr_fits, os.R_OK):
                os.remove(inverse_vartr_fits)
            inverse_var_img.writeto(inverse_var_fits)
            inverse_var_img.close()
            iraf.geotran(inverse_var_fits,
                         inverse_vartr_fits,
                         dbs_arr[i],
                         gmp2_arr[i],
                         geometr='linear',
                         boundar='constant',
                         constant=0)
            finverse_var.write('%s\n' % inverse_vartr_fits)
            ffimg.close()

    # close file handlers
    fobj.close()
    fexp.close()
    fzero.close()
    if sigmap != 'none':
        finverse_var.close()

    # combine image
    comb_img = tmp_prefix + '_comb.fits'
    if os.access(comb_img, os.R_OK):
        os.remove(comb_img)
    if expmap == 'none':
        tmp_expmap = ''
    else:
        tmp_expmap = tmp_prefix + 'expmap.fits'
        if os.access(tmp_expmap, os.R_OK):
            os.remove(tmp_expmap)

    iraf.unlearn('imcombine')
    try:
        iraf.imcombine('@' + obj_list,
                       comb_img,
                       sigma='',
                       rejmask=tmp_rejmask,
                       expmasks=tmp_expmap,
                       combine=combine,
                       reject=reject,
                       masktype='!BPM',
                       maskvalue=0.0,
                       zero='@' + zeroshift,
                       weight='@' + expweight,
                       expname='EXPMAP')
    except:
        if os.access(comb_img, os.R_OK):
            os.remove(comb_img)
        if expmap != 'none':
            if os.access(tmp_expmap, os.R_OK):
                os.remove(tmp_expmap)
        iraf.imcombine('@' + obj_list,
                       comb_img,
                       sigma='',
                       rejmask='',
                       expmasks=tmp_expmap,
                       combine=combine,
                       reject=reject,
                       masktype='!BPM',
                       maskvalue=0.0,
                       zero='@' + zeroshift,
                       weight='@' + expweight,
                       expname='EXPMAP')

    if sigmap != 'none':
        tmp_inverse_var_sum = tmp_prefix + 'inverse_var.fits'
        if os.access(tmp_inverse_var_sum, os.R_OK):
            os.remove(tmp_inverse_var_sum)
        iraf.imcombine('@' + inverse_var_list,
                       tmp_inverse_var_sum,
                       combine='sum',
                       reject='none',
                       masktype='!BPM',
                       maskvalue=0.0)
        iraf.stsdas()
        tmp_sigma = tmp_prefix + 'sigma.fits'
        if os.access(tmp_sigma, os.R_OK):
            os.remove(tmp_sigma)
        iraf.imcalc(tmp_inverse_var_sum,
                    tmp_sigma,
                    'sqrt(1.0/im1)',
                    pixtype='double')

    # cut image
    iraf.unlearn('imcopy')
    cut_img = '%s[%d:%d,%d:%d]' % (comb_img, xcmin, xcmax, ycmin, ycmax)
    iraf.imcopy(cut_img, outimg)
    if expmap != 'none':
        cut_exp = '%s[%d:%d,%d:%d]' % (tmp_expmap, xcmin, xcmax, ycmin, ycmax)
        iraf.imcopy(cut_exp, expmap)
    if sigmap != 'none':
        cut_sigma = '%s[%d:%d,%d:%d]' % (tmp_sigma, xcmin, xcmax, ycmin, ycmax)
        iraf.imcopy(cut_sigma, sigmap)
        if whtmap != 'none':
            cut_wht = '%s[%d:%d,%d:%d]' % (tmp_inverse_var_sum, xcmin, xcmax,
                                           ycmin, ycmax)
            iraf.imcopy(cut_wht, whtmap)

    # delete temporary object files
    remove_temp_all(tmp_prefix + 'obj')
    os.remove(obj_list)
    os.remove(comb_img)

    # record relative offset between input images and combined image and rotation
    for i in range(len(inimg_arr)):
        im = pyfits.open(inimg_arr[i], mode='update')

        if second:

            if indep:

                # calculate offset
                dxc = xcmin - xmin - dx[i]
                dyc = ycmin - ymin - dy[i]

                # retrieve rotation
                rot = 0.0
                fdbs = open(dbs_arr[i])
                for line in fdbs:
                    param = line[:-1].split()
                    if param[0] == 'xrotation':
                        rot = float(param[1])

                if rot > 180.0:
                    rot = rot - 360.0

                im[0].header['dx'] = dx[i]
                im[0].header['dy'] = dy[i]
                im[0].header['dxc'] = dxc
                im[0].header['dyc'] = dyc
                im[0].header['rotation'] = rot

            else:
                # check number of objects in the geomap file
                nobj = 0
                fgmp = open(gmp_arr[0])
                for line in fgmp:
                    nobj += 1

                im1 = pyfits.open(inimg_arr[i].replace(second_pref,
                                                       first_pref),
                                  mode='update')
                for j in range(nobj):
                    key = 'XC%d' % (j + 1)
                    im[0].header[key] = float(im1[0].header[key])
                    key = 'YC%d' % (j + 1)
                    im[0].header[key] = float(im1[0].header[key])
                    key = 'PEAK%d' % (j + 1)
                    im[0].header[key] = float(im1[0].header[key])
                    key = 'FWHM%d' % (j + 1)
                    im[0].header[key] = float(im1[0].header[key])
                key = 'DX'
                im[0].header[key] = float(im1[0].header[key])
                key = 'DY'
                im[0].header[key] = float(im1[0].header[key])
                key = 'DXC'
                im[0].header[key] = float(im1[0].header[key])
                key = 'DYC'
                im[0].header[key] = float(im1[0].header[key])
                key = 'ROTATION'
                im[0].header[key] = float(im1[0].header[key])
                im1.close()

        else:

            # calculate offset
            dxc = xcmin - xmin - dx[i]
            dyc = ycmin - ymin - dy[i]

            # retrieve rotation
            rot = 0.0
            fdbs = open(dbs_arr[i])
            for line in fdbs:
                param = line[:-1].split()
                if param[0] == 'xrotation':
                    rot = float(param[1])

            if rot > 180.0:
                rot = rot - 360.0

            im[0].header['dx'] = dx[i]
            im[0].header['dy'] = dy[i]
            im[0].header['dxc'] = dxc
            im[0].header['dyc'] = dyc
            im[0].header['rotation'] = rot

        im.close()

    # remove all temporary files
    remove_temp_all(tmp_prefix)

    return 0
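# A minimal NumPy sketch (illustration only, not part of the pipeline above) of
# the sigma-map step: the pipeline sums the per-frame inverse-variance images
# with iraf.imcombine and converts the sum to a 1-sigma map with iraf.imcalc
# 'sqrt(1.0/im1)'. The function and array names below are hypothetical.
import numpy as np

def sigma_from_noise_frames(noise_frames):
    """noise_frames: per-frame 1-sigma noise arrays of identical shape."""
    inv_var_sum = np.zeros_like(noise_frames[0], dtype=float)
    for sig in noise_frames:
        inv_var_sum += 1.0 / sig**2      # accumulate inverse variances
    return np.sqrt(1.0 / inv_var_sum)    # combined 1-sigma per pixel

# e.g. sigma_from_noise_frames([np.full((2, 2), 5.0), np.full((2, 2), 12.0)])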
Beispiel #18
0
def make_iracpsf(psfname,
                 oversample=10,
                 interp='sinc',
                 iracpix=0.6,
                 radius=3.0):
    """
   Make an oversampled IRAC PSF given the PSF image in the IRAC pixel scale,
   for use in TFIT.
   psfname: the input IRAC PSF
   oversample: the oversampling factor (default is 10)
   interp: the interpolation algorithm (default is 'sinc')
           other options are nearest, poly3, poly5, spline3, sinc, lsinc, 
           and drizzle.
   iracpix: the pixel scale (in arcsec) of the input IRAC PSF (default is 0.6)
   radius: the radius of the circularized PSF in arcsec (beyond which is set to 0)
   """
    newscale = int(round(iracpix / oversample * 1000))
    # the new pixel scale in milli-arcsec
    interp = interp.replace('[', '_')
    interp = interp.replace(']', '_')
    psfroot = os.path.splitext(psfname)[0]
    psfroot = psfroot + '_%dmas' % newscale
    psfroot = psfroot + '_%s' % interp
    size0 = pyfits.getdata(psfname).shape[0]
    size1 = size0 * oversample
    # First, run iraf.imlintran
    factor = 1. / oversample
    print "factor", factor
    if os.path.exists('temp1.fits'):
        os.remove('temp1.fits')
    iraf.imlintran(psfname,
                   'temp1.fits',
                   0.,
                   0.,
                   factor,
                   factor,
                   ncols=size1,
                   nlines=size1,
                   interpolant=interp,
                   fluxconserve='yes')
    # Second, circularize the PSF, assume that the input PSF has equal numbers
    # of rows and columns
    imgsize = pyfits.getdata('temp1.fits').shape[0]
    imgcenter = (imgsize + 1) / 2
    print "imgcenter", imgcenter
    pixrad = radius / iracpix * oversample
    pixrad = int(np.round(pixrad))
    mathstr = 'if ((x-%d)**2 + (y-%d)**2) < %f**2 then im1 else 0' % (
        imgcenter, imgcenter, pixrad)
    print "mathstr", mathstr
    if os.path.exists('temp2.fits'):
        os.remove('temp2.fits')
    iraf.imcalc('temp1.fits', 'temp2.fits', mathstr)
    # Third, trim the borders; shed border with units of 100 pixels
    nborder = ((imgsize - 2 * pixrad) / 2) / 100
    print "nborder:", nborder
    if nborder > 0:
        begin = nborder * 100 + 1
        end = imgsize - nborder * 100 - 1
        iraf.imcopy('temp2.fits[%d:%d,%d:%d]' % (begin, end, begin, end),
                    psfroot + '.fits')
    else:
        os.system('mv temp2.fits %s' % (psfroot + '.fits'))
    os.system('rm temp*.fits')
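# A rough NumPy equivalent (illustration only) of the circularization step in
# make_iracpsf above, i.e. the imcalc expression
# 'if ((x-xc)**2 + (y-yc)**2) < r**2 then im1 else 0', assuming a square PSF
# array; the function name and the 1-indexed centre convention are assumptions.
import numpy as np

def circularize_psf(psf, pixrad):
    """Zero out PSF pixels farther than pixrad from the image centre."""
    size = psf.shape[0]
    center = (size + 1) / 2.0                  # 1-indexed centre, as in IRAF
    y, x = np.mgrid[1:size + 1, 1:size + 1]    # 1-indexed pixel coordinates
    keep = (x - center)**2 + (y - center)**2 < pixrad**2
    return np.where(keep, psf, 0.0)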
Beispiel #19
0
def irac_pipeline(drz_img, unc_img, channel, rmsmode='unc', pixscale=0.6, gain=3.7,
                  zeromask=None, bgmask=None, source_threshold=3,
                  mask_growsig=5, subreg='none', aperture_width=7,
                  nnear=23, constant_bkgd=False, cluster='bullet',
                  inmask_checkrms='ch1_pf0.01_cut_mask.fits'):
   """
   drz_img: the file name of the IRAC science mosaic (in MJy/sr)
   unc_img: the file name of the IRAC uncertainty image (in MJy/sr)
   channel: the IRAC channel name (either 'ch1' or 'ch2')
   pixscale: the pixel scale of the input IRAC mosaic in arcsec/pixel
   gain: the CCD gain of the IRAC mosaic
   zeromask: the image name that provides which pixels to be zeroed out at the 
             end of this pipeline, both in the drz image and the RMS image
   bgmask: user-provided mask for background subtraction (optional)
   subreg: a list (or array) of four numbers (xmin, xmax, ymin, ymax) that
           specifies a region in which to measure the RMS. If None, the
           cluster-specific default region (subreg_default[cluster]) is used;
           if 'none', the entire image is used.
   rmsmode: either 'cov' (use the coverage map to calculate the RMS map) or 
            'unc' (use the uncertainty map from the MOPEX pipeline)
   """
   clean_pipeline(drz_img)
   print "Step 1: convert image units from MJy/sr to DN/sec..."   
   drz_new1 = os.path.split(drz_img)[-1][:-5] + '_MJy.fits'  # without converting to DN/sec
   iraf.imcopy(drz_img, drz_new1)

   if zeromask != None:
      # zero out the masked pixel before entering background subtraction stage
      iraf.imcalc("%s,%s" % (drz_new1, zeromask), "temp1.fits", 
                  "if im2 > 0 then 0. else im1")
      os.remove(drz_new1)
      os.system('mv temp1.fits %s' % drz_new1)

   # Run background-subtraction step
   print "Step 2: subtract background from the science mosaic..."
   print "See ch1_sub_bkgd.param for explanations of each parameter."
   # Use the default parameters here... can maybe fiddle with these numbers 
   # later?
   drz_root2 = os.getcwd() + '/' + os.path.split(drz_new1)[-1][:-5]
   drz_new3 = drz_root2 + '_bgcorr.fits'
   if constant_bkgd == True:
      print "Subtracting a constant background within the designated region..."
      if bgmask == None:
         print "Error: must supply a background mask if one wants to subtract a constant background..."
         sys.exit()
      else:
         subtract_constant_bkgd(drz_new1, bgmask, drz_new3, 
                                growsig=mask_growsig)
   else:
      bkgd_parfile = os.path.split(drz_new1)[-1][:-5] + '_bkgd.param'
      print "bkgd_parfile", bkgd_parfile
      f = open(os.getcwd() + '/' + bkgd_parfile, 'wb')
      #drz_root2 = drz_new1[:-5]
      
      f.write('IMAGE \t %s\n' % drz_new1)
      f.write('OUT_MASK \t %s\n' % (drz_root2 + '_bgmask.fits'))
      f.write('OUT_BKGD \t %s\n' % (drz_root2 + '_bkgd.fits'))
      f.write('OUT_BKGD_SUBTRACTED \t %s\n' % (drz_root2 + '_bgcorr.fits'))
      f.write('OUT_NEWCOLLAGE \t %s\n' % (drz_root2 + '_bkgdcoll.fits'))
      f.write('NNEAR \t %d\n' % nnear)
      f.write('SOURCE_THRESHOLD \t %f\n' % float(source_threshold))
      f.write('MASK_GROWSIG \t %f\n' % float(mask_growsig))
      f.write('DQEXT \t 0\n')
      f.write('CRUDESUB \t 1\n')
      if subreg == None:
         f.write('SUBREG \t %d,%d,%d,%d\n' % tuple(subreg_default[cluster]))
      elif subreg == "none":
         f.write('SUBREG \t none\n')
      else:
         f.write('SUBREG \t %d,%d,%d,%d' % tuple(subreg))
      f.write('\n')
      f.write('DILATE_FACTOR \t 2\n')
      f.write('DILATE_THRESH \t 300\n')
      f.write('DILATE_TOT_THRESH \t 500\n')
      f.write('DILATE_SATURATED \t 1\n')
      f.write('GAIN \t %f\n' % gain)
      f.write('OUT_LABEL \t %s\n' % (drz_root2 + '_bgmask_labels.fits'))
      f.write('\n')
      f.write('MAKE_RMS \t 0\n')
      f.write('OUT_RMS \t %s\n' % (drz_root2 + '_rms_estimated.fits'))
      f.write('RMS_SMOOTH \t 100\n')   
      f.write('\n')
      f.write('OUT_CRUDE \t %s\n' % (drz_root2 + '_simple_bgcorr.fits'))
      f.write('OUT_FILT  \t %s\n' % (drz_root2 + '_bkgd.filt.fits'))
      if bgmask != None:
         f.write('IN_MASK \t %s\n' % bgmask)
      f.write('APERTURE_WIDTH \t %d\n' % aperture_width)
      f.write('\n')
      f.close()
      ssb.run(bkgd_parfile)
      # Make an image of background pixels after subtraction
      #if os.path.exists(drz_root2+'_bgpix.fits'):
      #   os.remove(drz_root2+'_bgpix.fits')
      #iraf.imcalc('%s,%s' % (drz_root2+'_bgcorr.fits','ch1_pf0.01_cut_seg.fits'),
      #            drz_root2+'_bgpix.fits','if im2 > 0 then -99.0 else im1')
   
   if zeromask != None:
      # again, zero out the masked pixels in the background-subtracted image
      print "zero out the masked pixels in the background-subtracted image..."
      iraf.imcalc("%s,%s" % (drz_new3, zeromask), "temp2.fits", 
                  "if im2 > 0 then 0. else im1")
      os.remove(drz_new3)
      os.system('mv temp2.fits %s' % drz_new3)


   # print "Step 3: re-scale the uncertainty image"
   if rmsmode == 'cov':
      print "Step 3: make RMS map from coverage map, then re-scale the uncertainty image"
      cov_img = unc_img
      rms_new1 = drz_img[:-9] + '_rms.fits'
      if os.path.exists(rms_new1):
         os.remove(rms_new1)
      iraf.imcalc(cov_img, rms_new1, 
                  "if im1 == 0. then 1000. else 1./sqrt(im1*%f)" % exptimes[channel])
      # use coverage map (in units of seconds) as the weight map to calculate
      # the RMS map
      unc_new1 = rms_new1  # to comply with previous version
   else:
      print "Step 3: use the uncertainty map as the RMS map, and use FLUXCONV to convert to DN/sec "
      print "(the important point is to convert the RMS map to the same unit as the science map)"
      #unc_new1 = unc_img[:-5] + '_DNS.fits'
      unc_new1 = unc_img[:-5] + '_MJy.fits'
      #iraf.imcalc(unc_img, unc_new1, 'im1/%.4f' % (factor * fluxconv[channel]))
      #iraf.imcalc(unc_img, unc_new1, 'im1/%.4f' % (fluxconv[channel]))
      iraf.imcopy(unc_img, unc_new1)  # without converting to DN/sec
   try:
      inmask = pyfits.getdata(inmask_checkrms)
   except:
      inmask = None
   rmscheck.checkrms(pyfits.getdata(drz_new3), pyfits.getdata(unc_new1),
                     inmask=inmask, growsig=0.2)
   try:
      unc_scale = float(raw_input('Now enter the scaling factor for the uncertainty image: '))
   except:
      print 'Need to enter a number here.'
      sys.exit()
   unc_new2 = unc_new1[:-5] + '_scaled.fits'
   if os.path.exists(unc_new2):
      os.remove(unc_new2)
   print "Multiply the uncertainty image by a factor of %f" % unc_scale
   iraf.imcalc(unc_new1, unc_new2, 'im1*%f' % unc_scale)

   print "Step 4: make the source-weighted RMS map..."
   # Here both the science image and the (re-scaled) RMS map are in units of 
   # DN/sec
   #mathstr = 'sqrt(im1**2 + im2/%f)' % gain
   #mathstr = 'sqrt(im1**2 + im2)'  # a different weighting scheme
   #mathstr = 'sqrt((im1*%f)**2 + im2*%f) / %f' % (gain, gain, gain)   
   #mathstr = 'sqrt((im1*%f)**2 + im2) / %f' % (gain, gain)
   mathstr = 'im1 * 1.0'
   # currently an identity operation; the photon-unit weighting schemes above are commented out
   unc_new4 = unc_new2[:-5] + '_src.fits'
   if os.path.exists(unc_new4):
      os.remove(unc_new4)
   iraf.imcalc('%s,%s' % (unc_new2,drz_new3), unc_new4, mathstr)
   #iraf.imreplace(unc_new4, 1000., lo=0., up=0.)
   iraf.imcalc('%s,%s' % (unc_new4,unc_new2), 'temp_unc.fits', 
              'if im1 <= 0. then im2 else im1')
   os.system('mv temp_unc.fits %s' % unc_new4)   
   # Extra steps


   print "All steps finished. A summary:"
   print "Background-subtracted science mosaic: %s" % drz_new3
   print "Source-weighted, re-scaled RMS map: %s" % unc_new4
   print "All images converted into DN/sec."
Beispiel #20
0
def combine(do_cti=False, doreduce=True, doshifts=True):

    if do_cti:
        os.system('stis_cti --crds_update')
    if doreduce:
        # Defringing didn't seem to converge because of the low S/N
        stistools.ocrreject.ocrreject('oc0102070_flc.fits','oc0102070_crc.fits')
        iraf.normspflat(inflat='oc0102070_crc.fits',outflat='oc0102070_nsp.fits', do_cal='no')

        iraf.imcalc(input='oc0102070_nsp.fits', output='temp_nsp.fits', equals='if(x .lt. 250) then 1 else im1')
        iraf.imcopy('temp_nsp.fits[1][1:250,*]', 'oc0102070_nsp.fits[1][1:250,*]')

        #iraf.defringe('oc0102060_flc.fits', 'oc0102070_nsp.fits', 'oc0102060_dfr.fits')
        #for each image
        for im in ['oc0102050_flc','oc0102060_flc']:
            outbase = 'blue'
            if im[:-4] == 'oc0102060':
                outbase = 'red'
            #reset the aperture table to the newer file (we maybe should check this)
            pyfits.setval(im +'.fits','APERTAB',value='oref$y2r1559to_apt.fits')
            pyfits.setval(im +'.fits', 'SPTRCTAB', value='oref$qa31608go_1dt.fits')

            # fixpix any negative values. In principle some of this noise
            # could be real, but I have found that is often not the case
            hdu = fits.open(im+ '.fits')
            mask1 = hdu[1].data < -20
            mask2 = hdu[4].data < -20
            hdu.close()
            fits.writeto(outbase+'mask1.fits', mask1.astype('i'), clobber=True)
            fits.writeto(outbase+'mask2.fits', mask2.astype('i'), clobber=True)

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im+'[1]', outbase+'mask1.fits')

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im+'[4]', outbase+'mask2.fits')

            # Subtract off the median value
            hdu = fits.open(im+ '.fits', mode='update')
            hdu[1].data -= np.median(hdu[1].data)
            hdu[4].data -= np.median(hdu[4].data)

            readnoise1 = 1.4826 * np.median(np.abs(hdu[1].data))
            readnoise2 = 1.4826 * np.median(np.abs(hdu[4].data))
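            # 1.4826 * median(|x|) is the standard MAD-based robust sigma
            # estimator for zero-median Gaussian noise (the frames were
            # median-subtracted just above), so these values approximate the
            # background RMS passed to detect_cosmics as an effective read noise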

            # Cosmic-ray reject both images using astroscrappy's detect_cosmics,
            # making sure to treat the noise in a sensible way
            crmask1, clean1 = detect_cosmics(hdu[1].data, readnoise=readnoise1,
                                             sigclip=5, objlim=5, sigfrac=0.8,
                                             fsmode='median', psfmodel='gaussy',
                                             psffwhm=2., cleantype='idw')

            crmask2, clean2 = detect_cosmics(hdu[4].data, readnoise=readnoise2,
                                             sigclip=5, objlim=5, sigfrac=0.8,
                                             fsmode='median', psfmodel='gaussy',
                                             psffwhm=2., cleantype='idw')

            hdu.flush()
            hdu.close()

            fits.writeto(outbase + '_crmask1.fits', crmask1.astype('i'), clobber=True)
            fits.writeto(outbase + '_crmask2.fits', crmask2.astype('i'), clobber=True)
            # Run fixpix on the frames
            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im+'[1]', outbase+'_crmask1.fits')

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(im+'[4]', outbase+'_crmask2.fits')

            if outbase=='red':
                iraf.mkfringeflat('oc0102060_flc.fits', 'oc0102070_nsp.fits', 'oc0102070_frr.fits',
                                  beg_scale=0.6, end_scale=1.5, scale_step=0.01,
                                  beg_shift=-3.0, end_shift=3.0,shift_step=0.05)
                iraf.defringe('oc0102060_flc.fits', 'oc0102070_frr.fits', 'oc0102060_dfr.fits')
                #Run x2d on the flt frame
                stistools.x2d.x2d(input='oc0102060_dfr.fits',output=im[:-4]+'x2d.fits')
            else:
                stistools.x2d.x2d(input='oc0102050_flc.fits',output=im[:-4]+'x2d.fits')

            h = pyfits.open(im[:-4]+'x2d.fits', mode='update')
            # Replace all of the bad pixels in the image by -10000 based on the DQ array
            # and save them to a new file.
            # Throw away bad reference file pixels and saturated pixels. None of the other
            # error codes were in the first file so I haven't included them here, but we
            # might want to add them later.
            d = h[3].data
            badpix = logical_and(bitwise_and(d,256) == 256,bitwise_and(d,512) == 512)
            h[1].data[badpix] = -10000
            d = h[6].data
            badpix = logical_and(bitwise_and(d,256) == 256,bitwise_and(d,512) == 512)
            h[4].data[badpix] = -10000
            h.flush()

            # Trim the images
            for i in range(1,7):
                h[i].data = h[i].data[100:-100, 100:-100]
                h[i].header['CRPIX1'] -= 100
                h[i].header['CRPIX2'] -= 100

            h.flush()
            h.close()

            # Combine the images
            iraf.unlearn(iraf.imcombine)
            iraf.imcombine(input=im[:-4]+'x2d[1],'+im[:-4]+'x2d[4]', output=outbase+'_com.fits',
                            reject='crreject')

            hdu = pyfits.open(outbase +'_com.fits')
            mask = hdu[0].data == 0.0
            hdu.close()
            fits.writeto(outbase+'_mask.fits', mask.astype('i'), clobber=True)

            iraf.unlearn(iraf.fixpix)
            iraf.fixpix(outbase+'_com.fits', outbase+'_mask.fits')

            iraf.unlearn(iraf.apall)
            iraf.apall(input=outbase+'_com',output='13dh_'+outbase, review='no',
                       nsum = -500, b_order = 1,
                       b_function='legendre',b_niterate=30, b_naverage = -21,
                       nfind=1,t_order=3,background='median',weights='variance',
                       skybox=100 )
            iraf.splot(outbase+'[SCI]')