Beispiel #1
0
    def _fixIrafHeader(self, fitsfile=None):
        """Fix up the primary header of the detection image.

        Parameters
        ----------
        fitsfile : str, optional
            Path of the FITS file to fix; defaults to ``self.detImName``.

        Strips every primary-header card not named in ``keylist``, then
        stamps bookkeeping keywords (FILENAME, DATASET, ORIGIN, DATE) and
        the summed EXPTIME/TEXPTIME of the input science images.
        """
        if fitsfile is None:
            fitsfile = self.detImName

        # Keywords copied out of what the iraf drizzle task produced;
        # ascards for these keywords are laid down in this order.  The AM*
        # keywords were added for astrometrically corrected CRVALs.
        # BUG FIX: the original list was missing commas after 'PA_FINAL'
        # through 'AMSGRA', so Python's implicit string concatenation fused
        # all six names into one bogus keyword and every one of them was
        # stripped from the header (see the correct list in _fixHeader).
        keylist = [
            'SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'TELESCOP',
            'INSTRUME', 'DETECTOR', 'EQUINOX', 'CRPIX1', 'CRPIX2', 'CRVAL1',
            'CRVAL2', 'CTYPE1', 'CTYPE2', 'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2',
            'LTV1', 'LTV2', 'LTM1_1', 'LTM2_2', 'PA_V3', 'PA_FINAL',
            'AMDRA', 'AMDDEC', 'AMNTCH', 'AMSGRA', 'AMSGDEC'
        ]

        oldfits = pyfits.open(fitsfile, "update")
        oldfits_headerKeys = oldfits[0].header.ascard.keys()

        # Delete every card that is not explicitly whitelisted above.
        for key in oldfits_headerKeys:
            if key not in keylist:
                del oldfits[0].header.ascard[key]

        oldfits[0].header.update('FILENAME', fitsfile)
        oldfits[0].header.update('DATASET', self.obsName)
        ostring = self.modName + ' Ver. ' + __version__
        oldfits[0].header.update(
            'ORIGIN', 'Properietary data by CLASH pipeline:' + ostring)  #WZ
        #oldfits[0].header.update('ORIGIN','ACS Science Data Pipeline:'+ostring)
        oldfits[0].header.update('DATE', pUtil.ptime())
        try:
            # Re-writing OBJECT normalizes/promotes the card if present.
            oldfits[0].header.update('OBJECT', oldfits[0].header['OBJECT'])
        except KeyError:
            # No OBJECT card in the source header; nothing to propagate.
            pass

        # Total exposure time is the sum over the images that went into
        # the detection image.
        exptime = 0
        for im in self.sciImageList:
            fo = pyfits.open(im)
            exptime += fo[0].header['EXPTIME']
            fo.close()
            del fo
        oldfits[0].header.update('EXPTIME', exptime)
        oldfits[0].header.update('TEXPTIME', exptime)
        oldfits.close()
        del oldfits
        return
Beispiel #2
0
def xmlStartCat(outfile, name, imgfile=None):
    """Initialise the xml header of a catalog file. imgfile is a passed fits file.
    """
    xmlcatalog = open(outfile, 'w+')
    xmlcatalog.write(
        "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n")
    xmlcatalog.write('<!DOCTYPE catalog SYSTEM ' +\
            '\n\t"http://acs.pha.jhu.edu/science/pipeline/DTD/Catalog.dtd">\n')
    if imgfile == None:
        xmlcatalog.write("<catalog type=\"xml/text\" dataset=\"" + name +
                         "\" date=\"" + ptime() + "\">\n")
    else:
        img = os.path.basename(imgfile)
        tel = fUtil.getTel(imgfile)
        inst = fUtil.getInstr(imgfile)
        det = fUtil.getDetector(imgfile)
        filter1 = fUtil.getFilter1(imgfile)
        filter2 = fUtil.getFilter2(imgfile)
        acs = filters.ACSFilterInfo()
        if filter1 and filter2:
            if filter1 not in acs.clear and filter2 not in acs.clear:
                xmlcatalog.write("<catalog type=\"xml/text\" imgsrc=\"" +img+ \
                         "\" dataset=\""+name+"\" telescope=\"" +tel+  \
                         "\" instrument=\""+inst+"\" detector=\"" +det+\
                         "\" filtername=\""+filter1+"/"+filter2+"\" date=\""+ptime()+"\">\n")
            elif filter1 in acs.clear:
                xmlcatalog.write("<catalog type=\"xml/text\" imgsrc=\"" +img+ \
                         "\" dataset=\""+name+"\" telescope=\"" +tel+  \
                         "\" instrument=\""+inst+"\" detector=\"" +det+\
                         "\" filtername=\""+filter2+"\" date=\""+ptime()+"\">\n")
            elif filter2 in acs.clear:
                xmlcatalog.write("<catalog type=\"xml/text\" imgsrc=\"" +img+ \
                         "\" dataset=\""+name+"\" telescope=\"" +tel+  \
                         "\" instrument=\""+inst+"\" detector=\"" +det+\
                         "\" filtername=\""+filter1+"\" date=\""+ptime()+"\">\n")
        else:
            print "No filters found in image:", img, "for markup."
            xmlcatalog.write("<catalog type=\"xml/text\" dataset=\""+name+"\" imgsrc=\"" \
                     +img+ "\" date=\""+ptime()+"\">\n")
    xmlcatalog.close()
    return
 def _writeHeader(self,cat,header):
     """Write the header block of the multicolor color catalog file.

     ``header`` is a sequence of (column-name, column-number) pairs; one
     '# <number><TAB><name>' line is emitted per pair after the banner.
     """
     self.logfile.write('Writing multicolor catalog header...')
     catfile = open(cat, 'w')
     catfile.write('## ' + pUtil.ptime() + '\n')
     catfile.write('## ' + self.modName + ' Catalog file for Observation: ' + self.obsName + '\n')
     catfile.write('## (This file was generated automatically by the ACS Pipeline.  Do not edit.)\n##\n')
     for colname, colnum in header:
         catfile.write('# ' + str(colnum) + '\t' + colname + '\n')
     catfile.close()
     return
Beispiel #4
0
 def _writeHeader(self,cat,header):
     """Write the header block of the multicolor color catalog file.

     ``header`` is a sequence of (column-name, column-number) pairs; one
     '# <number><TAB><name>' line is emitted per pair after the banner.
     """
     self.logfile.write('Writing multicolor catalog header...')
     catfile = open(cat, 'w')
     catfile.write('## ' + pUtil.ptime() + '\n')
     catfile.write('## ' + self.modName + ' Catalog file for Observation: ' + self.obsName + '\n')
     catfile.write('## This proprietary file was by the CLASH Pipeline.\n##\n')
     for colname, colnum in header:
         catfile.write('# ' + str(colnum) + '\t' + colname + '\n')
     catfile.close()
     return
    def __init__(self,obs): #WZ Nov 2011
        """Package the final data products of an observation.

        Copies the per-filter science/weight/rms images into obs.prodir under
        standardized names (<target>_<camera>_<filter>_sci_<ps>mas_<yymmdd>.fits),
        copies the detection images and per-band catalogs, then gzips the
        fits files and opens permissions via shell commands.

        NOTE(review): assumes obs.sciImageList is non-empty — 'target' and
        'filter' leak out of the first loop and are reused below; an empty
        list would raise NameError at the detection-image copy.  TODO confirm
        the caller guarantees this.
        """
        self.outputdir = obs.prodir
        self.catdir = obs.catdir
        self.sciImageList = []
        self.rmsImageList = []
        self.whtImageList = []
        self.filterList = []
        # Build a yymmdd stamp from the 'YYYY-MM-DDT...' timestamp of ptime().
        date = string.split(ptime(),'T')[0]
        date = date.replace('-','')
        stamp= date[2:]
        # Pixel scale rounded to 3 decimals; 'ps' is the fractional digits
        # used in the output file names (e.g. 0.065 -> '065').
        pscale = numpy.rint(obs.ref.pscale * 1000)/1000.
        ps = string.split(str(pscale),'.')[1]
        os.chdir(obs.fitsdir)
        i=0
        for im in obs.sciImageList:
            # Pull the target and filter fields from the end of the
            # '_'-separated input file name.
            substr=string.split(im,'_')
            j=len(substr)
            target=substr[j-3]
            filter=substr[j-2]
            # inpfits = pyfits.open(im)
            #detector = inpfits[0].header.get('DETECTOR')
            detector = drutil.getPrimaryKeyword(im+'[0]','DETECTOR')
            # Map the detector to the camera tag used in the product name.
            if (detector == 'WFC'):
                f = target+'_acs_'+filter+'_sci_'+ps+'mas_'+stamp+'.fits'
            elif (detector == 'IR'):
                f = target+'_ir_'+filter+'_sci_'+ps+'mas_'+stamp+'.fits'
            elif (detector == 'UVIS'):
                f = target+'_uvis_'+filter+'_sci_'+ps+'mas_'+stamp+'.fits'
            else:
                # NOTE(review): 'f' keeps its previous value here; if the
                # very first image has an unlisted detector, the next line
                # raises NameError.
                obs.logfile.write("Unlisted detector name: "+detector)
            outfile = os.path.join(self.outputdir,f)
            shutil.copy(im,outfile)

            # Matching weight/rms images are copied under the same product
            # name with '_sci_' swapped for '_wht_' / '_rms_'.
            im=obs.weightImageList[i]
            whtfile=string.replace(outfile,'_sci_','_wht_')
            shutil.copy(im,whtfile)

            im=obs.rmsImageList[i]
            rmsfile=string.replace(outfile,'_sci_','_rms_')
            shutil.copy(im,rmsfile)

            self.sciImageList.append(outfile)
            self.whtImageList.append(whtfile)
            self.rmsImageList.append(rmsfile)
            self.filterList.append(filter)
            i = i + 1

        # Copy the optical / near-IR detection images under the target name.
        for band in ['opt','nir']:
        # for band in ['nir']:
            im='detection_'+band+'.fits'
            detfile = os.path.join(self.outputdir,target+'_detection_'+band+'.fits')
            shutil.copy(im,detfile)
        obs.logfile.write("Image copying done. Start gzipping")
        os.chdir(obs.prodir)
        # Compress products via a shell (popen2 is Python 2 only); stdout
        # and stderr are drained but otherwise ignored.
        cmd = 'gzip *fits'
        sproc  = popen2.Popen3(cmd,1)
        output = sproc.fromchild.readlines()
        errs   = sproc.childerr.readlines()

        # Copy per-band detection/multicolor/bpz catalogs into the current
        # (product) directory, prefixed with the target name.
        for band in ['opt','nir']:
            dname='detection_'+band+'.cat'
            shutil.copy(os.path.join(self.catdir, dname),target+'_'+dname)
            mname='multicolor_'+band+'.cat'
            shutil.copy(os.path.join(self.catdir, mname),target+'_'+mname)
            nname='multicolor_'+band+'.columns'
            shutil.copy(os.path.join(self.catdir, nname),target+'_'+nname)
            bname='bpz_'+band+'.cat'
            shutil.copy(os.path.join(self.catdir, bname),target+'_'+bname)
            for filt in self.filterList:
                fname=target+'_'+filt+'_'+band+'.cat'
                shutil.copy(os.path.join(self.catdir, fname),fname)
        obs.logfile.write("Catalog copying done")

        # Same treatment for the 'red' band products, written to obs.reddir.
        os.chdir(obs.fitsdir)
        im='detection_red.fits'
        detfile = os.path.join(obs.reddir,target+'_detection_red.fits')
        shutil.copy(im,detfile)

        os.chdir(obs.reddir)
        dname='detection_red.cat'
        shutil.copy(os.path.join(self.catdir, dname),target+'_'+dname)
        mname='multicolor_red.cat'
        shutil.copy(os.path.join(self.catdir, mname),target+'_'+mname)
        nname='multicolor_red.columns'
        shutil.copy(os.path.join(self.catdir, nname),target+'_'+nname)
        bname='bpz_red.cat'
        shutil.copy(os.path.join(self.catdir, bname),target+'_'+bname)
        for filt in self.filterList:
            fname=target+'_'+filt+'_red.cat'
            shutil.copy(os.path.join(self.catdir, fname),fname)
        obs.logfile.write("Catalog copying done")

        cmd = 'gzip *fits'
        sproc  = popen2.Popen3(cmd,1)
        output = sproc.fromchild.readlines()
        errs   = sproc.childerr.readlines()
        # Make everything world-readable/executable.
        cmd = 'chmod 755 *'
        sproc  = popen2.Popen3(cmd,1)
        output = sproc.fromchild.readlines()
        errs   = sproc.childerr.readlines()
        obs.logfile.write("Fits files compressed. Open permission set")
        del bname,dname,fname,mname,nname

        # pdb.set_trace()
        os.chdir(obs.prodir)
        os.chdir(obs.fitsdir)
        return
Beispiel #6
0
    def _writeColumns(self, header):
        """Write the .columns file for BPZ.

        The file matches the data in the header of the multicolor catalog:
        for every *_MAG_BPZ / *_MAGERR_BPZ column pair one line
        '<filter>\\t<magcol>,<errcol>\\tAB\\t<zp_error>\\t0.0' is written.
        ``header`` is a sequence of (column-name, column-number) pairs.
        Complex (cross) filters and ramp filters are excluded and logged.
        """
        xFilts = []    # complex (cross) filters excluded from bpz processing
        badFilts = []  # ramp filters excluded from bpz processing
        # ACS ramp filters are never sent to BPZ.
        ignoreFilters = [
            'FR388N', 'FR423N', 'FR462N', 'FR505N', 'FR551N', 'FR601N',
            'FR656N', 'FR716N', 'FR782N', 'FR853N', 'FR931N', 'FR1016N',
            'FR459M', 'FR647', 'FR914M'
        ]

        # columnsFile = os.path.join(self.obsCats,'multicolor.columns')
        columnsFile = string.replace(self.colorcat, '.cat', '.columns')  #WZ
        cfile = open(columnsFile, 'w')
        cfile.write('## ' + pUtil.ptime() + '\n')
        cfile.write('## ' + self.modName + ' .columns file for Observation: ' +
                    self.obsName + '\n')
        cfile.write(
            '## (This file was generated automatically by the APLUS Pipeline.  Do not edit.)\n##\n'
        )
        cfile.write(
            '## N.B. The columns listed are the extinction corrected/aperture corrected\n'
        )
        cfile.write(
            '## magnitude columns which appear as MAG_BPZ and MAGERR_BPZ of the \n'
        )
        cfile.write(
            '## multicolor catalog file. These columns are now sent to BPZ.\n')
        cfile.write('## (See Bugzilla bug #2708.)\n##\n')
        for i in range(len(header)):
            name, col = header[i]
            line = name.split('_')
            # BUG FIX: the guard was 'len(line) < 3', but line[3] is read
            # below, so a name with exactly three '_' parts raised
            # IndexError; such names can never be MAG/BPZ columns anyway.
            if len(line) < 4:
                continue
            elif line[-1] == "CORR":
                continue
            elif len(line) > 6:
                # Complex (cross) filter, e.g. ..._<f1>_<f2>_...: exclude.
                if (line[3] + "_" + line[4]) not in xFilts:
                    xFilts.append(line[3] + "_" + line[4])
                continue
            elif line[3] in ignoreFilters:
                if line[3] not in badFilts:
                    badFilts.append(line[3])
                continue
            elif line[-2] != "MAG":
                continue
            elif line[-1] != "BPZ":
                continue
            else:
                colname = line[0] + '_' + line[1] + '_' + line[2] + '_' + line[3]
                # Remember the magnitude column of a reference band for the
                # M_0 entry appended at the bottom of the file.
                if colname == 'HST_ACS_WFC_F814W' or colname == 'HST_ACS_WFC_F850LP':
                    n_mo = str(col)

                # col is the MAG_BPZ column; col+1 is the MAGERR_BPZ column.
                cfile.write(colname + '\t' + str(col) + ',' + str(col + 1) +
                            '\tAB\t%.2f\t0.0\n' % zp_error)

        for filt in xFilts:
            self.logfile.write("Excluding complex filter, " + filt +
                               " from bpz processing")
            self.errorList.append(
                (self.modName,
                 "Complex Filter " + filt + " excluded from bpz processing"))
        for badfilt in badFilts:
            self.logfile.write(
                "Excluding catalog from bpz interface for filter, " + badfilt)
            self.errorList.append(
                (self.modName,
                 "Filter " + badfilt + " excluded from bpz processing"))

        ## From Benitez email, Fri, 24 May 2002 19:04:55
        ## Hack to add the F814W column
        try:
            cfile.write('M_0\t%s\nID\t1\n' % n_mo)  #WZ
        except NameError:
            # n_mo is only bound when an F814W/F850LP column was seen;
            # without one, the M_0/ID footer is simply omitted.
            pass
        cfile.close()
        self.outputList[os.path.basename(columnsFile)] = [
            os.path.basename(self.colorcat)
        ]
        return
Beispiel #7
0
    def writeMsg(self, outfile):
        """ Make and write the pipeline message from the passed information.

        Walks self.keys / self.metaInfo (populated elsewhere on this object)
        and emits nested xml elements via an xmlMessage object.  Open
        elements are tracked on endTagList and closed in LIFO order.
        """

        # call for an xmlMessage object to build the message and then write
        # the message

        self.xmlMsg = xmlMessage()
        self.xmlMsg.docTypeTag("Module")
        self.xmlMsg.startTag('modulemessage', date=ptime())
        endTagList = []
        endTagList.append('modulemessage')
        for key in self.keys:

            # Don't write an end tag for the module level tags
            # this needs to wrap the whole message
            while len(endTagList) > 2:
                self.xmlMsg.endTag(endTagList.pop())
            tagList = self.metaInfo[key]
            configTag = None
            for tag in tagList:
                name = tag[0]
                # self.metaElements[0]/[1] appear to delimit a configuration
                # section: seeing the first sets a flag, seeing the second
                # while the flag is set closes the still-open element.
                # NOTE(review): exact element names are defined elsewhere.
                if name == self.metaElements[0]:
                    configTag = 1
                if name == self.metaElements[1] and configTag:
                    self.xmlMsg.endTag(endTagList.pop())
                    configTag = None
                val = ''
                attr = ''
                kdic = {
                }  # this will be a dictionary of tag attributes for use in apply()

                # A problem with this method of writing the xml just based solely on tag structure is that
                # one cannot determine that some element is not inclusive of another.  This, of course, is
                # what a DTD does for you.  So, in this case, an explicit element <errs> is specified because
                # there is no a priori way to determine that it is not an element of the <depend> element.

                # A repeated tag name closes the previously opened element of
                # the same name before a new one is started.
                if endTagList:
                    if name == endTagList[-1]:
                        self.xmlMsg.endTag(endTagList.pop())

                # Classify the remaining tuple items: strings containing a
                # space are element values, strings containing '=' are
                # attributes, anything else is coerced to str as the value.
                for item in tag[1:]:
                    if type(item) == StringType:  # Python 2 'types' check
                        if " " in item:  # if there are spaces in item, it MUST be a value string.
                            val = item
                            continue
                        elif "=" in item:
                            part1, part2 = string.split(
                                item, "=")  # attributes in the tuple
                            attr += part1 + '=' + part2 + ' '
                        else:
                            val = item
                    else:
                        val = str(item)
                # OK, now we have a tag name (name), maybe a value (val) and maybe some attributes (attr)
                # pass these to the xmlMessage object.

                if attr:
                    # Rebuild the attribute dict from the space-joined
                    # 'name=value' pairs accumulated above.
                    pairs = string.split(attr)
                    for i in range(len(pairs)):
                        atnam, atval = string.split(pairs[i], "=")
                        kdic[atnam] = atval
                    if val:
                        apply(self.xmlMsg.startTag, (name, val), kdic)  # py2 apply()
                    else:
                        apply(self.xmlMsg.startTag, (name, ), kdic)
                        endTagList.append(name)
                else:
                    if val:
                        self.xmlMsg.startTag(name, val)
                    else:
                        # Value-less tags stay open until a matching name
                        # appears or the stack unwinds at the loop top.
                        self.xmlMsg.startTag(name)
                        endTagList.append(name)

            if key != 'module':
                self.xmlMsg.endTag(endTagList.pop())
        # Close anything still open, including 'modulemessage' itself.
        while endTagList:
            self.xmlMsg.endTag(endTagList.pop())

        self.xmlMsg.write(outfile)
        return
Beispiel #8
0
def runMessage(switches, errorlist=None, *pObjs):
    """make a run-level message for a run of the pipeline.  The caller
    (pipeline) sends this function a couple lists (command line switches, errorList)
    and a bunch of pipeline module objects which have everything needed to build the 
    run message.

    Returns the path of the xml run message written under ``root``.
    """
    # This figures out what the dataset name is for the dataset attribute
    # on the pipelinemessage tag.
    # NOTE(review): if pObjs is empty, 'root', 'logfile' and 'obj' are never
    # bound and the code below raises NameError; callers are assumed to
    # always pass at least one module object.
    if pObjs:
        for obj in pObjs:
            root = obj.root
            logfile = os.path.basename(obj.logfile.logFileName)
            # The 'or 1' (by xingxing) forces this branch for every module,
            # so obsname always comes from newobs of the first object.
            if obj.modName == "ingest" or 1:  # by xingxing
                obsname = obj.newobs
                break
            else:
                obsname = obj.obsName
                break
    else:
        obsname = "N/A"
    runMsg = xmlMessage()
    runMsg.docTypeTag("Runmessage")
    runMsg.startTag("pipelinemessage",
                    version=apsis_release,
                    date=ptime(),
                    dataset=obsname)
    runMsg.startTag("meta")
    runMsg.startTag("user", os.environ['USER'])
    runMsg.startTag("host", os.uname()[1])
    runMsg.startTag("description", "Basic Imaging")
    if switches:
        for sw in switches:
            runMsg.startTag("switch", sw)
    runMsg.startTag("root", root)
    runMsg.startTag("modulelist")
    for ob in pObjs:
        runMsg.startTag("module",
                        ob.modName,
                        message=os.path.join(os.path.basename(ob.messagedir),
                                             ob.modName + "_module.xml"))
    runMsg.endTag("modulelist")
    runMsg.endTag("meta")
    runMsg.startTag("input")
    runMsg.startTag("dataset", name=obsname)
    # Every input image is tagged identically; the original code branched on
    # '_asn' in the name but both branches were byte-for-byte the same.
    for f in obj.fitslist:
        runMsg.startTag("file", "Images/" + f, type="image/x-fits")
    runMsg.endTag("dataset")
    runMsg.endTag("input")
    runMsg.startTag("output")
    #
    # OK, now we search through the outputList first picking out the stuff
    # we don't want in the archive and signal that.  Need a better way to do
    # this to allow for a selection.
    # RMS and detectionWeight images now appear commented.  This will cause
    # these images to be marked archive="yes" in the runMessage. See Bugzilla
    # bug #1245. The FLAG images are now being marked as archive="no" . See
    # same bug.
    # K Anderson 10-may-2002

    # Output-file classification rules: (substring, subdir, mime type,
    # archive flag).  ORDER MATTERS: the specific patterns must precede the
    # generic "fits.xml"/".fits"/".xml"/".cat" catch-alls near the bottom.
    fileRules = [
        ("_shifts_asn_fits.xml", "Images/", "text/xml", "no"),
        ("_shifts_asn.fits", "Images/", "image/x-fits", "no"),
        ("_cr.fits", "Images/", "image/x-fits", "no"),
        ("_SCI_", "Images/", "image/x-fits", "no"),
        ("_ERR_", "Images/", "image/x-fits", "no"),
        ("_DQ_", "Images/", "image/x-fits", "no"),
        ("_FLAG.fits", "Images/", "image/x-fits", "no"),
        ("_FLAG_fits.xml", "Images/", "text/xml", "no"),
        ("medriz_", "Images/", "image/x-fits", "no"),
        ("_drz_sci_context.fits.CON", "Images/", "text/xml", "no"),
        ("Edgemask.fits", "Images/", "image/x-fits", "no"),
        ("detectionImage_APER.fits", "Images/", "image/x-fits", "no"),
        ("detectionImage_APER_fits.xml", "Images/", "text/xml", "no"),
        ("fits.xml", "Images/", "text/xml", "yes"),
        ("fits.hdr", "Images/", "text/xml", "no"),
        (".fits", "Images/", "image/x-fits", "yes"),
        (".xml", "Catalogs/", "text/xml", "yes"),
        (".cat.old", "Catalogs/", "text/ascii", "no"),
        (".cat", "Catalogs/", "text/ascii", "yes"),
        (".matchin", "align/", "text/ascii", "no"),
        ("Match", "align/", "text/ascii", "no"),
    ]
    for obj in pObjs:
        for f in obj.outputList.keys():
            for pattern, subdir, mimetype, flag in fileRules:
                if string.find(f, pattern) != -1:
                    runMsg.startTag("file",
                                    subdir + f,
                                    type=mimetype,
                                    archive=flag,
                                    module=obj.modName)
                    break
            else:
                # Anything unrecognized is plain ascii and not archived.
                runMsg.startTag("file",
                                f,
                                type="text/ascii",
                                archive="no",
                                module=obj.modName)

    runMsg.startTag("file", logfile, type="text/ascii", archive="yes")
    runMsg.endTag("output")
    if errorlist:
        runMsg.startTag("errorlist")
        for i in errorlist:
            runMsg.startTag("erroritem", i)
        runMsg.endTag("errorlist")
    runMsg.endTag("pipelinemessage")
    msgfile = os.path.join(root, obsname + '_runMessage.xml')
    runMsg.write(msgfile)
    return msgfile
Beispiel #9
0
 # Build the detection image for the current band.  The surrounding names
 # (band, obs, excludefilt, noContext, obdict, errorList, cl_switches,
 # alImage, drzImage, debug, ...) are defined elsewhere in this script.
 TimeDetectionImage = time.time()  # XX
 print "==================================================================="
 print "Now making detection image in ", band, " band"
 print "==================================================================="
 try:
     detImage = combFilter.detectionImage(obs,
                                          Band=band,
                                          excludeFilters=excludefilt,
                                          noContext=noContext)
     obdict[detImage] = "detection_" + band
 except combFilter.detectionImageError, err:
     # No usable (non-grism) images: log, write the run message, and exit.
     print err.value
     obs.logfile.write(
         "detectionImage constructor threw a detectionImageError exception")
     obs.logfile.write("APLUS, v" + drex_release + " exiting at  " +
                       ptime() + "\n")
     errorList.append(err.value)
     # NOTE(review): 'warntxt' is only assigned in the generic handler
     # below; if it is unbound when this handler runs, this line raises
     # NameError -- confirm it is set earlier in the script.
     errorList.append(warntxt)
     msg.runMessage(cl_switches, errorList, obs, alImage, drzImage)
     sys.exit(
         "Exit spawned by detectionImageError Exception. \nNo non-grism images in dataset."
     )
 except Exception, err:
     # Any other failure: record a warning, optionally show the traceback,
     # then write the run message and checkpoint state before continuing.
     warntxt = "Error encountered making detectionImage object..." + band
     errorList.append(warntxt)
     errorList.append(str(err))
     print warntxt
     if debug:
         show_tb()
     msg.runMessage(cl_switches, errorList, obs, alImage, drzImage)
     jar(obdict, obs.root)
Beispiel #10
0
    def _fixHeader(self, fitsfile=None):
        """Fix up the header of the detection image (default).

        Parameters
        ----------
        fitsfile : str, optional
            Some other image (notably, the detectionWeight image) whose
            header should be fixed; defaults to ``self.detImName``.

        Strips every primary-header card not named in ``keylist``, then
        stamps the FILENAME, DATASET, ORIGIN and DATE bookkeeping keywords.
        """
        # The list of keywords copied out of what the drizzle task produced;
        # ASCards for these keywords are copied in this order.
        # NOTE: EXPTIME and TEXPTIME are included in this lot (jpb, 4/Oct/O1).
        if fitsfile is None:
            fitsfile = self.detImName

        keylist = [
            'SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'TELESCOP',
            'INSTRUME', 'DETECTOR', 'EXPTIME', 'TEXPTIME', 'EQUINOX',
            'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2', 'CTYPE1', 'CTYPE2',
            'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2',
            'LTV1', 'LTV2', 'LTM1_1', 'LTM2_2',
            # 'PA_V3' and 'PA_FINAL' are deliberately not preserved here
            # (unlike _fixIrafHeader).
            'AMDRA', 'AMDDEC', 'AMNTCH', 'AMSGRA', 'AMSGDEC'
        ]

        oldfits = pyfits.open(fitsfile, "update")
        oldfits_headerKeys = oldfits[0].header.ascard.keys()

        # Delete every card that is not explicitly whitelisted above.
        for key in oldfits_headerKeys:
            if key not in keylist:
                del oldfits[0].header.ascard[key]

        oldfits[0].header.update('FILENAME', fitsfile)
        oldfits[0].header.update('DATASET', self.obsName)
        ostring = self.modName + ' Ver. ' + __version__
        oldfits[0].header.update('ORIGIN', 'Properietary data by CLASH pipeline:' + ostring)  #WZ
        oldfits[0].header.update('DATE', pUtil.ptime())
        try:
            # Re-writing OBJECT normalizes/promotes the card if present.
            oldfits[0].header.update('OBJECT', oldfits[0].header['OBJECT'])
        except KeyError:
            # No OBJECT card present; nothing to propagate.
            pass
        oldfits.close()
        del oldfits
        return
Beispiel #11
0
    def splice(self):
        """Splice the photo-z catalog (.bpz) file and the multicolor.cat file
        into a final photometric redshift catalog (final_photometry.cat).

        Raises IOError if one or both input catalogs cannot be found.  All
        of these files live in the directory given by self.obsCats.
        """

        self.bpzCat = os.path.join(self.obsCats, 'bpz.cat')
        if not os.path.exists(self.colorCat):
            raise IOError, "Multicolor catalog file not found."
        elif not os.path.exists(self.bpzCat):
            raise IOError, "BPZ catalog file not found."

        # Use the fillHeader function to get a list of header lines from each catalog.

        bpzHeaders = fillHeader(self.bpzCat)
        colorHeaders = fillHeader(self.colorCat)
        allH = bpzHeaders + colorHeaders

        # delete the extra element containing the 'NUMBER' column.
        # (only the first occurrence is removed; the bpz catalog supplies
        # the object id column)

        for i in range(len(allH)):
            col, name = allH[i]
            if name == 'NUMBER':
                del allH[i]
                break

        # Renumber the columns via a counter

        for i in range(len(allH)):
            col, name = allH[i]
            allH[i] = (i + 1, name)

        # open the new catalog file and write these headers

        newCat = open(os.path.join(self.obsCats, 'final_photometry.cat'), 'w')
        newCat.write('## Photometry Catalog for Observation: ' + self.obsName +
                     '\n')
        newCat.write('## Generated by the ACS Pipeline, ' + ptime() + '\n')
        newCat.write('##\n')

        # Copy the leading '##' comment lines from the bpz catalog.
        f1 = open(self.bpzCat)
        while 1:
            line = f1.readline()
            fields = string.split(line)
            # NOTE(review): a blank/whitespace-only line here leaves fields
            # empty and fields[0] raises IndexError -- assumes the bpz
            # catalog always opens with '##' lines followed by data.
            if fields[0] == '##':
                newCat.write(line)
            else:
                break

        f1.close()
        del f1

        for col, name in allH:
            newCat.write('# ' + str(col) + '\t' + name + '\n')

        # slurp up the data from each catalog.

        cat1 = open(self.bpzCat).readlines()
        cat2 = open(self.colorCat).readlines()

        # grab just the data lines
        cat1Data = []
        cat2Data = []

        # NOTE(review): '#' anywhere in a line (not just as a leading
        # comment marker) causes the line to be treated as a comment.
        for line in cat1:
            if '#' in line:
                pass
            else:
                cat1Data.append(string.rstrip(line))

        # Delete the extra field 'NUMBER' from colorCat data as was done (above) for the header.

        for line in cat2:
            if '#' in line:
                pass
            else:
                fields = string.split(string.rstrip(line))
                del fields[0]
                # joinfields re-joins with single spaces, collapsing the
                # original column alignment.
                newline = string.joinfields(fields)
                cat2Data.append(newline)

        # Write the concatenated line to the new catalog

        if len(cat1Data) != len(cat2Data):
            raise IndexError, ("Catalog length mismatch.")

        for i in range(len(cat1Data)):
            newCat.write(cat1Data[i] + '  ' + cat2Data[i] + '\n')
        newCat.close()
        return
Beispiel #12
0
    def _magFix(self, catalogFile):
        """This private method receives a path to a catalog file and sifts through the
        MAGERR field looking for values > 10.  It sets the corresponding MAG field = -99 and
        sets that object's MAGERR field to 0.0.  catalogFile is a path not a file object."""

        # fillHeader will return a list of tuples where which looks like
        #
        # [(1, 'NUMBER'),
        # (2, 'X_IMAGE'),
        # (3, 'Y_IMAGE'),
        # ...
        # (12, 'MAG_ISOCOR'),
        # (13, 'MAGERR_ISOCOR'),
        # (14, 'FLUX_APER', 1)
        # (15, 'FLUX_APER', 2),
        # (16, 'FLUX_APER', 3),
        # ...
        # ]
        #
        # The tuples are either of length 2 or 3.  If len is 3, the 3rd item of the
        # tuple is the nth occurance of that column identifier.  This occurs on those
        # columns of MAGs and MAGERRs for a series of increasingly larger apertures.

        # newFieldList will be a list of Numeric arrays containing the columns of the catalogs.
        # This list will contain fields which have not been altered, i.e. all fields other than
        # MAG_* and MAGERR_*, and the new MAG and MAGERR fields which have been corrected.
        # Once the list is complete, it is tuple-ized and send to the tableio pu_data function.

        newFieldList = []
        newMagsList = []
        newMagErrsList = []
        newMagHeaders = []
        newMagErrHeaders = []
        newHeaders = []
        magCols = []
        magErrCols = []
        selectSet = fillHeader(catalogFile)

        print "Searching catalog for required columns, MAG, MAGERR"
        for i in range(len(selectSet)):
            if len(selectSet[i]) == 2:
                column, name = selectSet[i]
                paramNames = name.split("_")
                if "MAG" in paramNames:
                    magCols.append((column, name))
                elif "MAGERR" in paramNames:
                    magErrCols.append((column, name))
                else:
                    oldField = tableio.get_data(catalogFile, (column - 1))
                    newFieldList.append(oldField)
                    newHeaders.append(name)
                    continue
            else:
                column, name, id = selectSet[i]
                paramNames = name.split("_")
                if "MAG" in paramNames:
                    magCols.append((column, name, id))
                elif "MAGERR" in paramNames:
                    magErrCols.append((column, name, id))
                else:
                    oldField = tableio.get_data(catalogFile, (column - 1))
                    newFieldList.append(oldField)
                    newHeaders.append(name)
                    continue

        # We now have
        #  catalog field  --> list
        # --------------------------------
        #        MAG_*    --> magCols
        #     MAGERR_*    --> magErrCols
        #
        # The algorithm will be to step through the magErrCols columns, extracting those fields
        # via get_data and getting Numeric arrays.  The matching mag columns are slurped as well.
        # We search the magErrCols arrays looking for >= 10 values and then marking the those mags
        # as -99.0 and the matching magerrs as 0.0
        # See Bugzilla bug #2700

        for item in magErrCols:
            magErrAperId = None
            # item may be of len 2 or 3
            if len(item) == 2:
                magErrColId, magErrColName = item
            else:
                magErrColId, magErrColName, magErrAperId = item

            magErrKind = magErrColName.split("_")[1]  # ISO, ISOCORR, etc.

            print "\n\nMAG type:", magErrKind
            if magErrAperId: print magErrColName, "Aper id is", magErrAperId
            print "Getting\t", magErrColName, "\tfield", magErrColId

            # MAGERR array:
            magErrs = tableio.get_data(catalogFile, magErrColId - 1)

            matchingMagColName = None
            matchingMagColId = None

            #----------------------- Search for matching MAG_* field -----------------------#

            for magitems in magCols:

                # We know that the magErrColName is MAGERR and if magErrNameId is true then
                # the tuple is of len 3, i.e. a MAGERR_APER field.  We look for the matching
                # MAG_APER field id, 1, 2, 3... etc.

                if len(magitems) == 3:
                    magColId, magColName, magAperId = magitems
                    if magColName == "MAG_" + magErrKind:
                        matchingMagColName = magColName
                        #print "Found matching field type:",magColName,"in field",magColId
                        if magAperId == magErrAperId:
                            print "Found matching aperture id."
                            print "MAG_APER id: ", magAperId, "MAGERR_APER id: ", magErrAperId
                            matchingMagColId = magColId
                            matchingMags = tableio.get_data(
                                catalogFile, magColId - 1)
                            break
                    else:
                        continue
                else:
                    magColId, magColName = magitems
                    if magColName == "MAG_" + magErrKind:
                        print "Found matching field type:", magColName, "in field", magColId
                        matchingMagColName = magColName
                        matchingMagColId = magColId
                        matchingMags = tableio.get_data(
                            catalogFile, magColId - 1)
                        break
                    else:
                        continue

            #--------------------------------------------------------------------------------#

            print " MAG err field:", magErrColName, magErrColId
            print "     Mag field:", matchingMagColName, matchingMagColId

            # Now the grunt work on the arrays,
            # magErrs, matchingMags
            #
            # update: flagging all MAGs as -99 when the corresponding MAGERR > 10
            # introduced a bug which unintentionally reset the magnitudes
            # SExtractor had flagged with a MAG = 99.0 and a MAGERR = 99.0
            # This now checks for a MAGERR of 99 and does not reset the MAG value
            # if MAGERR = 99.0 but does for all other MAGERRS > 10.0

            badMagErrs1 = Numeric.where(magErrs >= 10, 1, 0)
            badMagErrs2 = Numeric.where(magErrs != 99.0, 1, 0)
            badMagErrs = badMagErrs1 * badMagErrs2
            del badMagErrs1, badMagErrs2
            newMags = Numeric.where(badMagErrs, -99.0, matchingMags)
            newMagErrs = Numeric.where(badMagErrs, 0.0, magErrs)

            newMagsList.append(newMags)
            newMagHeaders.append(matchingMagColName)
            newMagErrsList.append(newMagErrs)
            newMagErrHeaders.append(magErrColName)

        # concatenate the lists.  This is done to preserve the MAG_APER and MAGERR_APER
        # grouping of the original SExtractor catalog.

        newFieldList = newFieldList + newMagsList
        newFieldList = newFieldList + newMagErrsList
        newHeaders = newHeaders + newMagHeaders
        newHeaders = newHeaders + newMagErrHeaders

        newVariables = tuple(newFieldList)

        # rename the old catalog file as catalogFile.old
        os.rename(catalogFile, catalogFile + ".old")
        self.outputList[os.path.basename(catalogFile) +
                        ".old"] = [os.path.basename(catalogFile)]
        fob = open(catalogFile, 'w')
        fob.write("## " + ptime() + "\n")
        fob.write("## " + self.modName +
                  " catalog regenerated by _magFix method.\n")
        fob.write(
            '## (This file was generated automatically by the ACS Pipeline.)\n##\n'
        )
        fob.write(
            "## This catalog has been photometrically corrected to remove\n")
        fob.write("## 'bad' magnitude values.\n")
        fob.write("##\n")
        for i in range(len(newHeaders)):
            fob.write("# " + str(i + 1) + "\t" + newHeaders[i] + "\n")
        fob.close()
        tableio.put_data(catalogFile, newVariables, append="yes")

        return
Beispiel #13
0
    def _hackit(self, cat, keep_apertures=(1, 2, 3)):
        """Hack the detectionCatalog.cat file to take out a bunch of the
        aperture data.

        Default is only to keep the first three apertures in the final
        catalog, but the caller can change this by passing a keep_apertures
        sequence (aperture numbers, *not* radii).  This trims the repeated
        columns

            MAG_APER, MAGERR_APER, FLUX_APER, FLUXERR_APER

        and rewrites `cat` in place.  The default is a tuple rather than a
        list to avoid the mutable-default-argument pitfall (the previous
        list default was never mutated, so behavior is unchanged).
        """
        # fillHeader returns (column, name[, aper_id]) tuples; aper_id is
        # present only on the repeated *_APER columns, e.g.
        #   ('FLUX_APER', 1), ('FLUX_APER', 2), ('MAG_APER', 1), ...
        headerList = pUtil.fillHeader(cat)

        # Keep every single-valued column, and only the requested apertures
        # of the repeated columns.
        newheader = []
        for item in headerList:
            if len(item) == 2 or item[2] in keep_apertures:
                newheader.append(item)

        # Zero-based indices (into each data row) of surviving columns.
        cols = [item[0] - 1 for item in newheader]

        fin = open(cat)
        try:
            catRows = fin.readlines()
        finally:
            fin.close()

        new_rows = []
        for row in catRows:
            if '#' in row:  # header/comment line
                continue
            fields = row.split()
            arow = ''
            for column in cols:
                arow += '  ' + fields[column]
            new_rows.append(arow)

        # We have the newheader and the new data.  Renumber the columns in
        # the header: newheader still carries the old catalog's column ids.
        new_newheader = []
        for i in range(len(newheader)):
            if len(newheader[i]) == 2:
                new_newheader.append((i + 1, newheader[i][1]))
            else:
                new_newheader.append((i + 1, newheader[i][1], newheader[i][2]))

        # Overwrite the original detectionImage.cat file (well, whatever
        # path was passed to this func, anyway).
        out = open(cat, 'w')
        try:
            self.logfile.write(
                "private method _hackit, trimming aperture parameters")
            self.logfile.write("_hackit overwriting detectionImage.cat file.")
            out.write("## Date: " + pUtil.ptime() + "\n")
            out.write(
                "## This file has been modified from its original form by the WFP Pipeline.\n"
            )
            out.write("## Some aperture fields have been removed.\n")
            out.write("## This file written by the WFP Pipeline.  Do not edit.\n")
            for item in new_newheader:
                out.write('# ' + str(item[0]) + '\t' + str(item[1]) + '\n')
            for row in new_rows:
                out.write(row + '\n')
        finally:
            out.close()
        return