Example #1
def specselfid(images, outimages, outpref, refimage=None, ystart='middlerow',
               rstep=3, clobber=False, logfile='salt.log', verbose=True):

    with logging(logfile, debug) as log:

        # set up the variables
        infiles = []
        outfiles = []

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.listparse(
            'Outimages',
            outimages,
            outpref,
            infiles,
            '')

        # set up defaults
        if saltio.checkfornone(refimage) is not None:
            rhdu = saltio.openfits(refimage)
        else:
            refimage = None

        # read in and rectify each image
        for img, oimg in zip(infiles, outfiles):
            hdu = saltio.openfits(img)
            log.message(
                'Performing self-identification and rectification on %s' %
                img)
            for i in range(1, len(hdu)):
                if hdu[i].name == 'SCI':
                    if refimage is None:
                        sdata = hdu[i].data
                    else:
                        sdata = rhdu[i].data
                    hdu[i].data = selfid(
                        hdu[i].data,
                        sdata,
                        ystart=ystart,
                        rstep=rstep)
                    if saltkey.found('VAREXT', hdu[i]):
                        varext = saltkey.get('VAREXT', hdu[i])
                        hdu[varext].data = selfid(
                            hdu[varext].data,
                            sdata,
                            ystart=ystart,
                            rstep=rstep)
                    if saltkey.found('BPMEXT', hdu[i]):
                        bpmext = saltkey.get('BPMEXT', hdu[i])
                        hdu[bpmext].data = selfid(
                            hdu[bpmext].data,
                            sdata,
                            ystart=ystart,
                            rstep=rstep)

            # write out the oimg
            saltio.writefits(hdu, oimg, clobber=clobber)
Example #2
def flat(struct,fstruct):
    """flat applies a flatfield to salt CCD data

       return  struct
    """
    # Determine the number of extensions
    nextend=len(struct)

    #flat field the data
    for i in range(nextend):
       if struct[i].name=='SCI' or len(struct)==1:
            #Account for variance frames 
            if saltkey.found('VAREXT', struct[i]):
               varext=saltkey.get('VAREXT', struct[i])
               if saltkey.found('VAREXT', fstruct[i]):
                   fvarext=saltkey.get('VAREXT', fstruct[i])
                   fvar=fstruct[fvarext].data
               else:
                   fvar=0
               struct[varext].data=(struct[i].data/fstruct[i].data)*(struct[varext].data/struct[i].data**2+fvar/fstruct[i].data**2)

            #flatten the data
            struct[i].data = struct[i].data / fstruct[i].data

    return struct
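
For reference, a minimal NumPy sketch of flat-field division with standard first-order variance propagation, var(S/F) = (S/F)**2 * (var(S)/S**2 + var(F)/F**2); this is an illustration with made-up arrays, not the pysalt flat() routine itself.

import numpy as np

def flatten_with_variance(sci, var, flat, flat_var=0.0):
    # divide a science frame by a flat and propagate the variance to first order
    q = sci / flat
    qvar = q ** 2 * (var / sci ** 2 + flat_var / flat ** 2)
    return q, qvar

# toy frames: constant science image with Poisson-like variance and a flat of 2
sci = np.full((4, 4), 100.0)
var = np.full((4, 4), 100.0)
flat = np.full((4, 4), 2.0)
flat_data, flat_data_var = flatten_with_variance(sci, var, flat)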
Example #3
def stack(hdu):
    """Convert MEF HRS data into a single extension.   
    """

    #set the detector
    detname=saltkey.get('DETNAM', hdu[0])
    print(detname)
    if detname=='08443-03-01' or detname=='HRDET':
       detector='hrdet'
    elif detname=='04434-23-02' or detname=='HBDET':
       detector='hbdet'
    else:
       raise SaltError('%s is not an HRS detector' % detname)
    print(detector)

    #get key parameters
    try:
       nccd=saltkey.get('CCDAMPS', hdu[0]) 
    except:
       nccd=saltkey.get('CCDNAMPS', hdu[0]) 

    #determine the shape of the CCD
    if detector=='hbdet':
       toty=hdu[1].shape[0]
       totx=hdu[1].shape[1]+hdu[2].shape[1]
    elif detector=='hrdet':
       toty=hdu[1].shape[0]+hdu[3].shape[0]
       totx=hdu[1].shape[1]+hdu[2].shape[1]
    data=np.zeros((toty,totx),np.float32)
    #embed the ccds
    for j in range(nccd):
        i=j+1
        y2,x2=hdu[i].data.shape
        if detector=='hbdet':
           if i==1:
              y1=0
              x1=0
              y2=y2
              x2=x2
           else:
              y1=0
              x1=x2
              x2=x1+x2
        elif detector=='hrdet':
           y1=0
           if i%2==1: x1=0
           if i%2==0: 
              x1=x2
              x2=x1+x2
           if i>2:
              y1=y2
              y2=y1+y2 
              
              
        data[y1:y2,x1:x2]=hdu[i].data
    
    ihdu = pyfits.ImageHDU(data)
    nhdu = pyfits.HDUList([hdu[0], ihdu])

    return nhdu
Example #4
def stack(hdu):
    """Convert MEF HRS data into a single extension.   
    """

    #set the detector
    detname = saltkey.get('DETNAM', hdu[0])
    print(detname)
    if detname == '08443-03-01' or detname == 'HRDET':
        detector = 'hrdet'
    elif detname == '04434-23-02' or detname == 'HBDET':
        detector = 'hbdet'
    else:
        raise SaltError('%s is not an HRS detector' % detname)
    print(detector)

    #get key parameters
    try:
        nccd = saltkey.get('CCDAMPS', hdu[0])
    except:
        nccd = saltkey.get('CCDNAMPS', hdu[0])

    #determine the shape of the CCD
    if detector == 'hbdet':
        toty = hdu[1].shape[0]
        totx = hdu[1].shape[1] + hdu[2].shape[1]
    elif detector == 'hrdet':
        toty = hdu[1].shape[0] + hdu[3].shape[0]
        totx = hdu[1].shape[1] + hdu[2].shape[1]
    data = np.zeros((toty, totx), np.float32)
    #embed the ccds
    for j in range(nccd):
        i = j + 1
        y2, x2 = hdu[i].data.shape
        if detector == 'hbdet':
            if i == 1:
                y1 = 0
                x1 = 0
                y2 = y2
                x2 = x2
            else:
                y1 = 0
                x1 = x2
                x2 = x1 + x2
        elif detector == 'hrdet':
            y1 = 0
            if i % 2 == 1: x1 = 0
            if i % 2 == 0:
                x1 = x2
                x2 = x1 + x2
            if i > 2:
                y1 = y2
                y2 = y1 + y2

        data[y1:y2, x1:x2] = hdu[i].data

    ihdu = pyfits.ImageHDU(data)
    nhdu = pyfits.HDUList([hdu[0], ihdu])

    return nhdu
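
For orientation, the 'hrdet' embedding above reduces to tiling the amplifier frames into a 2x2 mosaic; a NumPy sketch assuming four equal-sized toy frames (real HRS extensions can differ in shape, which is why stack() tracks y2, x2 per extension):

import numpy as np

# four toy amplifier frames, all the same shape
amps = [np.full((3, 4), k, dtype=np.float32) for k in (1, 2, 3, 4)]
ny, nx = amps[0].shape

# 2x2 mosaic: amps 1 and 2 on the bottom row, 3 and 4 on top
mosaic = np.zeros((2 * ny, 2 * nx), dtype=np.float32)
for k, amp in enumerate(amps):
    row, col = divmod(k, 2)
    mosaic[row * ny:(row + 1) * ny, col * nx:(col + 1) * nx] = amp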
Example #5
def specselfid(images,
               outimages,
               outpref,
               refimage=None,
               ystart='middlerow',
               rstep=3,
               clobber=False,
               logfile='salt.log',
               verbose=True):

    with logging(logfile, debug) as log:

        # set up the variables
        infiles = []
        outfiles = []

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.listparse('Outimages', outimages, outpref, infiles,
                                    '')

        # set up defaults
        if saltio.checkfornone(refimage) is not None:
            rhdu = saltio.openfits(refimage)
        else:
            refimage = None

        # read in and rectify each image
        for img, oimg in zip(infiles, outfiles):
            hdu = saltio.openfits(img)
            log.message(
                'Performing self-identification and rectification on %s' % img)
            for i in range(1, len(hdu)):
                if hdu[i].name == 'SCI':
                    if refimage is None:
                        sdata = hdu[i].data
                    else:
                        sdata = rhdu[i].data
                    hdu[i].data = selfid(hdu[i].data,
                                         sdata,
                                         ystart=ystart,
                                         rstep=rstep)
                    if saltkey.found('VAREXT', hdu[i]):
                        varext = saltkey.get('VAREXT', hdu[i])
                        hdu[varext].data = selfid(hdu[varext].data,
                                                  sdata,
                                                  ystart=ystart,
                                                  rstep=rstep)
                    if saltkey.found('BPMEXT', hdu[i]):
                        bpmext = saltkey.get('BPMEXT', hdu[i])
                        hdu[bpmext].data = selfid(hdu[bpmext].data,
                                                  sdata,
                                                  ystart=ystart,
                                                  rstep=rstep)

            # write out the oimg
            saltio.writefits(hdu, oimg, clobber=clobber)
Example #6
def extract(hdu, ext=1, method='normal', section=None,
            minsize=3.0, thresh=3.0, convert=True):
    """For a given image, extract a 1D spectra from the image
       and write the spectra to the output file

    """

    ap_list = []
    i = ext
    if hdu[i].name == 'SCI':
        # set up the data, variance, and bad pixel frames
        # first step is to find the region to extract
        data_arr = hdu[i].data
        try:
            var_arr = hdu[hdu[i].header['VAREXT']].data
        except:
            var_arr = None
        try:
            bpm_arr = hdu[hdu[i].header['BPMEXT']].data
        except:
            bpm_arr = None
        var_arr = None
        bpm_arr = None

        xarr = np.arange(len(data_arr[0]))

        # convert using the WCS information
        try:
            w0 = saltkey.get('CRVAL1', hdu[i])
            dw = saltkey.get('CD1_1', hdu[i])
        except Exception as e:
            msg = 'Error on Ext %i: %s' % (i, e)
            raise SALTSpecError(msg)
        warr = w0 + dw * xarr

        # convert from air to vacuum
        if convert:
            warr = Spectrum.air2vac(warr)

        # set up the sections in case of findobj
        if section is None:
            section = findobj.findObjects(
                data_arr,
                method='median',
                specaxis=1,
                minsize=minsize,
                thresh=thresh,
                niter=5)

        # extract all of the  regions
        for sec in section:
            ap = apext.apext(warr, data_arr, ivar=var_arr)
            y1, y2 = sec
            ap.flatten(y1, y2)
            ap_list.append(ap)

    return ap_list
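
The wavelength array built above is a plain linear solution from the two WCS keywords; a self-contained sketch with illustrative CRVAL1/CD1_1 values:

import numpy as np

w0 = 4500.0          # CRVAL1: wavelength of the reference pixel (illustrative)
dw = 0.5             # CD1_1: dispersion per pixel (illustrative)
xarr = np.arange(2048)
warr = w0 + dw * xarr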
Example #7
def clean(struct, createvar=False, badpixelstruct=None, mult=True, dblist=None, ampccd=2,
          xdict=[], subover=True,trim=True, subbias=False, bstruct=None,
         median=False, function='polynomial',order=3,rej_lo=3,rej_hi=3,niter=10,
         plotover=False, log=None, verbose=True):

   infile=struct

   #prepare the files
   struct=prepare(struct, createvar=createvar, badpixelstruct=badpixelstruct)

   #reset the names in the structures
   for i in range(1,len(struct)):
       struct[i].name=struct[i].header['EXTNAME']


   #gain correct the files
   usedb=False
   if dblist:  usedb=True
   struct=gain(struct, mult=mult,usedb=usedb, dblist=dblist, ampccd=ampccd, log=log, verbose=verbose)

   #xtalk correct the files
   usedb=False
   if xdict:  
       obsdate=saltkey.get('DATE-OBS', struct[0])
       try:
           obsdate=int('%s%s%s' % (obsdate[0:4],obsdate[5:7], obsdate[8:]))
           xkey=np.array(xdict.keys())
           date=xkey[abs(xkey-obsdate).argmin()]
           xcoeff=xdict[date]
       except Exception as e:
           msg='WARNING--Cannot find xtalk coefficient for %s because %s' % (infile, e)
           if log: log.warning(msg)
           xcoeff=xdict[xdict.keys()[-1]]
Example #8
def surface_fit(struct,order=3, mask=True, minlevel=0):
    """Fit a surface to an image 

       return  struct
    """
    # Determine the number of extensions
    nextend=len(struct)

    #flat field the data
    for i in range(nextend):
       if struct[i].name=='SCI' or len(struct)==1:

            #create the median image
            if mask: 
               bpmext = saltkey.get('BPMEXT', struct[i])
               mask_arr = (struct[bpmext].data == 0) * (struct[i].data > minlevel)
            else:
               mask_arr = (struct[i].data < minlevel)

            #create the data
            coef=fit_surface(struct[i].data, mask=mask_arr, xorder=order, yorder=order, xyorder=0)
            y, x = np.indices(struct[i].data.shape)
            sf=surface(coef, xorder=order, yorder=order, xyorder=0)
            print(coef)
            struct[i].data=sf(x,y)

            #Account for variance frames 
            if mask:
               struct[bpmext].data = struct[bpmext].data*0


    return struct
Example #9
def saltprepare(images,outimages,outpref,createvar=False, badpixelimage=None, clobber=True,logfile='salt.log',verbose=True):

   with logging(logfile,debug) as log:

       # Check the input images 
       infiles = saltio.argunpack ('Input',images)

       # create list of output files 
       outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'')

       #verify that the input and output lists are the same length
       saltio.comparelists(infiles,outfiles,'Input','output')

       # open the badpixel image
       if saltio.checkfornone(badpixelimage) is None:
          badpixelstruct=None
       else:
           try:
               badpixelstruct = saltio.openfits(badpixelimage)
           except saltio.SaltIOError as e:
               msg='badpixel image must be specified\n %s' % e
               raise SaltError(msg)

       # open each raw image file
       for img, oimg in zip(infiles, outfiles):

           #open the fits file
           struct=saltio.openfits(img)

           #if createvar, throw a warning if it is using slotmode
           if saltkey.fastmode(saltkey.get('DETMODE', struct[0])) and createvar:
               msg='Creating variance frames for slotmode data in %s' % img
               log.warning(msg)
  
           # identify instrument
           instrume,keyprep,keygain,keybias,keyxtalk,keyslot = saltkey.instrumid(struct)

           # has file been prepared already?
           try:
               key = struct[0].header[keyprep]
               message = 'ERROR -- SALTPREPARE: File ' + img
               message += ' has already been prepared'
               raise SaltError(message)
           except:
               pass


           # prepare file
           struct=prepare(struct,createvar=createvar, badpixelstruct=badpixelstruct)

           # housekeeping keywords
           fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
           saltkey.housekeeping(struct[0],keyprep, 'File prepared for IRAF', hist)

           # write FITS file
           saltio.writefits(struct,oimg, clobber=clobber)
           saltio.closefits(struct)

           message = 'SALTPREPARE -- %s => %s' % (img, oimg)
           log.message(message, with_header=False)
Example #10
def illum_cor(struct,mbox):
    """Apply an illumination correction to a set of images

       return  struct
    """
    # Determine the number of extensions
    nextend=len(struct)

    #flat field the data
    for i in range(nextend):
       if struct[i].name=='SCI' or len(struct)==1:

            #create the median image
            mdata=median_filter(struct[i].data, size=(mbox, mbox))

            #create the data
            struct[i].data=struct[i].data/mdata

            #Account for variance frames 
            if saltkey.found('VAREXT', struct[i]):
               varext=saltkey.get('VAREXT', struct[i])
               struct[varext].data=struct[varext].data/mdata


    return struct
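
A self-contained sketch of the same correction using scipy.ndimage directly, with an illustrative array and box size (not the pysalt task itself):

import numpy as np
from scipy.ndimage import median_filter

data = np.random.normal(1000.0, 10.0, (64, 64))   # toy flat-ish frame
mbox = 15                                          # smoothing box size
mdata = median_filter(data, size=(mbox, mbox))     # large-scale illumination pattern
corrected = data / mdata                           # keep only small-scale structure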
Example #11
def checkforpropid(image,propids):
    """subroutine to check to see if the propid keyword exists

       returns status
    """

    #open up the image
    struct=pyfits.open(image, mode='update')
    if struct:
        #get proposal code
        propid=saltkey.get('PROPID', struct[0])

        #check to see if it is none
        if saltio.checkfornone(propid) is None or propid == 'None':
           message='\nNo value in PROPID keyword for %s' % image
           raise SaltError(message)
 
        #check to see if it is an eng or cal proposal
        if propid.count('ENG_') or propid.count('CAL_'):
           return
 
        #clean up junk ones
        if propid in ['BIAS','COMMON','JUNK','TEST','UNKNOWN']:
           return 

        #check to see if it is in the sdb
        if propid not in propids:
           message='\n%s for PROPID keyword for %s is invalid' % (propid, image)
           raise SaltError(message)
Example #12
def skysubtract(hdu, method='normal', section=[], funct='polynomial', order=2):
    """For a given image, extract a measurement of the sky from
       the image and then subtract that measurement from the
       overall image
       and write the spectra to the output file

    """

    for i in range(len(hdu)):
        if hdu[i].name == 'SCI':
            # set up the data, variance, and bad pixel frames
            # first step is to find the region to extract
            data_arr = hdu[i].data

            if saltkey.found('VAREXT', hdu[i]):
                var_ext = saltkey.get('VAREXT', hdu[i])
                var_arr = hdu[var_ext].data
            else:
                var_arr = None
                var_ext = None

            try:
                bpm_ext = saltkey.get('BPMEXT', hdu[i])
                bpm_arr = hdu[hdu[i].header['BPMEXT']].data
            except:
                bpm_ext = None
                bpm_arr = None

            # create the xarr for the purposes of extracting the data
            xarr = np.arange(len(data_arr[0]))

            # TODO:  The variance array does not fully work at the moment
            var_arr = None

            if method == 'normal':
                sdata = normalsky(xarr, data_arr, var_arr, section)
            elif method == 'fit':
                sdata = fitsky(xarr, data_arr, var_arr, section)

            # subtract the data
            hdu[i].data = data_arr - sdata

            # correct the variance frame
            if var_ext:
                hdu[var_ext].data = hdu[var_ext].data  # +ap.lvar/nrows

    return hdu
Example #13
def obslog(infiles, log=None):
    """For a set of input files, create a dictionary contain all the header 
      information from the files.  Will print things to a saltlog if log is
      not None
    
      returns Dictionary
   """
    #create the header dictionary
    headerDict = {}
    for k in headerList:
        headerDict[k] = []
    for k in scamheaderList:
        headerDict[k] = []
    for k in rssheaderList:
        headerDict[k] = []

    # iterate over and open image files
    infiles.sort()
    for infile in infiles:

        #open the file
        struct = saltio.openfits(infile)

        # instrument
        scam = False
        rss = False
        instrume = saltkey.get('INSTRUME', struct[0])
        if (instrume == 'RSS'): rss = True
        if (instrume == 'SALTICAM'): scam = True

        #add in the image name
        headerDict['FILENAME'].append(os.path.basename(infile))

        # ingest primary keywords from files in the image list
        for k, f in zip(headerList[1:], formatList[1:]):
            default = finddefault(f)
            headerDict[k].append(
                getkey(struct[0], k, default=default, log=log, warn=True))

        # ingest scam specific primary keywords from files in the image list
        for k, f in zip(scamheaderList[1:], scamformatList[1:]):
            default = finddefault(f)
            headerDict[k].append(
                getkey(struct[0], k, default=default, log=log, warn=scam))

        # ingest rss specific primary keywords from files in the image list
        for k, f in zip(rssheaderList[1:], rssformatList[1:]):
            default = finddefault(f)
            headerDict[k].append(
                getkey(struct[0], k, default=default, log=log, warn=rss))

        # close image files
        saltio.closefits(struct)

        if log:
            log.message('SALTOBSLOG -- read %s' % infile, with_header=False)

    return headerDict
Example #14
def prepare(struct,createvar=False, badpixelstruct=None, namps=2):

   #set up the file name
   infile=saltkey.getimagename(struct[0])

   #include information about which headers are science extensions
   #and check to make sure each array is a data array
   nextend=len(struct)-1
   nsciext=nextend
   for i in range(1,nextend+1):
       try:
           struct[i].header['XTENSION'] == 'IMAGE'
       except:
           msg='Extension %i in %s is not image data' % (i, infile)
           raise SaltError(msg)
       saltkey.new('EXTNAME','SCI','Extension name',struct[i])
       saltkey.new('EXTVER',i,'Extension number',struct[i])

   #check the number of amplifiers
   #TODO:  This is currently hardwired at a value of 2
   nccds = saltkey.get('NCCDS',struct[0])
   if (nextend%(nccds*namps) != 0):
        message = 'ERROR -- SALTPREPARE: Number of file extensions and '
        message += 'number of amplifiers are not consistent in ' + infile
        raise SaltError(message)

   #Add the inv. variance frame if requested--assumes no variance frame
   #and the science frames are in the first n frames
   if createvar:
       #create the inv. variance frames
       for i in range(1, nsciext+1):
           try:
               hdu=CreateVariance(struct[i], i, nextend+i)
           except Exception as e: 
               msg='Cannot create variance frame in extension %i of  %s because %s' % (nextend+i, infile, e)
               raise SaltError(msg) 
           struct[i].header.update('VAREXT',nextend+i, comment='Extension for Variance Frame')
           struct.append(hdu)
       nextend+=nsciext

       #create the badpixelframes
       for i in range(1, nsciext+1):
           try:
               hdu=createbadpixel(struct, badpixelstruct, i, nextend+i)
           except Exception as e: 
               msg='Could not create bad pixel extension in ext %i of %s because %s' % (nextend+i, infile, e)
               raise SaltError(msg) 
           struct[i].header.update('BPMEXT',nextend+i, comment='Extension for Bad Pixel Mask')
           struct.append(hdu)
       nextend+=nsciext


   #update the number of extensions
   saltkey.new('NSCIEXT',nsciext,'Number of science extensions', struct[0])
   saltkey.new('NEXTEND',nextend,'Number of data extensions', struct[0])


   return struct
Example #15
def clean(struct,
          createvar=False,
          badpixelstruct=None,
          mult=True,
          dblist=None,
          ampccd=2,
          xdict=[],
          subover=True,
          trim=True,
          subbias=False,
          bstruct=None,
          median=False,
          function='polynomial',
          order=3,
          rej_lo=3,
          rej_hi=3,
          niter=10,
          plotover=False,
          log=None,
          verbose=True):

    infile = struct

    #prepare the files
    struct = prepare(struct,
                     createvar=createvar,
                     badpixelstruct=badpixelstruct)

    #reset the names in the structures
    for i in range(1, len(struct)):
        struct[i].name = struct[i].header['EXTNAME']

    #gain correct the files
    usedb = False
    if dblist: usedb = True
    struct = gain(struct,
                  mult=mult,
                  usedb=usedb,
                  dblist=dblist,
                  ampccd=ampccd,
                  log=log,
                  verbose=verbose)

    #xtalk correct the files
    usedb = False
    if xdict:
        obsdate = saltkey.get('DATE-OBS', struct[0])
        try:
            obsdate = int('%s%s%s' % (obsdate[0:4], obsdate[5:7], obsdate[8:]))
            xkey = np.array(xdict.keys())
            date = xkey[abs(xkey - obsdate).argmin()]
            xcoeff = xdict[date]
        except Exception as e:
            msg = 'WARNING--Cannot find xtalk coefficient for %s because %s' % (
                infile, e)
            if log: log.warning(msg)
            xcoeff = xdict[xdict.keys()[-1]]
Example #16
def saltxtalk(images,outimages,outpref,xtalkfile=None, usedb=False,
              clobber=True, logfile='salt.log',verbose=True):

   #start logging
   with logging(logfile,debug) as log:

       # Check the input images 
       infiles = saltio.argunpack ('Input',images)

       # create list of output files 
       outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'')

       # are input and output lists the same length?
       saltio.comparelists(infiles,outfiles,'Input','output')

       # does crosstalk coefficient data exist
       if usedb:
           xtalkfile = xtalkfile.strip()
           xdict = saltio.readxtalkcoeff(xtalkfile)
       else:
           xdict=None

       for img, oimg in zip(infiles, outfiles):

           #open the fits file
           struct=saltio.openfits(img)

           #find the best xcoeff for the image if using the db
           if usedb:
              obsdate=saltkey.get('DATE-OBS', struct[0])
              obsdate=int('%s%s%s' % (obsdate[0:4],obsdate[5:7], obsdate[8:]))
              xkey=np.array(xdict.keys())
              date=xkey[abs(xkey-obsdate).argmin()]
              xcoeff=xdict[date]
           else:
              xcoeff=[]

           # identify instrument
           instrume,keyprep,keygain,keybias,keyxtalk,keyslot = saltkey.instrumid(struct)

           # has file been prepared already?
           if saltkey.found(keyxtalk, struct[0]):
               message='%s has already been xtalk corrected' % img
               raise SaltError(message)


           #apply the cross-talk correction
           struct = xtalk(struct, xcoeff, log=log, verbose=verbose)

           # housekeeping keywords
           fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
           saltkey.housekeeping(struct[0], 'SXTALK', 'Images have been xtalk corrected', hist)

           # write FITS file
           saltio.writefits(struct,oimg, clobber=clobber)
           saltio.closefits(struct)
Example #17
def readtimefix(hdu, dsteps=7, transtime=4e-3):
    """Update the hdu with the correct time for when the exposure started 
       and add the READTIME keyword

       dsteps--the number of readouts to correct for
       transtime--the transfer time between each frame
    """

    #check for if the data has already been processed
    if saltkey.found('READTIME', hdu):
        raise SaltIOError(' has already been processed')

    #determine the UTC time
    utctime = saltkey.get('UTC-OBS', hdu)
    timeobs = saltkey.get('TIME-OBS', hdu)
    dateobs = saltkey.get('DATE-OBS', hdu)
    exptime = float(saltkey.get('EXPTIME', hdu))

    #add the readtime header
    saltkey.new("READTIME", utctime, 'Time of the readout of the frame', hdu)

    #correct the utctime--first switch to datetime to properly handle
    #dates around changes in hours
    y, m, d = dateobs.split('-')
    H, M, S = utctime.split(':')
    s = int(float(S))
    ms = int(1e6 * (float(S) - s))
    newtime = datetime.datetime(int(y), int(m), int(d), int(H), int(M), s, ms)

    #correct the datetime
    dtime = dsteps * (exptime + transtime)
    s = int(dtime)
    ms = int(1e6 * (dtime - s))
    newtime = newtime - datetime.timedelta(0, s, ms)

    #update the header keywords
    hdu.header["UTC-OBS"] = str(newtime.time())
    saltkey.put("UTC-OBS", str(newtime.time()), hdu)
    saltkey.put("TIME-OBS", str(newtime.time()), hdu)
    saltkey.put("DATE-OBS", str(newtime.date()), hdu)

    return hdu
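
The time arithmetic in readtimefix amounts to subtracting dsteps readout intervals of (exptime + transtime) seconds from the recorded UTC; a standard-library sketch with made-up header values, chosen so the correction also rolls the date back across midnight:

import datetime

dateobs, utctime = '2012-03-01', '00:00:02.500'    # DATE-OBS / UTC-OBS (illustrative)
exptime, dsteps, transtime = 0.7, 7, 4e-3

start = datetime.datetime.strptime(dateobs + ' ' + utctime, '%Y-%m-%d %H:%M:%S.%f')
shift = datetime.timedelta(seconds=dsteps * (exptime + transtime))
corrected = start - shift                          # lands on the previous date here
print(corrected.date(), corrected.time())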
Example #18
def readtimefix(hdu, dsteps=7, transtime=4e-3):
    """Update the hdu with the correct time for when the exposure started 
       and add the READTIME keyword

       dsteps--the number of readouts to correct for
       transtime--the transfer time between each frame
    """

    #check for if the data has already been processed
    if saltkey.found('READTIME', hdu):
        raise SaltIOError(' has already been processed')

    #determine the UTC time 
    utctime=saltkey.get('UTC-OBS', hdu)
    timeobs=saltkey.get('TIME-OBS', hdu)
    dateobs=saltkey.get('DATE-OBS', hdu)
    exptime=float(saltkey.get('EXPTIME', hdu))

    #add the readtime header
    saltkey.new("READTIME",utctime,'Time of the readout of the frame', hdu)

    #correct the utctime--first switch to datetime to properly handle
    #dates around changes in hours
    y,m,d=dateobs.split('-')
    H,M,S=utctime.split(':')
    s=int(float(S))
    ms=int(1e6*(float(S)-s))
    newtime=datetime.datetime(int(y),int(m),int(d),int(H),int(M),s,ms)

    #correct the datetime
    dtime=dsteps*(exptime+transtime)
    s=int(dtime)
    ms=int(1e6*(dtime-s))
    newtime=newtime-datetime.timedelta(0, s, ms)

    #update the header keywords
    hdu.header.update("UTC-OBS", str(newtime.time()))
    saltkey.put("UTC-OBS", str(newtime.time()), hdu)
    saltkey.put("TIME-OBS", str(newtime.time()), hdu)
    saltkey.put("DATE-OBS", str(newtime.date()), hdu)

    return hdu
Example #19
def calc_resolution(hdu):
    """Calculate the resolution for a setup"""
    instrume=saltkey.get('INSTRUME', hdu[0]).strip()
    grating=saltkey.get('GRATING', hdu[0]).strip()
    grang=saltkey.get('GR-ANGLE', hdu[0])
    grasteps=saltkey.get('GRTILT', hdu[0])
    arang=saltkey.get('AR-ANGLE', hdu[0])
    arsteps=saltkey.get('CAMANG', hdu[0])
    rssfilter=saltkey.get('FILTER', hdu[0])
    specmode=saltkey.get('OBSMODE', hdu[0])
    masktype=saltkey.get('MASKTYP', hdu[0]).strip().upper()
    slitname=saltkey.get('MASKID', hdu[0])
    xbin, ybin = saltkey.ccdbin( hdu[0], '')
    slit=st.getslitsize(slitname)
    
    #create RSS Model
    rss=RSSModel.RSSModel(grating_name=grating.strip(), gratang=grang, \
                          camang=arang,slit=slit, xbin=xbin, ybin=ybin, \
                          )
    res=1e7*rss.calc_resolelement(rss.alpha(), -rss.beta())
    return res 
Example #20
File: saltfirst.py  Project: rirze/pysalt
def getimagedetails(hdu):
   """Return all the pertinant image header details"""
   filename=hdu._HDUList__file.name
   imlist=[filename]
   print(filename)
   for k in headerList[1:]:
       try:
           value=saltkey.get(k, hdu[0])
       except SaltIOError:
           value=''
       imlist.append(value)
   return imlist
Example #21
def make_calring(hdu, method=None, thresh=5, niter=3, conv=0.05, minsize=10, axc=None, ayc=None):
    """Open each image and measure the position of the ring including its center and radius
     
      Return the information about the calibration ring
   """

    # setup the data
    data = hdu[0].data
    # extract the time and convert to decimal hours
    utctime = saltkey.get("UTC-OBS", hdu[0])
    utctime = salttime.time_obs2hr((utctime.split()[-1]))

    # determine the correct etalon and information to extract
    etstate = saltkey.get("ET-STATE", hdu[0])

    if etstate.count("S2"):
        etz = saltkey.get("ET1Z", hdu[0])
    elif etstate.count("S3"):
        etz = saltkey.get("ET2Z", hdu[0])
    else:
        msg = "This etalon state is not currently supported"
        raise SaltError(msg)

    # extract the ring
    ring_list = findrings(data, thresh=thresh, niter=niter, minsize=minsize, axc=axc, ayc=ayc)

    # assumes only one ring in the data set
    ring = ring_list[0]

    # determine the center and radius of the ring
    if method is not None:
        ring = findcenter(data, ring, method, niter=niter, conv=conv)

    if axc:
        ring.xc = axc
    if ayc:
        ring.yc = ayc

    return ring.xc, ring.yc, ring.prad, ring.prad_err, etz, utctime
Example #22
def obslog(infiles, log=None):
   """For a set of input files, create a dictionary contain all the header 
      information from the files.  Will print things to a saltlog if log is
      not None
    
      returns Dictionary
   """
   #create the header dictionary
   headerDict={}
   for k in headerList: headerDict[k]=[]
   for k in scamheaderList: headerDict[k]=[]
   for k in rssheaderList: headerDict[k]=[]

   # iterate over and open image files
   infiles.sort()
   for infile in infiles:

       #open the file
       struct = saltio.openfits(infile)

       # instrument
       scam = False
       rss = False
       instrume = saltkey.get('INSTRUME', struct[0])
       if (instrume=='RSS'): rss = True
       if (instrume=='SALTICAM'): scam=True

       #add in the image name
       headerDict['FILENAME'].append(os.path.basename(infile))

       # ingest primary keywords from files in the image list
       for k,f in zip(headerList[1:], formatList[1:]):
           default=finddefault(f)
           headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=True))

       # ingest scam specific primary keywords from files in the image list
       for k,f in zip(scamheaderList[1:], scamformatList[1:]):
           default=finddefault(f)
           headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=scam))

       # ingest rss specific primary keywords from files in the image list
       for k,f in zip(rssheaderList[1:], rssformatList[1:]):
           default=finddefault(f)
           headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=rss))

       # close image files
       saltio.closefits(struct)

       if log: log.message('SALTOBSLOG -- read %s' % infile, with_header=False)

   return headerDict
Example #23
def saltembed(images,outimages,outpref, clobber=True,logfile='salt.log',verbose=True): 

   with logging(logfile,debug) as log:

       # Check the input images 
       infiles = saltio.argunpack ('Input',images)

       # create list of output files 
       outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'')

       #verify that the input and output lists are the same length
       saltio.comparelists(infiles,outfiles,'Input','output')

       # open each raw image file
       for img, oimg in zip(infiles, outfiles):

           #open the fits file
           struct=saltio.openfits(img)

           #get the number of CCDs
           nccd=saltkey.get('NCCDS', struct[0])

           #set the number of amps per CCD--*TODO* Read from header
           namps=2

           #get the number of windows--try to read from header
           try:
              nwindows=saltkey.get('NWINDOWS', struct[0])
           except:
              nwindows=len(struct)/(nccd*namps)
 
           outstruct=embedimage(struct, nccd, namps, nwindows)

           saltio.writefits(outstruct, oimg, clobber=clobber)

           message = 'SALTEMBED -- %s => %s' % (img, oimg)
           log.message(message, with_header=False)
Example #24
def prepare(struct,createvar=False, badpixelstruct=None, namps=2):

   #set up the file name
   infile=saltkey.getimagename(struct[0])

   #include information about which headers are science extensions
   #and check to make sure each array is a data array
   nextend=len(struct)-1
   nsciext=nextend
   for i in range(1,nextend+1):
       try:
           struct[i].header['XTENSION'] == 'IMAGE'
       except:
           msg='Extension %i in %s is not image data' % (i, infile)
           raise SaltError(msg)
       saltkey.new('EXTNAME','SCI','Extension name',struct[i])
       saltkey.new('EXTVER',i,'Extension number',struct[i])

   #check the number of amplifiers
   #TODO:  This is currently hardwired at a value of 2
   nccds = saltkey.get('NCCDS',struct[0])
   if (nextend%(nccds*namps) != 0):
        message = 'ERROR -- SALTPREPARE: Number of file extensions and '
        message += 'number of amplifiers are not consistent in ' + infile
        raise SaltError(message)

   #Add the inv. variance frame if requested--assumes no variance frame
   #and the science frames are in the first n frames
   if createvar:
       #create the inv. variance frames
       for i in range(1, nsciext+1):
           try:
               hdu=CreateVariance(struct[i], i, nextend+i)
           except Exception as e:
               msg='Cannot create variance frame in extension %i of  %s because %s' % (nextend+i, infile, e)
               raise SaltError(msg) 
           struct[i].header.update('VAREXT',nextend+i, comment='Extension for Variance Frame')
           struct.append(hdu)
       nextend+=nsciext

       #create the badpixelframes
       for i in range(1, nsciext+1):
           try:
               hdu=createbadpixel(struct, badpixelstruct, i, nextend+i)
           except Exception as e:
               msg='Could not create bad pixel extension in ext %i of %s because %s' % (nextend+i, infile, e)
               raise SaltError(msg) 
           struct[i].header.update('BPMEXT',nextend+i, comment='Extension for Bad Pixel Mask')
           struct.append(hdu)
Example #25
def updateheaders(struct, ext, tdiff, real_expt, utc, infile):
    # exit if tdiff wasn't updated
    if tdiff == real_expt:
        msg='No adequate correction found for frame %i in file %s' % (ext, infile)
        raise SaltError(msg)

        return struct

    # calculate the new utc value
    try:
        ntime=salttime.sex2dec(utc)
        ntime=ntime-tdiff/3600.0
        newutc=salttime.dec2sex(ntime)
    except Exception as e:
        msg='Could not update UTC in %i header of image %s because %s' % (ext, infile, e)
        raise SaltError(msg)

        return struct

    # update the headers
    if utc==saltsafekey.get('UTC-OBS', struct):
        expt_string='%5.4f' % real_expt
        td_string='%5.4f' % tdiff
        if not saltsafekey.found('DUTC', struct):
            try:
                saltsafekey.put('UTC-OBS', newutc, struct, infile)
                saltsafekey.put('TIME-OBS', newutc, struct, infile)
                saltsafekey.new('DWETIME', expt_string, 'Dwell Time', struct, infile)
                saltsafekey.new('DUTC', td_string, 'Change in UTC time', struct, infile)
            except Exception as e:
                msg='Could not update %i header of image %s because %s' % (ext, infile, e)
                raise SaltIOError(msg)
        else:
            try:
                saltsafekey.put('UTC-OBS', newutc, struct, infile)
                saltsafekey.put('TIME-OBS', newutc, struct, infile)
                saltsafekey.put('DWETIME', real_expt, struct, infile)
                saltsafekey.put('DUTC', tdiff, struct, infile)
            except Exception as e:
                msg='Could not update %i header of image %s because %s' % (ext, infile, e)
                raise SaltError(msg)
    else:
        raise SaltIOError('Frame missing from list of times')

    return struct
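
The UTC correction itself is simple sexagesimal arithmetic; a sketch with stand-in conversion helpers (hypothetical replacements for salttime.sex2dec/dec2sex, not the pysalt versions) and illustrative values:

def sex2dec(utc):
    # 'HH:MM:SS.sss' -> decimal hours (stand-in for salttime.sex2dec)
    h, m, s = utc.split(':')
    return int(h) + int(m) / 60.0 + float(s) / 3600.0

def dec2sex(hours):
    # decimal hours -> 'HH:MM:SS.sss' (stand-in for salttime.dec2sex)
    h = int(hours)
    m = int((hours - h) * 60)
    s = (hours - h) * 3600.0 - m * 60
    return '%02i:%02i:%06.3f' % (h, m, s)

utc, tdiff = '23:10:30.000', 7.2                   # observed UTC and correction in seconds
newutc = dec2sex(sex2dec(utc) - tdiff / 3600.0)    # -> '23:10:22.800'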
Example #26
def read_slits_from_fits(simg):
    """Read the slit definitions from a FITS fiel where the slit
       definitions have been stored in a table in the FITS file under an
       extension with the name
    """

    # first check if the file exists
    saltio.fileexists(simg)

    # open the slit image
    struct = saltio.openfits(simg)

    # get the extension of the slit table
    slitext = saltkey.get('SLITEXT', struct[0])

    # extract the order of the fit and the positions of each of the slits
    order, slit_positions = mt.read_slits_HDUtable(struct[slitext])

    return order, slit_positions
Example #27
def getkey(struct, keyword, default, warn=True, log=None):
    """Return the keyword value.  Throw a warning if it doesn't work """

    try:
        value = saltkey.get(keyword, struct)
        if isinstance(default, str): value = value.strip()
    except SaltIOError:
        value = default
        infile = struct._file.name
        message = 'WARNING: cannot find keyword %s in %s' % (keyword, infile)
        if warn and log: log.message(message, with_header=False)
    if (str(value).strip() == ''): value = default
    #if (type(value) != type(default)):
    #     infile=struct._file.name
    #     message='WARNING: Type mismatch for %s for  %s in %s[0]' % (str(value), keyword, infile)
    #     if warn and log: log.message(message, with_header=False)
    #     value=default

    return value
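
For comparison, a similar defensive lookup against a bare FITS header can be written with Header.get from astropy.io.fits (the successor of the pyfits module used elsewhere in these examples), which returns a default when the keyword is missing; the header here is built in memory purely for illustration:

from astropy.io import fits

hdr = fits.Header()
hdr['INSTRUME'] = 'RSS'
value = hdr.get('PROPID', 'UNKNOWN')    # keyword absent, so the default comes back
if str(value).strip() == '':
    value = 'UNKNOWN'                   # treat blank values like missing ones, as above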
Example #28
def getkey(struct,keyword,default,warn=True, log=None):
   """Return the keyword value.  Throw a warning if it doesn't work """

   try:
        value = saltkey.get(keyword, struct)
        if isinstance(default, str):  value=value.strip()
   except SaltIOError:
        value = default
        infile=struct._file.name
        message = 'WARNING: cannot find keyword %s in %s' %(keyword, infile)
        if warn and log: log.message(message, with_header=False)
   if (str(value).strip() == ''): value = default
   #if (type(value) != type(default)):
   #     infile=struct._file.name
   #     message='WARNING: Type mismatch for %s for  %s in %s[0]' % (str(value), keyword, infile)
   #     if warn and log: log.message(message, with_header=False)
   #     value=default

   return value
示例#34
0
def read_slits_from_fits(simg):
    """Read the slit definitions from a FITS fiel where the slit
       definitions have been stored in a table in the FITS file under an
       extension with the name
    """

    # first check if the file exists
    saltio.fileexists(simg)

    # open the slit image
    struct = saltio.openfits(simg)

    # get the extension of the slit table
    slitext = saltkey.get('SLITEXT', struct[0])

    # extract the order of the fit and the positions of each of the slits
    order, slit_positions = mt.read_slits_HDUtable(struct[slitext])

    return order, slit_positions
Example #29
def illum_cor(struct, mbox):
    """Apply an illumination correction to a set of images

       return  struct
    """
    # Determine the number of extensions
    nextend = len(struct)

    #flat field the data
    for i in range(nextend):
        if struct[i].name == 'SCI' or len(struct) == 1:

            #create the median image
            mdata = median_filter(struct[i].data, size=(mbox, mbox))

            #create the data
            struct[i].data = struct[i].data / mdata

            #Account for variance frames
            if saltkey.found('VAREXT', struct[i]):
                varext = saltkey.get('VAREXT', struct[i])
                struct[varext].data = struct[varext].data / mdata

    return struct
Example #30
def surface_fit(struct, order=3, mask=True, minlevel=0):
    """Fit a surface to an image 

       return  struct
    """
    # Determine the number of extensions
    nextend = len(struct)

    #flat field the data
    for i in range(nextend):
        if struct[i].name == 'SCI' or len(struct) == 1:

            #create the median image
            if mask:
                bpmext = saltkey.get('BPMEXT', struct[i])
                mask_arr = (struct[bpmext].data
                            == 0) * (struct[i].data > minlevel)
            else:
                mask_arr = (struct[i].data < minlevel)

            #create the data
            coef = fit_surface(struct[i].data,
                               mask=mask_arr,
                               xorder=order,
                               yorder=order,
                               xyorder=0)
            y, x = np.indices(struct[i].data.shape)
            sf = surface(coef, xorder=order, yorder=order, xyorder=0)
            print(coef)
            struct[i].data = sf(x, y)

            #Account for variance frames
            if mask:
                struct[bpmext].data = struct[bpmext].data * 0

    return struct
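
A self-contained sketch of a masked low-order surface fit using plain NumPy least squares over a simple 1, x, y, x**2, y**2 basis; the pysalt fit_surface/surface helpers are not reproduced here, so this only illustrates the idea:

import numpy as np

def fit_simple_surface(data, mask, order=2):
    # least-squares fit of a separable polynomial surface to the masked pixels
    y, x = np.indices(data.shape)
    basis = [np.ones_like(data, dtype=float)]
    for k in range(1, order + 1):
        basis += [x.astype(float) ** k, y.astype(float) ** k]
    A = np.column_stack([b[mask] for b in basis])
    coef = np.linalg.lstsq(A, data[mask].astype(float), rcond=None)[0]
    model = sum(c * b for c, b in zip(coef, basis))
    return coef, model

data = np.fromfunction(lambda j, i: 5.0 + 0.1 * i + 0.05 * j, (32, 32))
mask = np.ones(data.shape, dtype=bool)    # keep every pixel in this toy case
coef, model = fit_simple_surface(data, mask)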
Example #31
def slotback(images,outfits,extension,imgtype='image',subbacktype='median',
             sigback=3,mbin=7,sorder=3,niter=5,ampperccd=2,ignorexp=6,
             clobber=False,logfile='salt.log',verbose=True):

    with logging(logfile,debug) as log:
        # set up the variables
        order=sorder
        plotfreq=0.01
        ftime=plotfreq
    
        # is the input file specified?
        infiles = saltio.argunpack ('Input',images)

        # is the output file specified?
        saltio.filedefined('Output',outfits)
    
        #open the first file and check its characteristics
        struct=saltio.openfits(infiles[0])

        # how many extensions?
        nextend=saltkey.get('NEXTEND',struct[0])
        if nextend < extension:
            msg='Insufficient number of extensions in %s' % (infiles[0])
            raise SaltIOError(msg)
    
        # how many amplifiers?
        amplifiers=saltkey.get('NCCDS',struct[0])
        amplifiers = int(ampperccd*float(amplifiers))
        if ampperccd>0:
            nframes = nextend/amplifiers
            nstep=amplifiers
        else:
            nframes = nextend
            nstep=1
        ntotal=nframes*len(infiles)
    
        # image size
        naxis1=saltkey.get('NAXIS1',struct[extension])
        naxis2=saltkey.get('NAXIS2',struct[extension])
    
        # CCD binning
        ccdsum=saltkey.get('CCDSUM',struct[0])
        binx=int(ccdsum.split(' ')[0])
        biny=int(ccdsum.split(' ')[1])
    
        # If a total file is to written out, create it and update it
        hdu = 1

        # delete the file if clobber is on and the file exists
        if os.path.isfile(outfits):
           if clobber:
               saltio.delete(outfits)
           else:
               raise SaltIOError('Output fits file '+outfits+' already exists. Use Clobber=yes to overwrite file')
    
        # create the output file
        try:
           hdu = pyfits.PrimaryHDU()
           hdu.header=struct[0].header
           hdu.header['NCCDS']=1
           hdu.header['NSCIEXT']=ntotal-ignorexp
           hdu.header['NEXTEND']=ntotal-ignorexp
           hduList = pyfits.HDUList(hdu)
           hduList.verify()
           hduList.writeto(outfits)
        except:
           raise SaltIOError('Could not create new fits file named '+outfits)
    
        # read it back in for updating
        hduList = saltio.openfits(outfits,mode='update')
    
        # set up the completeness tracker
        j=0
        if verbose:
            x2=float(j)/float(ntotal)
            ctext='Percentage Complete: %3.2f' % x2
            sys.stdout.write(ctext)
            sys.stdout.flush()
    
        for infile in infiles:
           struct=saltio.openfits(infile)

           # Skip through the frames and process each frame individually
           for i in range(nframes):
                if not (infile==infiles[0] and i < ignorexp):
                    hdu=extension+i*nstep
                    try:
                        header=struct[hdu].header
                        array=struct[hdu].data
                        array=array*1.0
                    except Exception as e:
                        msg='Unable to open extension %i in image %s because %s' % (hdu, infile, e)
                        raise SaltIOError(msg)
    
                    # start the analysis of each frame
                    # gain and readout noise
                    try:
                        gain=float(header['GAIN'])
                    except:
                        gain=1
                        log.warning('Gain not specified in image header')
                    try:
                        rdnoise=float(header['RDNOISE'])
                    except:
                        rdnoise=0
                        log.warning('RDNOISE not specified in image header')

                    # background subtraction
                    if not subbacktype=='none':
                       try:
                           narray=subbackground(array, sigback, mbin, order, niter, subbacktype)
                       except:
                           log.warning('Image '+infile+' extension '+str(i)+' is blank, skipping')
                           continue


                    # create output array
                    try:
                       if imgtype=='background':
                           oarray=narray-array
                       else:
                           oarray=narray
                    except:
                       oarray=array
    
                    # print progress
                    if verbose:
                       x2=float(j)/float(ntotal)
                       if x2 > ftime:
                           ctext='\b\b\b\b\b %3.2f' % x2
                           sys.stdout.write(ctext)
                           sys.stdout.flush()
                           ftime += plotfreq
    

                    # update the header values with the image name and extension number
                    try:
                       hdue = pyfits.ImageHDU(oarray)
                       hdue.header=header
                       hdue.header.update('ONAME',infile,'Original image name')
                       hdue.header.update('OEXT',hdu,'Original extension number')
                    except Exception as e:
                       msg='SALTPHOT--WARNING:  Could not update image in newfits file for %s ext %i because %s' \
                           % (infile, hdu, e)
                       raise SaltIOError(msg)

                    hduList.append(hdue)
                    j +=1
    
           # close FITS file
           saltio.closefits(struct)
    
        # close the output fits file:
        try:
           # write out the file
           hduList.flush()
           hduList.close()
        except Exception as e:
           raise SaltIOError('Failed to write %s because %s' % (outfits, e))
Example #32
def saltxtalk(images,
              outimages,
              outpref,
              xtalkfile=None,
              usedb=False,
              clobber=True,
              logfile='salt.log',
              verbose=True):

    #start logging
    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.listparse('Outfile', outimages, outpref, infiles, '')

        # are input and output lists the same length?
        saltio.comparelists(infiles, outfiles, 'Input', 'output')

        # does crosstalk coefficient data exist
        if usedb:
            xtalkfile = xtalkfile.strip()
            xdict = saltio.readxtalkcoeff(xtalkfile)
        else:
            xdict = None

        for img, oimg in zip(infiles, outfiles):

            #open the fits file
            struct = saltio.openfits(img)

            #find the best xcoeff for the image if using the db
            if usedb:
                obsdate = saltkey.get('DATE-OBS', struct[0])
                obsdate = int('%s%s%s' %
                              (obsdate[0:4], obsdate[5:7], obsdate[8:]))
                xkey = np.array(xdict.keys())
                date = xkey[abs(xkey - obsdate).argmin()]
                xcoeff = xdict[date]
            else:
                xcoeff = []

            # identify instrument
            instrume, keyprep, keygain, keybias, keyxtalk, keyslot = saltkey.instrumid(
                struct)

            # has file been prepared already?
            if saltkey.found(keyxtalk, struct[0]):
                message = '%s has already been xtalk corrected' % img
                raise SaltError(message)

            #apply the cross-talk correction
            struct = xtalk(struct, xcoeff, log=log, verbose=verbose)

            # housekeeping keywords
            fname, hist = history(level=1,
                                  wrap=False,
                                  exclude=['images', 'outimages', 'outpref'])
            saltkey.housekeeping(struct[0], 'SXTALK',
                                 'Images have been xtalk corrected', hist)

            # write FITS file
            saltio.writefits(struct, oimg, clobber=clobber)
            saltio.closefits(struct)
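
The usedb branch above picks the set of cross-talk coefficients whose date lies closest to DATE-OBS. A small stand-alone sketch of that nearest-date lookup (the dictionary contents are made up; the real values come from saltio.readxtalkcoeff):

import numpy as np

# illustrative calibration dictionary: date -> cross-talk coefficients
xdict = {20060621: [0.00087, 0.00087, 0.00109, 0.00109],
         20201123: [0.00090, 0.00090, 0.00112, 0.00112]}

obsdate = 20190402
xkey = np.array(list(xdict.keys()))        # list() keeps this Python 3 safe
date = xkey[abs(xkey - obsdate).argmin()]  # calibration date closest to the observation
xcoeff = xdict[date]
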
Example #39
def hducombine(hdu_list, outhdu, ext, method='average', datasec=None, reject=None, mask=False, weight=False, scale=None, statsec=None, blank=0, lthresh=3, hthresh=3):
   """Combine a set of images in imlist 

   """
   #set up i as the extension variable as a shortcut
   i=ext
   nimages=len(hdu_list)
   #set all the data arrays
   data_list=[]
   gain=np.zeros(nimages)
   rdnoise=np.zeros(nimages)
   varext=None
   bpmext=None


   #set the datasec to the full image if it is None
   if datasec is None:
      sh=outhdu[ext].data.shape
      y1,x1=(0,0)
      y2,x2=outhdu[i].data.shape
   else:
      y1,y2,x1,x2=datasec
        
   dshape=outhdu[i].data[y1:y2,x1:x2].shape
   dtype=outhdu[i].data[y1:y2,x1:x2].dtype
   wei_arr=outhdu[i].data[y1:y2,x1:x2]*0.0

   #check for variance frame
   if saltkey.found('VAREXT', outhdu[i]) and weight:
       varext=saltkey.get('VAREXT', outhdu[i])
       var_list=[]
   #check for bad pixel mask
   if saltkey.found('BPMEXT', outhdu[i]) and mask:
       bpmext=saltkey.get('BPMEXT',outhdu[i])
       bpm_list=[]

   #create the lists of arrays and scale the arrays if requested
   mean_scale=0
   data_arr=np.zeros((nimages, dshape[0], dshape[1]), dtype=dtype)
   for j in range(nimages):
       data_arr[j,:,:]=hdu_list[j][i].data[y1:y2,x1:x2]
       #calculate the scale 
       if scale:
          scale_val=CalculateScale(data_arr, scale, statsec)
          mean_scale += scale_val
       else:
          scale_val=1
          mean_scale+=1

       if varext:
           var_list.append(hdu_list[j][varext].data[y1:y2,x1:x2]/scale_val)
       if bpmext:
           bpm_list.append(hdu_list[j][bpmext].data[y1:y2,x1:x2])

       #get the gain and rdnoise
       if reject=='ccdclip':
           if saltkey.found('GAINMULT', hdu_list[j][i]):
               gain[j]=1
           else:
               gain[j]=saltkey.get('GAIN', hdu_list[j][i])
           rdnoise[j]=saltkey.get('RDNOISE', hdu_list[j][i])

   #convert the lists to arrays
   if varext:
       var_arr=np.array(var_list)
       ivar_arr=1.0/var_arr
   else:
       var_arr=None
       ivar_arr=None

   if bpmext:
       bpm_arr=np.array(bpm_list)
   else:
       bpm_arr=None
   
   #reject outliers if set
   bpm_arr=RejectArray(data_arr, reject=reject, var=var_arr, bpm=bpm_arr, \
                       lthresh=lthresh, hthresh=hthresh, gain=gain, rdnoise=rdnoise)

   #calculate the average values
   outdata, outwei=CombineArray(data_arr, method=method, ivar=ivar_arr, bpm=bpm_arr)
   outhdu[i].data[y1:y2,x1:x2]=outdata
   if scale is not None:
       mean_scale = mean_scale/nimages
       outhdu[i].data[y1:y2,x1:x2] *= mean_scale

   #create the combine variance frame
   if varext:
       outhdu[varext].data[y1:y2,x1:x2], tmp_arr=CombineArray(var_arr, method=method, ivar=ivar_arr, bpm=bpm_arr)
       del tmp_arr
       if scale is not None:
           outhdu[varext].data *= mean_scale

   #check to see if any of the pixels have no values and replace with blank
   #if wei_arr.any()==0:
   #    wmask=(wei_arr==0)
   #    outhdu[i].data[wmask]=blank


   #create the combine BPM frames
   if bpmext:
       outhdu[bpmext].data=1.0*(wei_arr==0)

   return outhdu
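
CombineArray is not shown here, but for method='average' with a bad-pixel mask it amounts to a masked mean over the image stack. An illustrative sketch of that operation with numpy masked arrays (a simplification: the real routine also supports medians, inverse-variance weighting and rejection):

import numpy as np

def combine_average(data_arr, bpm_arr=None):
    # data_arr: (nimages, ny, nx); bpm_arr: same shape, nonzero means bad pixel
    if bpm_arr is None:
        bpm_arr = np.zeros_like(data_arr)
    marr = np.ma.masked_array(data_arr, mask=(bpm_arr > 0))
    outdata = marr.mean(axis=0).filled(0)   # pixels with no good values fall back to 0
    outwei = (~marr.mask).sum(axis=0)       # number of good pixels used at each position
    return outdata, outwei

data = np.random.normal(100.0, 5.0, (3, 4, 4))
bpm = np.zeros_like(data)
bpm[0, 1, 1] = 1                            # flag one bad pixel in the first frame
avg, wei = combine_average(data, bpm)
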
Example #40
def specslit(image, outimage, outpref, exttype='auto', slitfile='', outputslitfile='',
             regprefix='', sections=3, width=25, sigma=2.2, thres=6, order=3, padding=5, yoffset=0,
             inter=False, clobber=True, logfile='salt.log', verbose=True):

    with logging(logfile, debug) as log:

        # check all the input and make sure that all the input needed is provided
        # by the user

        # read the image or image list and check if each in the list exist
        infiles = saltio.argunpack('Input', image)

        # unpack the outfiles
        outfiles = saltio.listparse(
            'Outimages',
            outimage,
            outpref,
            infiles,
            '')

        # from the extraction type, check whether the input file is specified.
        # if the slitfile parameter is specified then use the slit files for
        # the extraction. if the extraction type is auto then use image for the
        # detection and the slit extraction

        if exttype == 'rsmt' or exttype == 'fits' or exttype == 'ascii' or exttype == 'ds9':
            slitfiles = saltio.argunpack('Slitfile', slitfile)
            if len(slitfiles) == 1:
                slitfiles = slitfiles * len(infiles)
            saltio.comparelists(infiles, slitfiles, 'image', 'slitfile')
        elif exttype == 'auto':
            slitfiles = infiles
            log.message(
                'Extraction type is AUTO. Slit detection will be done from image')

        # read in if an optional ascii file is requested
        if len(outputslitfile) > 0:
            outslitfiles = saltio.argunpack('Outslitfiles', outputslitfile)
            saltio.comparelists(
                infiles,
                outslitfiles,
                'image',
                'outputslitfile')
        else:
            outslitfiles = [''] * len(infiles)

        # check if the width and sigma parameters were specified.
        # default is 25 and 2.2
        if width < 10.:
            msg = 'The width parameter needs to be a value larger than 10'
            raise SALTSpecError(msg)

        if sigma < 0.0:
            msg = 'Sigma must be greater than zero'
            raise SALTSpecError(msg)

        # check the threshold parameter. this needs to be specified by the user
        if thres <= 0.0:
            msg = 'Threshold must be greater than zero'
            raise SALTSpecError(msg)

        # check to make sure that the sections are greater than the order
        if sections <= order:
            msg = 'Number of sections must be greater than the order for the spline fit'
            raise SALTSpecError(msg)

        # run through each of the images and extract the slits
        for img, oimg, sfile, oslit in zip(
                infiles, outfiles, slitfiles, outslitfiles):
            log.message('Processing image %s' % img)

            # open the image
            struct = saltio.openfits(img)
            ylen, xlen = struct[1].data.shape
            xbin, ybin = saltkey.ccdbin(struct[0], img)
            # setup the VARIANCE and BPM frames
            if saltkey.found('VAREXT', struct[1]):
                varext = saltkey.get('VAREXT', struct[1])
                varlist = []
            else:
                varext = None

            # setup the BPM frames
            if saltkey.found('BPMEXT', struct[1]):
                bpmext = saltkey.get('BPMEXT', struct[1])
                bpmlist = []
            else:
                bpmext = None

            # open the slit definition file or identify the slits in the image
            slitmask = None
            ycheck = False
            if exttype == 'rsmt':
                log.message('Using slits from %s' % sfile)
                if yoffset is None:
                   yoffset = 0 
                   ycheck = True
                slitmask = mt.read_slitmask_from_xml(sfile)
                xpos = -0.3066
                ypos = 0.0117
                cx = int(xlen / 2.0)
                cy = int(ylen / 2.0) + ypos / 0.015 / ybin + yoffset
                order, slit_positions = mt.convert_slits_from_mask(
                    slitmask, order=1, xbin=xbin, ybin=ybin, pix_scale=0.1267, cx=cx, cy=cy)
                sections = 1
            elif exttype == 'fits':
                log.message('Using slits from %s' % sfile)
                order, slit_positions = read_slits_from_fits(sfile)
            elif exttype == 'ascii':
                log.message('Using slits from %s' % sfile)
                order, slit_positions = mt.read_slits_from_ascii(sfile)
            elif exttype == 'ds9':
                log.message('Using slits from %s' % sfile)
                order, slit_positions, slitmask = mt.read_slits_from_ds9(
                    sfile, order=order)
                slitmask = None
                sections = 1
            elif exttype == 'auto':
                log.message('Identifying slits in %s' % img)
                # identify the slits in the image
                order, slit_positions = identify_slits(
                    struct[1].data, order, sections, width, sigma, thres)

                # write out the slit identifications if ofile has been supplied
                if oslit:
                    log.message('Writing slit positions to %s' % oslit)
                    mt.write_outputslitfile(slit_positions, oslit, order)

            if ycheck:
               slit_positions, dy = check_ypos(slit_positions, struct[1].data) 
               log.message('Using an offset of {}'.format(dy))

            # extract the slits
            spline_x = mt.divide_image(struct[1].data, sections)
            spline_x = 0.5 * (np.array(spline_x[:-1]) + np.array(spline_x[1:]))
            extracted_spectra, spline_positions = mt.extract_slits(slit_positions,
                                                                   spline_x, struct[1].data, order=order, padding=padding)
            if varext:
                extracted_var, var_positions = mt.extract_slits(slit_positions,
                                                                spline_x, struct[varext].data, order=order, padding=padding)
            if bpmext:
                extracted_bpm, bpm_positions = mt.extract_slits(slit_positions,
                                                                spline_x, struct[bpmext].data, order=order, padding=padding)

            # write out the data to the new array
            # create the new file
            hdulist = fits.HDUList([struct[0]])

            # log the extracted spectra if needed
            log.message('', with_stdout=verbose)

            # setup output ds9 file
            if regprefix:
                regout = open(
                    regprefix +
                    os.path.basename(img).strip('.fits') +
                    '.reg',
                    'w')
                regout.write('# Region file format: DS9 version 4.1\n')
                regout.write('# Filename: %s\n' % img)
                regout.write(
                    'global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\nphysical\n')

            # add each
            imglist = []
            nslits = len(spline_positions)
            for i in range(nslits):
                y1 = spline_positions[i][0].min()
                y2 = spline_positions[i][1].max()
                msg = 'Extracted Spectra %i between %i to %i' % (i + 1, y1, y2)
                # log.message(msg, with_header=False, with_stdout=verbose)
                sdu = fits.ImageHDU(
                    extracted_spectra[i],
                    header=struct[1].header)
                if varext:
                    vdu = fits.ImageHDU(
                        extracted_var[i],
                        header=struct[varext].header)
                    sdu.header['VAREXT'] = i + nslits + 1
                    varlist.append(vdu)
                if bpmext:
                    bdu = fits.ImageHDU(
                        extracted_bpm[i],
                        header=struct[bpmext].header)
                    sdu.header['BPMEXT']= i + 2 * nslits + 1
                    bpmlist.append(bdu)
                imglist.append(sdu)

                # add in some additional keywords
                imglist[i].header['MINY'] = (y1,
                    'Lower Y value in original image')
                imglist[i].header['MAXY'] = (y2,
                    'Upper Y value in original image')
                if regprefix:
                    xsize = struct[1].data.shape[1]
                    xsize = int(0.5 * xsize)
                    rtext = ''
                    if slitmask:
                        # rtext='%s, %8.7f, %8.7f, %3.2f' % (slitmask.slitlets.data[i]['name'], slitmask.slitlets.data[i]['targ_ra'], slitmask.slitlets.data[i]['targ_dec'], slitmask.slitlets.data[i]['slit_width'])
                        pass
                    regout.write('box(%i,%i, %i, %i) #text={%s}\n' % (
                        xsize, 0.5 * (y1 + y2), 2 * xsize, y2 - y1, rtext))

                # add slit information
                if slitmask:
                    imglist[i].header['SLITNAME'] = (slitmask.slitlets.data[i]['name'],
                        'Slit Name')
                    imglist[i].header['SLIT_RA'] = (slitmask.slitlets.data[i]['targ_ra'],
                        'Slit RA')
                    imglist[i].header['SLIT_DEC'] = (slitmask.slitlets.data[i]['targ_dec'],
                        'Slit DEC')
                    imglist[i].header['SLIT'] = (slitmask.slitlets.data[i]['slit_width'],
                        'Slit Width')

            # add to the hdulist
            hdulist += imglist
            if varext:
                hdulist += varlist
            if bpmext:
                hdulist += bpmlist

            # write the slit positions to the header
            # create the binary table HDU that contains the slit positions
            tbhdu = mt.slits_HDUtable(slit_positions, order)
            bintable_hdr = tbhdu.header

            # add the extname parameter to the extension
            tbhdu.header['EXTNAME'] = 'BINTABLE'

            # record the extension number of the slit table in the primary header
            hdulist[0].header['SLITEXT'] = len(hdulist)
            hdulist.append(tbhdu)

            # add additional header information about the mask
            if slitmask:
                hdulist[0].header['MASKNAME'] = (slitmask.mask_name, 'SlitMask Name')
                hdulist[0].header['MASK_RA'] = (slitmask.center_ra,
                    'SlitMask RA')
                hdulist[0].header['MASK_DEC'] = ( slitmask.center_dec,
                    'SlitMask DEC')
                hdulist[0].header['MASK_PA'] = ( slitmask.position_angle,
                    'SlitMask Position Angle')

            # write out the image
            saltio.writefits(hdulist, oimg, clobber)
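
The regprefix block above writes one DS9 box region per extracted slit, centred on the slit in the original image. A minimal, self-contained sketch of that output step (file names and slit ranges are illustrative):

def write_slit_regions(regname, img, slit_ranges, xsize):
    # slit_ranges: list of (y1, y2) row ranges of the slits in the original image
    with open(regname, 'w') as regout:
        regout.write('# Region file format: DS9 version 4.1\n')
        regout.write('# Filename: %s\n' % img)
        regout.write('global color=green select=1\nphysical\n')
        for y1, y2 in slit_ranges:
            regout.write('box(%i,%i, %i, %i) #text={%s}\n' %
                         (int(0.5 * xsize), 0.5 * (y1 + y2), xsize, y2 - y1, ''))

write_slit_regions('slits.reg', 'image.fits', [(100, 140), (210, 260)], 3096)
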
Example #41
def gain(struct,
         mult=True,
         usedb=False,
         dblist=None,
         ampccd=2,
         log=None,
         verbose=True):
    """gain processes a image hduList and gain corrects each amplifier.  It can
      either use gain settings in the header or those supplied in a config file
      which would be suppleid in the dblist (see helpfile for structure of the
      config file).  If variance frames exist, it will update those for changes 
      in the header value as well.   

      In the end, it will update the gain with a value of one signfing the data
      has been transformed into e- from ADU

      The program will look for the non-linear gain settings which are given by:
      e  = GAIN*(1 + GAIN1*E-6*ADU)*ADU

      mult--if true, multiple the gains
      usedb--use the values in the dblist, if false use the header values
      dblist--values for the gain and readnoise from the 
      ampccd--number of amplifiers per ccd

      dblist should have the following lists:  speed, rate, gain, noise, bias, amp
   """
    #get the infile name
    infile = saltkey.getimagename(struct[0])

    #how many science extensions
    nsciext = saltkey.get('NSCIEXT', struct[0])
    #how many data extensions
    nextend = saltkey.get('NEXTEND', struct[0])

    # how many amplifiers?
    amplifiers = ampccd * saltkey.get('NCCDS', struct[0])

    #read the gain and rospeed for the image
    gainset = saltkey.get('GAINSET', struct[0])
    rospeed = saltkey.get('ROSPEED', struct[0])

    #loop through each amplifier and gain correct it
    if log:
        message = '%28s %6s %5s %3s %5s %5s' \
            % ('HDU','GAIN','SPEED','AMP','GAIN','NOISE')
        log.message('\n      ---------------------------------------------------', \
                    with_header=False, with_stdout=verbose)
        log.message(message, with_header=False, with_stdout=verbose)
        log.message('      ---------------------------------------------------', \
                    with_header=False, with_stdout=verbose)
    for i in range(nsciext):
        hdu = i + 1
        amp = i % amplifiers + 1
        #get the gain and rdnoise values for the array
        if usedb:
            gain, rdnoise = get_values(dblist, gainset, rospeed, amp)
            gain1 = 0
        else:
            gain = saltkey.get('GAIN', struct[hdu])
            rdnoise = saltkey.get('RDNOISE', struct[hdu])
            try:
                gain1 = saltkey.get('GAIN1', struct[hdu])
            except:
                gain1 = 0

        if mult:
            #correct the gain
            gainmult = 1
            try:
                data = struct[hdu].data
                struct[hdu].data = gain * data * (1 + gain1 * 1e-6 * data)
            except Exception as e:
                msg = 'Cannot gain correct %s[%i] because %s' % (infile, hdu,
                                                                 e)
                raise SaltError(msg)

            #correct the variance frame
            if saltkey.found('VAREXT', struct[hdu]):
                vhdu = saltkey.get('VAREXT', struct[hdu])
                try:
                    vdata = struct[vhdu].data
                    struct[vhdu].data = vdata * gain * (
                        1 + 2 * gain1 * 1e-6 * data)
                except Exception as e:
                    msg = 'Cannot update the variance frame in %s[%i] because %s' % (
                        infile, vhdu, e)
                    raise SaltError(msg)
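
The docstring's non-linear correction, e = GAIN*(1 + GAIN1*1E-6*ADU)*ADU, has derivative GAIN*(1 + 2*GAIN1*1E-6*ADU) with respect to ADU, which is the factor applied to the variance frame above. A short numerical sketch (the gain values are illustrative):

import numpy as np

gain, gain1 = 1.6, 2.5                      # illustrative GAIN and GAIN1 header values
adu = np.array([100.0, 1000.0, 30000.0])

electrons = gain * adu * (1.0 + gain1 * 1e-6 * adu)      # corrected counts in e-
var_factor = gain * (1.0 + 2.0 * gain1 * 1e-6 * adu)     # factor applied to the variance frame
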
Example #42
def hducombine(hdu_list,
               outhdu,
               ext,
               method='average',
               datasec=None,
               reject=None,
               mask=False,
               weight=False,
               scale=None,
               statsec=None,
               blank=0,
               lthresh=3,
               hthresh=3):
    """Combine a set of images in imlist 

   """
    #set up i as the extension variable as a shortcut
    i = ext
    nimages = len(hdu_list)
    #set all the data arrays
    data_list = []
    gain = np.zeros(nimages)
    rdnoise = np.zeros(nimages)
    varext = None
    bpmext = None

    #set the datasec to the full image if it is None
    if datasec is None:
        sh = outhdu[ext].data.shape
        y1, x1 = (0, 0)
        y2, x2 = outhdu[i].data.shape
    else:
        y1, y2, x1, x2 = datasec

    dshape = outhdu[i].data[y1:y2, x1:x2].shape
    dtype = outhdu[i].data[y1:y2, x1:x2].dtype
    wei_arr = outhdu[i].data[y1:y2, x1:x2] * 0.0

    #check for variance frame
    if saltkey.found('VAREXT', outhdu[i]) and weight:
        varext = saltkey.get('VAREXT', outhdu[i])
        var_list = []
    #check for bad pixel mask
    if saltkey.found('BPMEXT', outhdu[i]) and mask:
        bpmext = saltkey.get('BPMEXT', outhdu[i])
        bpm_list = []

    #create the lists of arrays and scale the arrays if requested
    mean_scale = 0
    data_arr = np.zeros((nimages, dshape[0], dshape[1]), dtype=dtype)
    for j in range(nimages):
        data_arr[j, :, :] = hdu_list[j][i].data[y1:y2, x1:x2]
        #calculate the scale
        if scale:
            scale_val = CalculateScale(data_arr, scale, statsec)
            mean_scale += scale_val
        else:
            scale_val = 1
            mean_scale += 1

        if varext:
            var_list.append(hdu_list[j][varext].data[y1:y2, x1:x2] / scale_val)
        if bpmext:
            bpm_list.append(hdu_list[j][bpmext].data[y1:y2, x1:x2])

        #get the gain and rdnoise
        if reject == 'ccdclip':
            if saltkey.found('GAINMULT', hdu_list[j][i]):
                gain[j] = 1
            else:
                gain[j] = saltkey.get('GAIN', hdu_list[j][i])
            rdnoise[j] = saltkey.get('RDNOISE', hdu_list[j][i])

    #convert the lists to arrays
    if varext:
        var_arr = np.array(var_list)
        ivar_arr = 1.0 / var_arr
    else:
        var_arr = None
        ivar_arr = None

    if bpmext:
        bpm_arr = np.array(bpm_list)
    else:
        bpm_arr = None

    #reject outliers if set
    bpm_arr=RejectArray(data_arr, reject=reject, var=var_arr, bpm=bpm_arr, \
                        lthresh=lthresh, hthresh=hthresh, gain=gain, rdnoise=rdnoise)

    #calculate the average values
    outdata, outwei = CombineArray(data_arr,
                                   method=method,
                                   ivar=ivar_arr,
                                   bpm=bpm_arr)
    outhdu[i].data[y1:y2, x1:x2] = outdata
    if scale is not None:
        mean_scale = mean_scale / nimages
        outhdu[i].data[y1:y2, x1:x2] *= mean_scale

    #create the combine variance frame
    if varext:
        outhdu[varext].data[y1:y2,
                            x1:x2], tmp_arr = CombineArray(var_arr,
                                                           method=method,
                                                           ivar=ivar_arr,
                                                           bpm=bpm_arr)
        del tmp_arr
        if scale is not None:
            outhdu[varext].data *= mean_scale

    #check to see if any of the pixels have no values and replace with blank
    #if wei_arr.any()==0:
    #    wmask=(wei_arr==0)
    #    outhdu[i].data[wmask]=blank

    #create the combine BPM frames
    if bpmext:
        outhdu[bpmext].data = 1.0 * (wei_arr == 0)

    return outhdu
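
RejectArray is not shown here, but the reject='ccdclip' option selected above is conventionally a clip against the CCD noise model built from the gain and read noise. An illustrative sketch of such a rejection (the real routine may differ in detail; the values below are made up):

import numpy as np

def ccdclip_reject(data_arr, gain, rdnoise, lthresh=3.0, hthresh=3.0):
    # data_arr in ADU, shape (nimages, ny, nx); returns a boolean bad-pixel mask
    med = np.median(data_arr, axis=0)
    # noise model in ADU: sigma^2 = (rdnoise/gain)^2 + counts/gain
    sigma = np.sqrt((rdnoise / gain) ** 2 + np.clip(med, 0, None) / gain)
    dev = data_arr - med
    return (dev > hthresh * sigma) | (dev < -lthresh * sigma)

data = np.random.normal(500.0, 20.0, (5, 8, 8))
data[2, 4, 4] = 5000.0                      # inject a cosmic-ray-like outlier
bad = ccdclip_reject(data, gain=1.6, rdnoise=3.5)
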
Example #43
def xtalk(struct, xcoeff, namps=2, log=None, verbose=False):
    """xtalk cross-talk corrects the amplifies.  It takes 

    """
    infile = saltkey.getimagename(struct[0])

    # how many extensions?
    nsciext = saltkey.get('NSCIEXT', struct[0])
    nextend = saltkey.get('NEXTEND', struct[0])
    nccd = saltkey.get('NCCDS', struct[0])

    # how many amplifiers?--this is hard wired
    amplifiers = namps * nccd

    #setup the log
    if log:
        message = '%28s %23s' % ('HDU', 'Correction')
        message += '\n      ----------------------------------------------'
        log.message(message, with_header=False, with_stdout=verbose)

    #Loop over the image extensions and subtract one
    #set from the other--still hardwired at 2
    for i in range(1, nsciext, 2):
        if struct[i].name == 'SCI' and struct[i + 1].name == 'SCI':
            #set up the first amplifier
            dat1 = struct[i].data.copy()
            ext1 = saltkey.get('EXTVER', struct[i])
            if xcoeff:
                j = (ext1 - 1) % amplifiers
                xc1 = float(xcoeff[j])
            else:
                xc1 = 1.0 / saltkey.get('XTALK', struct[i])

            #set up the second amplifier
            dat2 = struct[i + 1].data.copy()
            ext2 = saltkey.get('EXTVER', struct[i + 1])
            if xcoeff:
                j = (ext2 - 1) % amplifiers
                xc2 = float(xcoeff[j])
            else:
                xc2 = 1.0 / saltkey.get('XTALK', struct[i + 1])

            #subtract one from the other
            struct[i].data = struct[i].data - xc2 * dat2[:, ::-1]
            struct[i + 1].data = struct[i + 1].data - xc1 * dat1[:, ::-1]

            #correct the variance frame
            if saltkey.found('VAREXT', struct[i]):
                vhdu1 = saltkey.get('VAREXT', struct[i])
                vhdu2 = saltkey.get('VAREXT', struct[i + 1])
                try:
                    struct[vhdu1].data += xc2 * struct[vhdu2].data
                    struct[vhdu2].data += xc1 * struct[vhdu1].data
                except Exception as e:
                    msg = 'Cannot update the variance frame in %s[%i] because %s' % (
                        infile, vhdu1, e)
                    raise SaltError(msg)

            #print the message
            if log:
                message = '%25s[%1d]  Amp%1d - Amp%1d * %8.6f' % \
                         (infile, i, ext1, ext2, xc2)
                log.message(message, with_header=False, with_stdout=verbose)
                message = '%25s[%1d]  Amp%1d - Amp%1d * %8.6f' % \
                         (infile, i+1, ext2, ext1, xc1)
                log.message(message, with_header=False, with_stdout=verbose)
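
The correction above subtracts a scaled, left-right flipped copy of the neighbouring amplifier from each amplifier, using copies so that both corrections start from the uncorrected data. A toy sketch of that step (the coefficients are illustrative):

import numpy as np

amp1 = np.random.normal(1000.0, 10.0, (4, 6))
amp2 = np.random.normal(1000.0, 10.0, (4, 6))
xc1, xc2 = 0.0009, 0.0011                   # illustrative cross-talk coefficients

dat1, dat2 = amp1.copy(), amp2.copy()
amp1 = amp1 - xc2 * dat2[:, ::-1]           # remove the mirrored ghost of amp2 from amp1
amp2 = amp2 - xc1 * dat1[:, ::-1]           # remove the mirrored ghost of amp1 from amp2
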
Example #44
def make_mosaic(struct,
                gap,
                xshift,
                yshift,
                rotation,
                interp_type='linear',
                boundary='constant',
                constant=0,
                geotran=True,
                fill=False,
                cleanup=True,
                log=None,
                verbose=False):
    """Given a SALT image struct, combine each of the individual amplifiers and
        apply the geometric CCD transformations to the image
    """

    # get the name of the file
    infile = saltkey.getimagename(struct[0], base=True)
    outpath = './'

    # identify instrument
    instrume, keyprep, keygain, keybias, keyxtalk, keyslot = \
        saltkey.instrumid(struct)

    # how many amplifiers?
    nsciext = saltkey.get('NSCIEXT', struct[0])
    nextend = saltkey.get('NEXTEND', struct[0])
    nccds = saltkey.get('NCCDS', struct[0])
    amplifiers = nccds * 2

    if nextend > nsciext:
        varframe = True
    else:
        varframe = False

    # CCD geometry coefficients
    if (instrume == 'RSS' or instrume == 'PFIS'):
        xsh = [0., xshift[0], 0., xshift[1]]
        ysh = [0., yshift[0], 0., yshift[1]]
        rot = [0., rotation[0], 0., rotation[1]]
    elif instrume == 'SALTICAM':
        xsh = [0., xshift[0], 0.]
        ysh = [0., yshift[0], 0.]
        rot = [0., rotation[0], 0]

    # how many extensions?
    nextend = saltkey.get('NEXTEND', struct[0])

    # CCD on-chip binning
    xbin, ybin = saltkey.ccdbin(struct[0])

    # create temporary primary extension
    outstruct = []
    outstruct.append(struct[0])
    # define temporary FITS file to store tiled CCDs

    tilefile = saltio.tmpfile(outpath)
    tilefile += 'tile.fits'
    if varframe:
        tilehdu = [None] * (3 * int(nsciext / 2) + 1)
    else:
        tilehdu = [None] * int(nsciext / 2 + 1)
    tilehdu[0] = fits.PrimaryHDU()
    #tilehdu[0].header = struct[0].header

    if log:
        log.message('', with_stdout=verbose)

    # iterate over amplifiers, stitch them to produce a file of CCD images
    for i in range(int(nsciext / 2)):
        hdu = i * 2 + 1
        # amplifier = hdu%amplifiers
        # if (amplifier == 0): amplifier = amplifiers

        # read DATASEC keywords
        datasec1 = saltkey.get('DATASEC', struct[hdu])
        datasec2 = saltkey.get('DATASEC', struct[hdu + 1])
        xdsec1, ydsec1 = saltstring.secsplit(datasec1)
        xdsec2, ydsec2 = saltstring.secsplit(datasec2)

        # read images
        imdata1 = saltio.readimage(struct, hdu)
        imdata2 = saltio.readimage(struct, hdu + 1)

        # tile 2n amplifiers to yield n CCD images
        outdata = numpy.zeros(
            (int(ydsec1[1] + abs(ysh[i + 1] / ybin)),
             int(xdsec1[1] + xdsec2[1] + abs(xsh[i + 1] / xbin))),
            numpy.float32)

        # set up the variance frame
        if varframe:
            vardata = outdata.copy()
            vdata1 = saltio.readimage(struct, struct[hdu].header['VAREXT'])
            vdata2 = saltio.readimage(struct, struct[hdu + 1].header['VAREXT'])

            bpmdata = outdata.copy()
            bdata1 = saltio.readimage(struct, struct[hdu].header['BPMEXT'])
            bdata2 = saltio.readimage(struct, struct[hdu + 1].header['BPMEXT'])

        x1 = xdsec1[0] - 1
        if x1 != 0:
            msg = 'The data in %s have not been trimmed prior to mosaicking.' \
                  % infile
            log.error(msg)
        if xsh[i + 1] < 0:
            x1 += int(abs(xsh[i + 1] / xbin))
        x2 = x1 + xdsec1[1]
        y1 = ydsec1[0] - 1
        if ysh[i + 1] < 0:
            y1 += int(abs(ysh[i + 1] / ybin))
        y2 = y1 + ydsec1[1]
        outdata[y1:y2, x1:x2] =\
            imdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        if varframe:
            vardata[y1:y2, x1:x2] =\
                vdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]
            bpmdata[y1:y2, x1:x2] =\
                bdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        x1 = x2
        x2 = x1 + xdsec2[1]
        y1 = ydsec2[0] - 1
        if ysh[i + 1] < 0:
            y1 += abs(ysh[i + 1] / ybin)
        y2 = y1 + ydsec2[1]
        outdata[y1:y2, x1:x2] =\
            imdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        if varframe:
            vardata[y1:y2, x1:x2] =\
                vdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]
            bpmdata[y1:y2, x1:x2] =\
                bdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        # size of new image
        naxis1 = str(xdsec1[1] + xdsec2[1])
        naxis2 = str(ydsec1[1])

        # add image and keywords to HDU list
        tilehdu[i + 1] = fits.ImageHDU(outdata)
        tilehdu[i + 1].header = struct[hdu].header
        #tilehdu[
        #    i + 1].header['DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

        if varframe:
            vext = i + 1 + int(nsciext / 2.)
            tilehdu[vext] = fits.ImageHDU(vardata)
            #tilehdu[vext].header = struct[struct[hdu].header['VAREXT']].header
            #tilehdu[vext].header[
            #    'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

            bext = i + 1 + 2 * int(nsciext / 2.)
            tilehdu[bext] = fits.ImageHDU(bpmdata)
            #tilehdu[bext].header = struct[struct[hdu].header['BPMEXT']].header
            #tilehdu[bext].header[
            #    'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

        # image tile log message #1
        if log:
            message = os.path.basename(infile) + '[' + str(hdu) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '
            message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + ']'
            log.message(message, with_stdout=verbose, with_header=False)
            message = os.path.basename(infile) + '[' + str(hdu + 1) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '
            message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['
            message += str(xdsec1[1] + 1) + ':' + \
                str(xdsec1[1] + xdsec2[1]) + ','
            message += str(ydsec2[0]) + ':' + str(ydsec2[1]) + ']'
            log.message(message, with_stdout=verbose, with_header=False)

    # write temporary file of tiled CCDs
    hdulist = fits.HDUList(tilehdu)
    hdulist.writeto(tilefile)

    # iterate over CCDs, transform and rotate images
    yrot = [None] * 4
    xrot = [None] * 4

    tranfile = [' ']
    tranhdu = [0]
    if varframe:
        tranfile = [''] * (3 * int(nsciext / 2) + 1)
        tranhdu = [0] * (3 * int(nsciext / 2) + 1)
    else:
        tranfile = [''] * int(nsciext / 2 + 1)
        tranhdu = [0] * int(nsciext / 2 + 1)

    # this is hardwired for SALT where the second CCD is considered the
    # fiducial
    for hdu in range(1, int(nsciext / 2 + 1)):
        tranfile[hdu] = saltio.tmpfile(outpath)
        tranfile[hdu] += 'tran.fits'
        if varframe:
            tranfile[hdu + nccds] = saltio.tmpfile(outpath) + 'tran.fits'
            tranfile[hdu + 2 * nccds] = saltio.tmpfile(outpath) + 'tran.fits'

        ccd = hdu % nccds
        if (ccd == 0):
            ccd = nccds

        # correct rotation for CCD binning
        yrot[ccd] = rot[ccd] * ybin / xbin
        xrot[ccd] = rot[ccd] * xbin / ybin
        dxshift = xbin * int(float(int(gap) / xbin) + 0.5) - gap

        # transformation using geotran IRAF task
        # if (ccd == 1):
        if (ccd != 2):

            if geotran:
                message = '\nSALTMOSAIC -- geotran ' + tilefile + \
                    '[' + str(ccd) + '] ' + tranfile[hdu]
                message += ' \"\" \"\" xshift=' + \
                    str((xsh[ccd] + (2 - ccd) * dxshift) / xbin) + ' '
                message += 'yshift=' + \
                    str(ysh[ccd] / ybin) + ' xrotation=' + str(xrot[ccd]) + ' '
                message += 'yrotation=' + \
                    str(yrot[ccd]) + ' xmag=1 ymag=1 xmin=\'INDEF\''
                message += 'xmax=\'INDEF\' ymin=\'INDEF\' ymax=\'INDEF\' '
                message += 'ncols=\'INDEF\' '
                message += 'nlines=\'INDEF\' verbose=\'no\' '
                message += 'fluxconserve=\'yes\' nxblock=2048 '
                message += 'nyblock=2048 interpolant=\'' + \
                    interp_type + '\' boundary=\'constant\' constant=0'
                log.message(message, with_stdout=verbose)

                yd, xd = tilehdu[ccd].data.shape
                ncols = 'INDEF'  # ncols=xd+abs(xsh[ccd]/xbin)
                nlines = 'INDEF'  # nlines=yd+abs(ysh[ccd]/ybin)
                geo_xshift = xsh[ccd] + (2 - ccd) * dxshift / xbin
                geo_yshift = ysh[ccd] / ybin
                iraf.images.immatch.geotran(tilefile + "[" + str(ccd) + "]",
                                            tranfile[hdu],
                                            "",
                                            "",
                                            xshift=geo_xshift,
                                            yshift=geo_yshift,
                                            xrotation=xrot[ccd],
                                            yrotation=yrot[ccd],
                                            xmag=1,
                                            ymag=1,
                                            xmin='INDEF',
                                            xmax='INDEF',
                                            ymin='INDEF',
                                            ymax='INDEF',
                                            ncols=ncols,
                                            nlines=nlines,
                                            verbose='no',
                                            fluxconserve='yes',
                                            nxblock=2048,
                                            nyblock=2048,
                                            interpolant="linear",
                                            boundary="constant",
                                            constant=0)
                if varframe:
                    var_infile = tilefile + "[" + str(ccd + nccds) + "]"
                    iraf.images.immatch.geotran(var_infile,
                                                tranfile[hdu + nccds],
                                                "",
                                                "",
                                                xshift=geo_xshift,
                                                yshift=geo_yshift,
                                                xrotation=xrot[ccd],
                                                yrotation=yrot[ccd],
                                                xmag=1,
                                                ymag=1,
                                                xmin='INDEF',
                                                xmax='INDEF',
                                                ymin='INDEF',
                                                ymax='INDEF',
                                                ncols=ncols,
                                                nlines=nlines,
                                                verbose='no',
                                                fluxconserve='yes',
                                                nxblock=2048,
                                                nyblock=2048,
                                                interpolant="linear",
                                                boundary="constant",
                                                constant=0)
                    var2_infile = tilefile + "[" + str(ccd + 2 * nccds) + "]"
                    iraf.images.immatch.geotran(var2_infile,
                                                tranfile[hdu + 2 * nccds],
                                                "",
                                                "",
                                                xshift=geo_xshift,
                                                yshift=geo_yshift,
                                                xrotation=xrot[ccd],
                                                yrotation=yrot[ccd],
                                                xmag=1,
                                                ymag=1,
                                                xmin='INDEF',
                                                xmax='INDEF',
                                                ymin='INDEF',
                                                ymax='INDEF',
                                                ncols=ncols,
                                                nlines=nlines,
                                                verbose='no',
                                                fluxconserve='yes',
                                                nxblock=2048,
                                                nyblock=2048,
                                                interpolant="linear",
                                                boundary="constant",
                                                constant=0)

                # open the file and copy the data to tranhdu
                tstruct = fits.open(tranfile[hdu])
                tranhdu[hdu] = tstruct[0].data
                tstruct.close()
                if varframe:
                    tranhdu[hdu + nccds] = fits.open(tranfile[hdu +
                                                              nccds])[0].data
                    tranhdu[hdu + 2 * nccds] = fits.open(
                        tranfile[hdu + 2 * nccds])[0].data

            else:
                log.message("Transform CCD #%i using dx=%s, dy=%s, rot=%s" %
                            (ccd, xsh[ccd] / 2.0, ysh[ccd] / 2.0, xrot[ccd]),
                            with_stdout=verbose,
                            with_header=False)
                tranhdu[hdu] = geometric_transform(
                    tilehdu[ccd].data,
                    tran_func,
                    prefilter=False,
                    order=1,
                    extra_arguments=(xsh[ccd] / 2, ysh[ccd] / 2, 1, 1,
                                     xrot[ccd], yrot[ccd]))
                tstruct = fits.PrimaryHDU(tranhdu[hdu])
                tstruct.writeto(tranfile[hdu])
                if varframe:
                    tranhdu[hdu + nccds] = geometric_transform(
                        tilehdu[hdu + 3].data,
                        tran_func,
                        prefilter=False,
                        order=1,
                        extra_arguments=(xsh[ccd] / 2, ysh[ccd] / 2, 1, 1,
                                         xrot[ccd], yrot[ccd]))
                    tranhdu[hdu + 2 * nccds] = geometric_transform(
                        tilehdu[hdu + 6].data,
                        tran_func,
                        prefilter=False,
                        order=1,
                        extra_arguments=(xsh[ccd] / 2, ysh[ccd] / 2, 1, 1,
                                         xrot[ccd], yrot[ccd]))

        else:
            log.message("Transform CCD #%i using dx=%s, dy=%s, rot=%s" %
                        (ccd, 0, 0, 0),
                        with_stdout=verbose,
                        with_header=False)
            tranhdu[hdu] = tilehdu[ccd].data
            if varframe:
                tranhdu[hdu + nccds] = tilehdu[ccd + nccds].data
                tranhdu[hdu + 2 * nccds] = tilehdu[ccd + 2 * nccds].data

    # open outfile
    if varframe:
        outlist = 4 * [None]
    else:
        outlist = 2 * [None]

    #outlist[0] = struct[0].copy()
    outlist[0] = fits.PrimaryHDU()
    outlist[0].header = struct[0].header

    naxis1 = int(gap / xbin * (nccds - 1))
    naxis2 = 0
    for i in range(1, nccds + 1):
        yw, xw = tranhdu[i].shape
        naxis1 += xw + int(abs(xsh[ccd] / xbin)) + 1
        naxis2 = max(naxis2, yw)
    outdata = numpy.zeros((naxis2, naxis1), numpy.float32)
    outdata.shape = naxis2, naxis1
    if varframe:
        vardata = outdata * 0
        bpmdata = outdata * 0 + 1

    # iterate over CCDs, stitch them to produce a full image
    hdu = 0
    totxshift = 0
    for hdu in range(1, nccds + 1):

        # read DATASEC keywords
        ydsec, xdsec = tranhdu[hdu].shape

        # define size and shape of final image
        # tile CCDs to yield mosaiced image
        x1 = int((hdu - 1) * (xdsec + gap / xbin)) + int(totxshift)
        x2 = xdsec + x1
        y1 = int(0)
        y2 = int(ydsec)
        outdata[y1:y2, x1:x2] = tranhdu[hdu]
        totxshift += int(abs(xsh[hdu] / xbin)) + 1
        if varframe:
            vardata[y1:y2, x1:x2] = tranhdu[hdu + nccds]
            bpmdata[y1:y2, x1:x2] = tranhdu[hdu + 2 * nccds]

    # make sure to cover up all the gaps, including bad areas
    if varframe:
        baddata = (outdata == 0)
        baddata = nd.maximum_filter(baddata, size=3)
        bpmdata[baddata] = 1

    # fill in the gaps if requested
    if fill:
        outdata = fill_gaps(outdata, 0)

    # add to the file
    outlist[1] = fits.ImageHDU(outdata)
    if varframe:
        outlist[2] = fits.ImageHDU(vardata, name='VAR')
        outlist[3] = fits.ImageHDU(bpmdata, name='BPM')

    # create the image structure
    outstruct = fits.HDUList(outlist)

    # update the header information
    # housekeeping keywords
    saltkey.put('NEXTEND', 2, outstruct[0])
    saltkey.new('EXTNAME', 'SCI', 'Extension name', outstruct[1])
    saltkey.new('EXTVER', 1, 'Extension number', outstruct[1])
    if varframe:
        saltkey.new('VAREXT', 2, 'Variance frame extension', outstruct[1])
        saltkey.new('BPMEXT', 3, 'BPM Extension', outstruct[1])

    try:
        saltkey.copy(struct[1], outstruct[1], 'CCDSUM')
    except:
        pass

    # Add keywords associated with geometry
    saltkey.new('SGEOMGAP', gap, 'SALT Chip Gap', outstruct[0])
    c1str = '{:3.2f} {:3.2f} {:3.4f}'.format(xshift[0], yshift[0], rotation[0])
    saltkey.new('SGEOM1', c1str, 'SALT Chip 1 Transform', outstruct[0])
    c2str = '{:3.2f} {:3.2f} {:3.4f}'.format(xshift[1], yshift[1], rotation[1])
    saltkey.new('SGEOM2', c2str, 'SALT Chip 2 Transform', outstruct[0])

    # WCS keywords
    saltkey.new('CRPIX1', 0, 'WCS: X reference pixel', outstruct[1])
    saltkey.new('CRPIX2', 0, 'WCS: Y reference pixel', outstruct[1])
    saltkey.new('CRVAL1', float(xbin), 'WCS: X reference coordinate value',
                outstruct[1])
    saltkey.new('CRVAL2', float(ybin), 'WCS: Y reference coordinate value',
                outstruct[1])
    saltkey.new('CDELT1', float(xbin), 'WCS: X pixel size', outstruct[1])
    saltkey.new('CDELT2', float(ybin), 'WCS: Y pixel size', outstruct[1])
    saltkey.new('CTYPE1', 'pixel', 'X type', outstruct[1])
    saltkey.new('CTYPE2', 'pixel', 'Y type', outstruct[1])

    # cleanup temporary files
    if cleanup:
        for tfile in tranfile:
            if os.path.isfile(tfile):
                saltio.delete(tfile)
        if os.path.isfile(tilefile):
            status = saltio.delete(tilefile)

    # return the file
    return outstruct
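
The final loop above drops each (already transformed) CCD image into one wide output array, separated by the chip gap in binned pixels. A stand-alone sketch of that tiling (the shapes and gap value are illustrative):

import numpy as np

ccds = [np.ones((100, 50)) * (k + 1) for k in range(3)]   # three fake CCD images
gap, xbin = 30, 2                                          # gap in unbinned pixels, binning
gp = int(gap / xbin)

ny = max(c.shape[0] for c in ccds)
nx = sum(c.shape[1] for c in ccds) + gp * (len(ccds) - 1)
outdata = np.zeros((ny, nx), np.float32)

x1 = 0
for c in ccds:
    yw, xw = c.shape
    outdata[0:yw, x1:x1 + xw] = c
    x1 += xw + gp                                          # step over the chip gap
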
Example #45
def bias(struct,
         subover=True,
         trim=True,
         subbias=False,
         bstruct=None,
         median=False,
         function='polynomial',
         order=3,
         rej_lo=3,
         rej_hi=3,
         niter=10,
         plotover=False,
         log=None,
         verbose=True):
    """Bias subtracts the bias levels from a frame.  It will fit and subtract the overscan
      region, trim the images, and subtract a master bias if required.

      struct--image structure 
      subover--subtract the overscan region
      trim--trim the image
      subbias--subtract master bias
      bstruct--master bias image structure
      median--use the median instead of mean in image statistics
      function--form to fit to the overscan region
      order--order for the function
      rej_lo--sigma  of low points to reject in the fit
      rej_hi--sigma of high points to reject in the fit
      niter--number of iterations
      log--saltio log for recording information
      verbose--whether to print to stdout 
   """
    infile = saltkey.getimagename(struct[0])

    # how many extensions?
    nsciext = saltkey.get('NSCIEXT', struct[0])
    nextend = saltkey.get('NEXTEND', struct[0])
    nccd = saltkey.get('NCCDS', struct[0])

    # how many amplifiers?--this is hard wired
    amplifiers = 2 * nccd

    #log the process
    if subover and log:
        message = '%28s %7s %5s %4s %6s' % \
            ('HDU','Overscan','Order','RMS','Niter')
        log.message(
            '\n     --------------------------------------------------',
            with_header=False,
            with_stdout=verbose)
        log.message(message, with_header=False, with_stdout=verbose)
        log.message('     --------------------------------------------------',
                    with_header=False,
                    with_stdout=verbose)

    if (plotover):
        plt.figure(1)
        plt.axes([0.1, 0.1, 0.8, 0.8])
        plt.xlabel('CCD Column')
        plt.ylabel('Pixel Counts (e-)')
        plt.ion()

    #loop through the extensions and subtract the bias
    for i in range(1, nsciext + 1):
        if struct[i].name == 'SCI':

            #get the bias section
            biassec = saltkey.get('BIASSEC', struct[i])
            y1, y2, x1, x2 = saltio.getSection(biassec, iraf_format=True)
            #get the data section
            datasec = saltkey.get('DATASEC', struct[i])
            dy1, dy2, dx1, dx2 = saltio.getSection(datasec, iraf_format=True)

            #setup the overscan region
            if subover:
                yarr = np.arange(y1, y2, dtype=float)
                data = struct[i].data
                odata = struct[i].data[y1:y2, x1:x2]
                if median:
                    odata = np.median((struct[i].data[y1:y2, x1:x2]), axis=1)
                    olevel = np.median((struct[i].data[y1:y2, x1:x2]))
                    saltkey.new('OVERSCAN', '%f' % (olevel),
                                'Overscan median value', struct[i])
                else:
                    odata = np.mean((struct[i].data[y1:y2, x1:x2]), axis=1)
                    olevel = np.mean((struct[i].data[y1:y2, x1:x2]))
                    saltkey.new('OVERSCAN', '%f' % (olevel),
                                'Overscan mean value', struct[i])

                #fit the overscan region
                ifit=saltfit.interfit(yarr, odata, function=function, \
                                      order=order, thresh=rej_hi, niter=niter)
                try:
                    ifit.interfit()
                    coeffs = ifit.coef
                    ofit = ifit(yarr)
                    omean, omed, osigma = saltstat.iterstat((odata - ofit),
                                                            sig=3,
                                                            niter=5)
                except ValueError:
                    #catch the error if it is a zero array
                    ofit = np.array(yarr) * 0.0
                    osigma = 0.0
                except TypeError:
                    #catch the error if it is a zero array
                    ofit = np.array(yarr) * 0.0
                    osigma = 0.0

                #if it hasn't been already, convert image to
                #double format
                struct[i].data = 1.0 * struct[i].data
                try:
                    struct[i].header.remove('BZERO')
                    struct[i].header.remove('BSCALE')
                except:
                    pass

                #subtract the overscan region
                for j in range(len(struct[i].data[0])):
                    struct[i].data[y1:y2, j] -= ofit

                #report the information
                if log:
                    message = '%25s[%1d] %8.2f %3d %7.2f %3d' % \
                        (infile, i, olevel, order, osigma, niter)
                    log.message(message,
                                with_stdout=verbose,
                                with_header=False)

                #add the statistics to the image header
                saltkey.new('OVERRMS', '%f' % (osigma), 'Overscan RMS value',
                            struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu = saltkey.get('VAREXT', struct[i])
                    try:
                        vdata = struct[vhdu].data
                        #The bias level should not be included in the noise from the signal
                        for j in range(len(struct[i].data[0])):
                            vdata[y1:y2, j] -= ofit
                        #add a bit to make sure that the minimum error is the read noise
                        rdnoise = saltkey.get('RDNOISE', struct[i])
                        vdata[vdata < rdnoise**2] = rdnoise**2
                        struct[vhdu].data = vdata + osigma**2

                    except Exception as e:
                        msg = 'Cannot update the variance frame in %s[%i] because %s' % (
                            infile, vhdu, e)
                        raise SaltError(msg)

                #plot the overscan region
                if plotover:
                    plt.plot(yarr, odata)
                    plt.plot(yarr, ofit)

            #trim the data and update the headers
            if trim:
                struct[i].data = struct[i].data[dy1:dy2, dx1:dx2]
                datasec = '[1:' + str(dx2 - dx1) + ',1:' + str(dy2 - dy1) + ']'
                saltkey.put('DATASEC', datasec, struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu = saltkey.get('VAREXT', struct[i])
                    struct[vhdu].data = struct[vhdu].data[dy1:dy2, dx1:dx2]
                    datasec = '[1:' + str(dx2 - dx1) + ',1:' + str(dy2 -
                                                                   dy1) + ']'
                    saltkey.put('DATASEC', datasec, struct[vhdu])
                #update the BPM frame
                if saltkey.found('BPMEXT', struct[i]):
                    bhdu = saltkey.get('BPMEXT', struct[i])
                    struct[bhdu].data = struct[bhdu].data[dy1:dy2, dx1:dx2]
                    datasec = '[1:' + str(dx2 - dx1) + ',1:' + str(dy2 -
                                                                   dy1) + ']'
                    saltkey.put('DATASEC', datasec, struct[bhdu])

            #subtract the master bias if necessary
            if subbias and bstruct:
                struct[i].data -= bstruct[i].data

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu = saltkey.get('VAREXT', struct[i])
                    try:
                        vdata = struct[vhdu].data
                        struct[vhdu].data = vdata + bstruct[vhdu].data
                    except Exception as e:
                        msg = 'Cannot update the variance frame in %s[%i] because %s' % (
                            infile, vhdu, e)
                        raise SaltError(msg)
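
The overscan correction above collapses the overscan columns row by row, fits a smooth function of row number with saltfit.interfit, and subtracts that fit from every column of the frame. A simplified sketch using numpy.polyfit as a stand-in for interfit (no iterative rejection; the section values are illustrative):

import numpy as np

data = np.random.normal(1020.0, 5.0, (200, 120))   # fake amplifier with a bias level
y1, y2, x1, x2 = 0, 200, 100, 120                  # illustrative overscan section

yarr = np.arange(y1, y2, dtype=float)
odata = np.mean(data[y1:y2, x1:x2], axis=1)        # overscan level per row
coeffs = np.polyfit(yarr, odata, 3)                # order-3 fit, no sigma rejection
ofit = np.polyval(coeffs, yarr)

for j in range(data.shape[1]):                     # subtract the fit column by column
    data[y1:y2, j] -= ofit
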
Example #46
def slotmerge(images, outimages, outpref, geomfile, clobber, logfile, verbose):

    with logging(logfile, debug) as log:
        # are the arguments defined
        saltsafeio.argdefined('images', images)
        saltsafeio.argdefined('geomfile', geomfile)
        saltsafeio.argdefined('logfile', logfile)

        # if the input file is a list, does it exist?
        if images[0] == '@':
            saltsafeio.listexists('Input', images)

        # parse list of input files
        infiles = saltsafeio.listparse('Raw image', images, '', '', '')

        # check input files exist
        saltsafeio.filesexist(infiles, '', 'r')

        # load output name list: @list, * and comma separated
        outimages = outimages.strip()
        outpref = outpref.strip()
        if len(outpref) == 0 and len(outimages) == 0:
            raise SaltIOError('Output file(s) not specified')

        # test output @filelist exists
        if len(outimages) > 0 and outimages[0] == '@':
            saltsafeio.listexists('Output', outimages)

        # parse list of output files
        outfiles = saltsafeio.listparse('Output image', outimages, outpref,
                                        infiles, '')

        # are input and output lists the same length?
        saltsafeio.comparelists(infiles, outfiles, 'Input', 'output')

        # do the output files already exist?
        if not clobber:
            saltsafeio.filesexist(outfiles, '', 'w')

        # does CCD geometry definition file exist
        geomfile = geomfile.strip()
        saltsafeio.fileexists(geomfile)

        # read geometry definition file
        gap = 0
        xshift = [0, 0]
        yshift = [0, 0]
        rotation = [0, 0]

        gap, xshift, yshift, rotation = saltsafeio.readccdgeom(geomfile)
        for ro in rotation:
            if ro != 0:
                log.warning('SLOTMERGE currently ignores CCD rotation')

        # Begin processing each file
        for infile, outfile in zip(infiles, outfiles):
            # determine the name for the output file
            outpath = outfile.rstrip(os.path.basename(outfile))
            if (len(outpath) == 0):
                outpath = '.'

            # open each raw image
            struct = saltsafeio.openfits(infile)

            # identify instrument
            instrume, keyprep, keygain, keybias, keyxtalk, keyslot = saltsafekey.instrumid(
                struct, infile)

            # how many amplifiers?
            nccds = saltsafekey.get('NCCDS', struct[0], infile)
            amplifiers = nccds * 2
            #if (nccds != 2):
            #    raise SaltError('Can not currently handle more than two CCDs')

            # CCD geometry coefficients
            if instrume == 'RSS' or instrume == 'PFIS':
                xsh = [xshift[0], 0., xshift[1]]
                ysh = [yshift[0], 0., yshift[1]]
                rot = [rotation[0], 0., rotation[1]]
                refid = 1
            if instrume == 'SALTICAM':
                xsh = [xshift[0], 0.]
                ysh = [yshift[0], 0.]
                rot = [rotation[0], 0]
                refid = 1

            # how many extensions?
            nextend = saltsafekey.get('NEXTEND', struct[0], infile)

            # how many exposures
            exposures = nextend / amplifiers

            # CCD on-chip binning
            xbin, ybin = saltsafekey.ccdbin(struct[0], infile)
            gp = int(gap / xbin)

            # create output hdu structure
            outstruct = [None] * int(exposures + 1)
            outstruct[0] = struct[0]

            # iterate over exposures, stitch them to produce file of CCD images
            for i in range(exposures):
                # Determine the total size of the image
                xsize = 0
                ysize = 0
                for j in range(amplifiers):
                    hdu = i * amplifiers + j + 1
                    try:
                        xsize += len(struct[hdu].data[0])
                        if ysize < len(struct[hdu].data):
                            ysize = len(struct[hdu].data)
                    except:
                        msg = 'Unable to access extension %i ' % hdu
                        raise SaltIOError(msg)
                xsize += gp * (nccds - 1)
                maxxsh, minxsh = determineshifts(xsh)
                maxysh, minysh = determineshifts(ysh)
                xsize += (maxxsh - minxsh)
                ysize += (maxysh - minysh)

                # Determine the x and y origins for each frame
                xdist = 0
                ydist = 0
                shid = 0
                x0 = np.zeros(amplifiers)
                y0 = np.zeros(amplifiers)
                for j in range(amplifiers):
                    x0[j] = xdist + xsh[shid] - minxsh
                    y0[j] = ysh[shid] - minysh
                    hdu = i * amplifiers + j + 1
                    darr = struct[hdu].data
                    xdist += len(darr[0])
                    if j % 2 == 1:
                        xdist += gp
                        shid += 1

                # make the out image
                outarr = np.zeros((ysize, xsize), np.float64)

                # Embed each frame into the output array
                for j in range(amplifiers):
                    hdu = i * amplifiers + j + 1
                    darr = struct[hdu].data
                    outarr = salttran.embed(darr, x0[j], y0[j], outarr)

                # Add the outimage to the output structure
                hdu = i * amplifiers + 1
                outhdu = i + 1
                outstruct[outhdu] = pyfits.ImageHDU(outarr)
                outstruct[outhdu].header = struct[hdu].header

                # Fix the headers in each extension
                datasec = '[1:%4i,1:%4i]' % (xsize, ysize)
                saltsafekey.put('DATASEC', datasec, outstruct[outhdu], outfile)
                saltsafekey.rem('DETSIZE', outstruct[outhdu], outfile)
                saltsafekey.rem('DETSEC', outstruct[outhdu], outfile)
                saltsafekey.rem('CCDSEC', outstruct[outhdu], outfile)
                saltsafekey.rem('AMPSEC', outstruct[outhdu], outfile)

                # add housekeeping key words
                outstruct[outhdu] = addhousekeeping(outstruct[outhdu], outhdu,
                                                    outfile)

            # close input FITS file
            saltsafeio.closefits(struct)

            # housekeeping keywords
            keymosaic = 'SLOTMERG'
            fname, hist = history(level=1, wrap=False)
            saltsafekey.housekeeping(struct[0], keymosaic,
                                     'Amplifiers have been mosaiced', hist)
            #saltsafekey.history(outstruct[0],hist)

            # these keywords are used later in the pipeline
            saltsafekey.put('NCCDS', 0.5, outstruct[0])
            saltsafekey.put('NSCIEXT', exposures, outstruct[0])
            saltsafekey.put('NEXTEND', exposures, outstruct[0])

            # write FITS file of mosaiced image
            outstruct = pyfits.HDUList(outstruct)
            saltsafeio.writefits(outstruct, outfile, clobber=clobber)
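The stitching loop above only uses determineshifts through its two return values, the extreme x and y shifts that pad the output canvas. A minimal sketch of such a helper, assuming it simply returns the rounded maximum and minimum of the shift list (the name determineshifts_sketch and the sample shifts are illustrative, not the pysalt implementation):

def determineshifts_sketch(shifts):
    """Return the (max, min) of a list of pixel shifts, rounded to integers."""
    imax = int(round(max(shifts)))
    imin = int(round(min(shifts)))
    return imax, imin

# Example: with xsh = [4.0, 0.0, -3.0] the canvas grows by 4 - (-3) = 7 pixels,
# which is exactly the (maxxsh - minxsh) padding added to xsize above.
maxxsh, minxsh = determineshifts_sketch([4.0, 0.0, -3.0])
assert maxxsh - minxsh == 7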
Example #47
                    sys.stdout.write(ctext)
                    sys.stdout.flush()

                struct=saltsafeio.openupdatefits(infile)

                # Skip through the frames and read in the utc
                istart=1
                if infile==infiles[0]:
                    istart=ignorexp*amplifiers+1
                for i in range(istart,len(struct), amplifiers):
                    for k in range(0,amplifiers):
                        if update:
                            struct[i+k]=updateheaders(struct[i+k],i+k, t_diff[j], real_expt, utc_list[j], infile)

                    if outfile:
                        utc_new=saltsafekey.get('UTC-OBS', struct[i+k], infile)
                        utc_new_sec=slottool.getobstime(struct[i], infile)
                        fout.write('%25s %2i %12s %12s %7.3f  %5.4f %4i \n' % (infile, i, utc_list[j], utc_new, utc_new_sec, t_diff[j], nd[j] ))
                    j += 1

                # Housekeeping key words
                if update:
                    history = 'SALTUTCFIX -- '
                    history += 'images='+infile+' '
                    saltsafekey.housekeeping(struct[0],'SLOTUTC','UTC has been corrected',history,infile)

                # update fits file
                if update:
                    saltsafeio.updatefits(struct)
                    saltsafeio.closefits(struct)
Example #48
def slotutcfix(images,update,outfile,ampperccd,ignorexp,droplimit,inter,plotdata,logfile,verbose,debug):
    
    with logging(logfile,debug) as log:
        # set up the variables
        utc_list = []


        # is the input file specified?
        saltsafeio.filedefined('Input',images)

        # if the input file is a list, does it exist?
        if images[0] == '@':
            saltsafeio.listexists('Input',images)

        # parse list of input files and place them in order
        infiles=saltsafeio.listparse('Raw image',images,'','','')
        infiles.sort()

        # check input files exist
        saltsafeio.filesexist(infiles,'','r')

        # check to see if the output file exists and if so, clobber it
        if os.path.isfile(outfile):
            try:
                os.remove(outfile)
            except:
                raise SaltIOError('File ' + outfile + ' can not be removed')

        # open the outfile
        if outfile:
            try:
                fout=open(outfile,'w')
            except:
                raise SaltIOError('File ' + outfile + ' can not be opened')

        # get time of first exposure and basic information about the observations
        infile=infiles[0]
        struct=saltsafeio.openfits(infile)

        # check to make sure slotmode data
        detmode=saltsafekey.get('DETMODE',struct[0], infile)
        if detmode != 'Slot Mode':
            raise SaltIOError('Data are not Slot Mode Observations')

        # Check to see if SLOTUTCFIX has already been run
        # and print a warning if they have
        if saltsafekey.found('SLOTUTC', struct[0]):
            message='Data have already been processed by SLOTUTCFIX'
            log.warning(message)

        # check to make sure that it is the right version of the software
        scamver=saltsafekey.get('DETSWV', struct[0], infile)
        try:
            scamver=float(scamver.split('-')[-1])
        except Exception:
            raise SaltError('Not able to read software version')
        if not (4.42 <= scamver <= 5.00):
            raise SaltError('Cannot currently correct this version of the SCAM software.')

        # requested exposure time
        req_texp=saltsafekey.get('EXPTIME',struct[0],infile)

        # how many extensions?
        nextend=saltsafekey.get('NEXTEND',struct[0],infile)

        # how many amplifiers
        amplifiers=saltsafekey.get('NCCDS',struct[0],infile)
        amplifiers = int(ampperccd*float(amplifiers))
        if ampperccd>0:
            nframes = nextend/amplifiers
            nstep=amplifiers
        else:
            nframes = nextend
            nstep=1

        # how many total frame and unique times
        ntotal=nextend*len(infiles)
        nunique=len(infiles)*nframes-ignorexp+1

        # Create arrays necessary for analysis
        id_arr=np.arange(nunique)
        utc_arr=np.zeros(nunique,dtype=float)

        # Read in each file and make a list of the UTC values
        if verbose:
            log.message('Reading in files to create list of UTC values.')

        j=0
        for n,infile in enumerate(infiles):
            # Show progress
            if verbose:
                percent=100.*float(n)/float(len(infiles))
                ctext='Percentage Complete: %.2f\r' % percent
                sys.stdout.write(ctext)
                sys.stdout.flush()

            struct=saltsafeio.openfits(infile)
            if not len(struct)-1==nextend:
                raise SaltIOError(infile + ' has a different number of extensions from the first file')

            # Skip through the frames and read in the utc
            istart=1
            if infile==infiles[0]:
                istart=ignorexp*amplifiers+1
            for i in range(istart,len(struct), amplifiers):
                try:
                    utc_list.append(saltsafekey.get('UTC-OBS', struct[i], infile))
                    utc_arr[j]=slottool.getobstime(struct[i], infile)
                    j += 1
                except Exception, e:
                    raise SaltIOError('Unable to create array of UTC times.  Please check the number of extensions in the files')

            # close FITS file
            saltsafeio.closefits(struct)

        # set up the other important arrays
        try:
            diff_arr=utc_arr.copy()
            diff_arr[1:]=diff_arr[1:]-utc_arr[:-1]
            diff_arr[0]=-1
            dsec_arr=utc_arr-utc_arr.astype(int)
        except:
            raise SaltIOError('Unable to create timing arrays')

        # calculate the real exposure time
        if verbose:
            log.message('Calculating real exposure time.')

        real_expt, med_expt, t_start, t_arr, ysum_arr=calculate_realexptime(id_arr, utc_arr, dsec_arr, diff_arr, req_texp, utc_list)

        # plot the results
        if plotdata:
            if verbose:
                log.message('Plotting data.')
            plt.ion()
            plt.plot(t_arr,ysum_arr,linewidth=0.5,linestyle='-',marker='',color='b')
            plt.xlabel('Time (s)')
            plt.ylabel('Fit')

        # Calculate the correct values
        if verbose:
            log.message('Calculating correct values')

        i_start = abs(utc_arr-t_start).argmin()
        t_diff=utc_arr*0.0+real_expt
        nd=utc_arr*0.0
        ndrop=0
        for i in range(len(utc_arr)):
            if utc_arr[i] >= t_start:
                t_new=t_start+real_expt*(i-i_start+ndrop)
                t_diff[i]=utc_arr[i]-t_new
                while (t_diff[i]>real_expt and nd[i] < droplimit):
                    nd[i]+= 1
                    t_new=t_start+real_expt*(i-i_start+ndrop+nd[i])
                    t_diff[i]=utc_arr[i]-t_new
                if (nd[i]<droplimit):
                    ndrop += nd[i]
            else:
                t_new=t_start+real_expt*(i-i_start)
                t_diff[i]=utc_arr[i]-t_new
                while (t_diff[i]>real_expt and nd[i] < droplimit):
                    nd[i]+= 1
                    t_new=t_start+real_expt*(i-i_start-nd[i])
                    t_diff[i]=utc_arr[i]-t_new

        # calculate the corrected timestamp by counting 6 recorded files forward and
        # 8 recorded + unrecorded files back--or just 8*t_exp forward.
        # if the object is near the end of the run, then just replace it with
        # the correct value assuming no dropped exposures.

        # first make the array of new times
        new_arr=utc_arr-t_diff

        # Next loop through them to find the corrected time
        corr_arr=utc_arr*0.0

        for i in range(len(new_arr)):
            if i+6 < len(new_arr)-1:
                corr_arr[i]=new_arr[i+6]-8*real_expt
            else:
                corr_arr[i]=new_arr[i]-2*real_expt

        t_diff=utc_arr-corr_arr

        # write out the first results
        msg="Dwell Time=%5.3f Requested Exposure Time=%5.3f Nobs = %i Dropped = %i" % (real_expt, req_texp,  nunique, ndrop)
        if verbose:
            log.message(msg)

        if outfile:
            fout.write('#'+msg+'\n')
            fout.write('#%23s %2s %12s %12s %10s %8s %4s \n' % ('File', 'N', 'UTC_old', 'UTC_new', 'UTC_new(s)', 'Diff', 'drop' ))

        # Give the user a chance to update the value
        if inter:
            message='Update headers with a dwell time of %5.3f s [y/n]? ' % real_expt
            update=saltsafeio.yn_ask(message)
            if not update:
                message='Set Dwell Time manually [y/n]? '
                update=saltsafeio.yn_ask(message)
                if update:
                    message='New Dwell Time: '
                    real_expt=saltsafeio.ask(message)
                    try:
                        real_expt=float(real_expt)
                    except Exception, e:
                        msg='Could not set user dwell time because %s' % e
                        raise SaltError(msg)
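The dropped-frame bookkeeping in the loop above is easier to follow on synthetic timestamps. The sketch below is simplified to the case where t_start is the first frame, so only the first branch of the loop is exercised; the 0.7 s dwell time, the 0.01 s jitter and the missing frame near 2.1 s are illustrative. The resulting new_arr = utc_arr - t_diff values would then feed the 6-forward/8-back correction described in the comment block above.

import numpy as np

real_expt = 0.7                                   # estimated dwell time
droplimit = 10
utc_arr = np.array([0.0, 0.7, 1.4, 2.81, 3.51])   # the frame near 2.1 s was dropped
t_start = utc_arr[0]
i_start = abs(utc_arr - t_start).argmin()

nd = np.zeros(len(utc_arr))                       # dropped frames detected before each frame
t_diff = np.zeros(len(utc_arr)) + real_expt
ndrop = 0
for i in range(len(utc_arr)):
    t_new = t_start + real_expt * (i - i_start + ndrop)
    t_diff[i] = utc_arr[i] - t_new
    # every full dwell time of lag counts as one dropped exposure, up to droplimit
    while t_diff[i] > real_expt and nd[i] < droplimit:
        nd[i] += 1
        t_new = t_start + real_expt * (i - i_start + ndrop + nd[i])
        t_diff[i] = utc_arr[i] - t_new
    if nd[i] < droplimit:
        ndrop += nd[i]

print(nd)      # [0. 0. 0. 1. 0.] -> one dropped exposure detected before the 4th frame
print(ndrop)   # 1.0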
Example #49
def specslitnormalize(images,
                      outimages,
                      outpref,
                      response=None,
                      response_output=None,
                      order=2,
                      conv=1e-2,
                      niter=20,
                      startext=0,
                      clobber=False,
                      logfile='salt.log',
                      verbose=True):

    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.listparse('Outfile', outimages, outpref, infiles, '')

        # read in the response function
        response = saltio.checkfornone(response)
        if response:
            log.message('Loading response from %s' % response)
            response = readresponse(response)

        # Identify the lines in each file
        for img, ofile in zip(infiles, outfiles):

            # open the image
            hdu = saltio.openfits(img)

            for i in range(startext, len(hdu)):
                if hdu[i].name == 'SCI':
                    log.message('Normalizing extension %i in  %s' % (i, img))
                    # things that will change for each slit

                    # set up the data for the source
                    try:
                        data = hdu[i].data
                    except Exception as e:
                        message = \
                            'Unable to read in data array in %s because %s' % \
                            (img, e)
                        raise SALTSpecError(message)

                    if response is None:
                        response = create_response(data,
                                                   spatial_axis=1,
                                                   order=order,
                                                   conv=conv,
                                                   niter=niter)
                        if response_output:
                            write_response(response, clobber=clobber)
                    else:
                        # add a check that the response is the same shape as
                        # the data
                        if len(response) != data.shape[0]:
                            raise SALTSpecError(
                                'Length of response function does not equal size of image array'
                            )

                    # correct the data
                    data = data / response

                    # correct the variance frame
                    if saltkey.found('VAREXT', hdu[i]):
                        vhdu = saltkey.get('VAREXT', hdu[i])
                        hdu[vhdu].data = hdu[vhdu].data / response

            saltio.writefits(hdu, ofile, clobber=clobber)
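The order, conv and niter arguments above steer an iterative fit inside create_response, which is not shown in this example. The sketch below is a hypothetical stand-in, purely to illustrate how those parameters could interact; sketch_response, the median collapse and the 3-sigma clip are assumptions, not the pysalt routine.

import numpy as np

def sketch_response(data, spatial_axis=1, order=2, conv=1e-2, niter=20):
    # collapse the 2-D frame along the spatial axis: one value per row
    profile = np.median(data, axis=spatial_axis)
    x = np.arange(len(profile))
    mask = np.ones(len(profile), dtype=bool)
    fit = None
    for _ in range(niter):
        coef = np.polyfit(x[mask], profile[mask], order)
        new_fit = np.polyval(coef, x)
        # stop once the fit changes fractionally by less than conv
        if fit is not None and np.mean(abs(new_fit - fit)) < conv * np.mean(abs(new_fit)):
            fit = new_fit
            break
        fit = new_fit
        # clip strongly deviant rows (e.g. slit edges) before refitting
        resid = profile - fit
        mask = abs(resid) < 3 * max(resid.std(), 1e-8)
    return fit / fit.mean()

# e.g. a frame with a smooth 20 per cent illumination gradient along the slit
frame = np.ones((100, 200)) * np.linspace(0.9, 1.1, 100)[:, None]
resp = sketch_response(frame)
print(resp.shape, resp.min().round(3), resp.max().round(3))   # (100,) ~0.9 ~1.1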
Example #50
        try:
            obsdate = int('%s%s%s' % (obsdate[0:4], obsdate[5:7], obsdate[8:]))
            xkey = np.array(xdict.keys())
            date = xkey[abs(xkey - obsdate).argmin()]
            xcoeff = xdict[date]
        except Exception, e:
            msg = 'WARNING--Cannot find xtalk coefficient for %s because %s' % (infile, e)
            if log: log.warning(msg)
            xcoeff = xdict[xdict.keys()[-1]]
    else:
        xcoeff = []
    struct = xtalk(struct, xcoeff, log=log, verbose=verbose)

    #bias correct the files
    if saltkey.fastmode(saltkey.get('DETMODE', struct[0])): order = 1

    struct = bias(struct,
                  subover=subover,
                  trim=trim,
                  subbias=subbias,
                  bstruct=bstruct,
                  median=median,
                  function=function,
                  order=order,
                  rej_lo=rej_lo,
                  rej_hi=rej_hi,
                  niter=niter,
                  plotover=plotover,
                  log=log,
                  verbose=verbose)
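The xcoeff lookup at the top of this example picks the crosstalk coefficients whose date key is numerically closest to the observation date. A self-contained version of that lookup, with a made-up coefficient table (under Python 3 the dictionary keys need an explicit list() before np.array):

import numpy as np

xdict = {20060901: [1.0e-4], 20111001: [1.5e-4], 20151201: [1.2e-4]}   # illustrative table
obsdate = '2012-03-15'
obsdate = int('%s%s%s' % (obsdate[0:4], obsdate[5:7], obsdate[8:]))    # -> 20120315
xkey = np.array(list(xdict.keys()))
date = xkey[abs(xkey - obsdate).argmin()]
xcoeff = xdict[date]
print(date, xcoeff)   # 20111001 [0.00015]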
Example #51
def specarcstraighten(images, outfile, function='poly', order=3, rstep=1,
                      rstart='middlerow', nrows=1, 
                      y1=None, y2=None, sigma=5, sections=3, niter=5,
                      startext=0, clobber=False, logfile='salt.log', verbose=True):

    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.argunpack('Output', outfile)

        # Identify the lines in each file
        for img, ofile in zip(infiles, outfiles):

            # open the image
            hdu = saltio.openfits(img)

            # get the basic information about the spectrograph
            dateobs = saltkey.get('DATE-OBS', hdu[0])
            try:
                utctime = saltkey.get('UTC-OBS', hdu[0])
            except SaltError:
                utctime = saltkey.get('TIME-OBS', hdu[0])

            instrume = saltkey.get('INSTRUME', hdu[0]).strip()
            grating = saltkey.get('GRATING', hdu[0]).strip()
            grang = saltkey.get('GR-ANGLE', hdu[0])
            grasteps = saltkey.get('GRTILT', hdu[0])
            arang = saltkey.get('AR-ANGLE', hdu[0])
            arsteps = saltkey.get('CAMANG', hdu[0])
            rssfilter = saltkey.get('FILTER', hdu[0])
            specmode = saltkey.get('OBSMODE', hdu[0])
            masktype = saltkey.get('MASKTYP', hdu[0]).strip().upper()
            slitname = saltkey.get('MASKID', hdu[0])
            xbin, ybin = saltkey.ccdbin(hdu[0], img)

            for i in range(startext, len(hdu)):
                if hdu[i].name == 'SCI':
                    log.message('Processing extension %i in %s' % (i, img))
                    # things that will change for each slit

                    if masktype == 'LONGSLIT':
                        slit = st.getslitsize(slitname)
                        objid = None
                    #elif masktype == 'MOS':
                        #slit = 1.5
                        # slit=saltkey.get('SLIT', hdu[i])

                        # set up the x and y positions
                        #miny = hdu[i].header['MINY']
                        #maxy = hdu[i].header['MAXY']
                        #ras = hdu[i].header['SLIT_RA']
                        #des = hdu[i].header['SLIT_DEC']
                        #objid = hdu[i].header['SLITNAME']

                        # Check the performance of masks at different PA
                        #rac = hdu[0].header['MASK_RA']
                        #dec = hdu[0].header['MASK_DEC']
                        #pac = hdu[0].header['PA']

                    else:
                        msg = '%s is not a currently supported masktype' % masktype
                        raise SALTSpecError(msg)

                    if instrume not in ['PFIS', 'RSS']:
                        msg = '%s is not a currently supported instrument' % instrume
                        raise SALTSpecError(msg)

                    # set up the data for the source
                    try:
                        data = hdu[i].data
                    except Exception as e:
                        message = 'Unable to read in data array in %s because %s' % (
                            img, e)
                        raise SALTSpecError(message)

                    # set up the center row
                    if rstart == 'middlerow':
                        ystart = int(0.5 * len(data))
                    else:
                        ystart = rstart


                    # set up the xarr array based on the image
                    xarr = np.arange(len(data[ystart]), dtype='int64')

                    # calculate the transformation
                    ImageSolution = arcstraight(data, xarr, ystart, function=function, order=order, 
                                                rstep=rstep, y1=y1, y2=y2, sigma=sigma, sections=sections,
                                                niter=niter, log=log, verbose=verbose)

                    if outfile and len(ImageSolution):
                        writeIS(ImageSolution, outfile, dateobs=dateobs, utctime=utctime, instrume=instrume,
                                grating=grating, grang=grang, grasteps=grasteps, arsteps=arsteps,
                                arang=arang, rfilter=rssfilter, slit=slit, xbin=xbin,
                                ybin=ybin, objid=objid,
                                filename=img, log=log, verbose=verbose)
Example #52
def saltclean(images,
              outpath,
              obslogfile=None,
              gaindb=None,
              xtalkfile=None,
              geomfile=None,
              subover=True,
              trim=True,
              masbias=None,
              subbias=False,
              median=False,
              function='polynomial',
              order=5,
              rej_lo=3,
              rej_hi=3,
              niter=5,
              interp='linear',
              clobber=False,
              logfile='salt.log',
              verbose=True):
    """SALTCLEAN will provide basic CCD reductions for a set of data.  It will 
      sort the data, and first process the biases, flats, and then the science 
      frames.  It will record basic quality control information about each of 
      the steps.
   """
    plotover = False

    #start logging
    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outpath = saltio.abspath(outpath)

        #does the gain database file exist
        if gaindb:
            dblist = saltio.readgaindb(gaindb)
        else:
            dblist = []

        # does crosstalk coefficient data exist
        if xtalkfile:
            xtalkfile = xtalkfile.strip()
            xdict = saltio.readxtalkcoeff(xtalkfile)
        else:
            xdict = None
        #does the mosaic file exist--raise error if no
        saltio.fileexists(geomfile)

        # Delete the obslog file if it already exists
        if os.path.isfile(obslogfile) and clobber: saltio.delete(obslogfile)

        #read in the obsveration log or create it
        if os.path.isfile(obslogfile):
            msg = 'The observing log already exists.  Please either delete it or run saltclean with clobber=yes'
            raise SaltError(msg)
        else:
            headerDict = obslog(infiles, log)
            obsstruct = createobslogfits(headerDict)
            saltio.writefits(obsstruct, obslogfile)

        #create the list of bias frames and process them
        filename = obsstruct.data.field('FILENAME')
        detmode = obsstruct.data.field('DETMODE')
        ccdtype = obsstruct.data.field('CCDTYPE')

        #set the bias list of objects
        biaslist = filename[ccdtype == 'ZERO']
        masterbias_dict = {}
        for img in infiles:
            if os.path.basename(img) in biaslist:
                #open the image
                struct = fits.open(img)
                bimg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Zero frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct,
                               createvar=False,
                               badpixelstruct=None,
                               mult=True,
                               dblist=dblist,
                               xdict=xdict,
                               subover=subover,
                               trim=trim,
                               subbias=False,
                               bstruct=None,
                               median=median,
                               function=function,
                               order=order,
                               rej_lo=rej_lo,
                               rej_hi=rej_hi,
                               niter=niter,
                               plotover=plotover,
                               log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(
                    level=1,
                    wrap=False,
                    exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE',
                                     'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, bimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterbias_dict = compareimages(struct,
                                                bimg,
                                                masterbias_dict,
                                                keylist=biasheader_list)

        #create the master bias frame
        for i in masterbias_dict.keys():
            bkeys = masterbias_dict[i][0]
            blist = masterbias_dict[i][1:]
            mbiasname = outpath + createmasterbiasname(blist, bkeys)
            bfiles = ','.join(blist)
            saltcombine(bfiles, mbiasname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3,    \
                        hthresh=3, clobber=False, logfile=logfile,verbose=verbose)

        #create the list of flatfields and process them
        flatlist = filename[ccdtype == 'FLAT']
        masterflat_dict = {}
        for img in infiles:
            if os.path.basename(img) in flatlist:
                #open the image
                struct = fits.open(img)
                fimg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Flat frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct,
                               createvar=False,
                               badpixelstruct=None,
                               mult=True,
                               dblist=dblist,
                               xdict=xdict,
                               subover=subover,
                               trim=trim,
                               subbias=False,
                               bstruct=None,
                               median=median,
                               function=function,
                               order=order,
                               rej_lo=rej_lo,
                               rej_hi=rej_hi,
                               niter=niter,
                               plotover=plotover,
                               log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(
                    level=1,
                    wrap=False,
                    exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE',
                                     'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, fimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterflat_dict = compareimages(struct,
                                                fimg,
                                                masterflat_dict,
                                                keylist=flatheader_list)

        #create the master flat frame
        for i in masterflat_dict.keys():
            fkeys = masterflat_dict[i][0]
            flist = masterflat_dict[i][1:]
            mflatname = outpath + createmasterflatname(flist, fkeys)
            ffiles = ','.join(flist)
            saltcombine(ffiles, mflatname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3,    \
                        hthresh=3, clobber=False, logfile=logfile,verbose=verbose)

        #process the science data
        for img in infiles:
            nimg = os.path.basename(img)
            #print nimg, nimg in flatlist, nimg in biaslist
            if not (nimg in biaslist):
                #open the image
                struct = fits.open(img)
                simg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing science frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct,
                               createvar=False,
                               badpixelstruct=None,
                               mult=True,
                               dblist=dblist,
                               xdict=xdict,
                               subover=subover,
                               trim=trim,
                               subbias=False,
                               bstruct=None,
                               median=median,
                               function=function,
                               order=order,
                               rej_lo=rej_lo,
                               rej_hi=rej_hi,
                               niter=niter,
                               plotover=plotover,
                               log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(
                    level=1,
                    wrap=False,
                    exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE',
                                     'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, simg, clobber=clobber)
                saltio.closefits(struct)

                #mosaic the files--currently not in the proper format--will update when it is
                if not saltkey.fastmode(saltkey.get('DETMODE', struct[0])):
                    mimg = outpath + 'mbxgp' + os.path.basename(img)
                    saltmosaic(images=simg,
                               outimages=mimg,
                               outpref='',
                               geomfile=geomfile,
                               interp=interp,
                               cleanup=True,
                               clobber=clobber,
                               logfile=logfile,
                               verbose=verbose)

                    #remove the intermediate steps
                    saltio.delete(simg)
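compareimages is used above only through the structure it builds: each entry of masterbias_dict / masterflat_dict stores the shared header keyword values in slot 0 and the matching file names after it, which is exactly how the saltcombine calls consume it. A hypothetical sketch of that grouping; group_frames, the keyword list and the header values below are illustrative assumptions, not the pysalt code.

def group_frames(groups, fname, header, keylist):
    # frames sharing the same values for the keywords in keylist fall in one group
    key = tuple(header.get(k) for k in keylist)
    if key not in groups:
        groups[key] = [list(key)]
    groups[key].append(fname)
    return groups

biasheader_list = ['DETMODE', 'CCDSUM', 'GAINSET']     # assumed grouping keywords
groups = {}
for fname in ['bxgpS001.fits', 'bxgpS002.fits']:
    header = {'DETMODE': 'Normal', 'CCDSUM': '2 2', 'GAINSET': 'FAINT'}
    groups = group_frames(groups, fname, header, biasheader_list)

for i in groups:
    bkeys, blist = groups[i][0], groups[i][1:]
    print(bkeys, ','.join(blist))   # mirrors how bfiles is built for saltcombine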
Example #53
def specidentify(images, linelist, outfile, guesstype='rss', guessfile='',
                 automethod='Matchlines', function='poly', order=3, rstep=100,
                 rstart='middlerow', mdiff=5, thresh=3, niter=5, smooth=0,
                 subback=0, inter=True, startext=0,  clobber=False, 
                 textcolor='black', logfile='salt.log', verbose=True):

    with logging(logfile, debug) as log:

        # set up the variables
        infiles = []
        outfiles = []

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outfiles = saltio.argunpack('Output', outfile)

        # open the line lists
        slines, sfluxes = st.readlinelist(linelist)

        # Identify the lines in each file
        for img, ofile in zip(infiles, outfiles):

            # open the image
            hdu = saltio.openfits(img)

            # get the basic information about the spectrograph
            dateobs = saltkey.get('DATE-OBS', hdu[0])
            try:
                utctime = saltkey.get('UTC-OBS', hdu[0])
            except SaltError:
                utctime = saltkey.get('TIME-OBS', hdu[0])

            instrume = saltkey.get('INSTRUME', hdu[0]).strip()
            grating = saltkey.get('GRATING', hdu[0]).strip()
            grang = saltkey.get('GR-ANGLE', hdu[0])
            grasteps = saltkey.get('GRTILT', hdu[0])
            arang = saltkey.get('AR-ANGLE', hdu[0])
            arsteps = saltkey.get('CAMANG', hdu[0])
            rssfilter = saltkey.get('FILTER', hdu[0])
            specmode = saltkey.get('OBSMODE', hdu[0])
            masktype = saltkey.get('MASKTYP', hdu[0]).strip().upper()
            slitname = saltkey.get('MASKID', hdu[0])
            xbin, ybin = saltkey.ccdbin(hdu[0], img)

            for i in range(startext, len(hdu)):
                if hdu[i].name == 'SCI':
                    log.message('Processing extension %i in %s' % (i, img))
                    # things that will change for each slit

                    if masktype == 'LONGSLIT':
                        slit = st.getslitsize(slitname)
                        xpos = -0.2666
                        ypos = 0.0117
                        objid = None
                    elif masktype == 'MOS':
                        slit = 1.5
                        #slit=saltkey.get('SLIT', hdu[i])

                        # set up the x and y positions
                        miny = hdu[i].header['MINY']
                        maxy = hdu[i].header['MAXY']
                        ras = hdu[i].header['SLIT_RA']
                        des = hdu[i].header['SLIT_DEC']
                        objid = hdu[i].header['SLITNAME']

                        # TODO: Check the performance of masks at different PA
                        rac = hdu[0].header['MASK_RA']
                        dec = hdu[0].header['MASK_DEC']
                        pac = hdu[0].header['PA']

                        # these are hard wired at the moment
                        xpixscale = 0.1267 * xbin
                        ypixscale = 0.1267 * ybin
                        cx = int(3162 / xbin)
                        cy = int(2050 / ybin)

                        x, y = mt.convert_fromsky(ras, des, rac, dec, xpixscale=xpixscale, ypixscale=ypixscale,
                                                  position_angle=-pac, ccd_cx=cx, ccd_cy=cy)
                        xpos = 0.015 * 2 * (cx - x[0])
                        ypos = 0.0117
                    else:
                        msg = '%s is not a currently supported masktype' % masktype
                        raise SALTSpecError(msg)

                    if instrume not in ['PFIS', 'RSS']:
                        msg = '%s is not a currently supported instrument' % instrume
                        raise SALTSpecError(msg)

                    # create RSS Model
                    rss = RSSModel.RSSModel(grating_name=grating.strip(), gratang=grang,
                                            camang=arang, slit=slit, xbin=xbin, ybin=ybin,
                                            xpos=xpos, ypos=ypos)
                    res = 1e7 * rss.calc_resolelement(rss.alpha(), -rss.beta())
                    dres = res / 10.0
                    wcen = 1e7 * rss.calc_centralwavelength()
                    R = rss.calc_resolution(
                        wcen / 1e7, rss.alpha(), -rss.beta())
                    logmsg = '\nGrating\tGR-ANGLE\tAR-ANGLE\tSlit\tWCEN\tR\n'
                    logmsg += '%s\t%8.3f\t%8.3f\t%4.2f\t%6.2f\t%4f\n' % (
                        grating, grang, arang, slit, wcen, R)
                    if log:
                        log.message(logmsg, with_header=False)

                    # set up the data for the source
                    try:
                        data = hdu[i].data
                    except Exception as e:
                        message = 'Unable to read in data array in %s because %s' % (
                            img, e)
                        raise SALTSpecError(message)

                    # set up the center row
                    if rstart == 'middlerow':
                        ystart = int(0.5 * len(data))
                    else:
                        ystart = int(rstart)

                    rss.gamma = 0.0
                    if masktype == 'MOS':
                        rss.gamma = 180.0 / math.pi * math.atan((y * rss.detector.pix_size * rss.detector.ybin
                                                                 - 0.5 * rss.detector.find_height()) / rss.camera.focallength)

                    # set up the xarr array based on the image
                    xarr = np.arange(len(data[ystart]), dtype='int64')

                    # get the guess for the wavelength solution
                    if guesstype == 'rss':
                        # set up the rss model
                        ws = st.useRSSModel(
                            xarr, rss, function=function, order=order, gamma=rss.gamma)
                    elif guesstype == 'file':
                        soldict = {}
                        soldict = readsolascii(guessfile, soldict)
                        timeobs = enterdatetime('%s %s' % (dateobs, utctime))
                        exptime = saltkey.get('EXPTIME', hdu[0])
                        filtername = saltkey.get('FILTER', hdu[0]).strip()
                        try:
                            slitid = saltkey.get('SLITNAME', hdu[i])
                        except:
                            slitid = None

                        function, order, coef = findlinesol(
                            soldict, ystart, True, timeobs, exptime, instrume, grating, grang, arang, filtername, slitid, xarr=xarr)
                        ws = WavelengthSolution.WavelengthSolution(
                            xarr, xarr, function=function, order=order)
                        ws.set_coef(coef)
                    else:
                        raise SALTSpecError(
                            'This guesstype is not currently supported')

                    # identify the spectral lines
                    ImageSolution = identify(data, slines, sfluxes, xarr, ystart, ws=ws, function=function,
                                             order=order, rstep=rstep,  mdiff=mdiff, thresh=thresh, niter=niter,
                                             method=automethod, res=res, dres=dres, smooth=smooth, inter=inter, filename=img,
                                             subback=subback, textcolor=textcolor, log=log, verbose=verbose)

                    if outfile and len(ImageSolution):
                        writeIS(ImageSolution, outfile, dateobs=dateobs, utctime=utctime, instrume=instrume,
                                grating=grating, grang=grang, grasteps=grasteps, arsteps=arsteps,
                                arang=arang, rfilter=rssfilter, slit=slit, xbin=xbin,
                                ybin=ybin, objid=objid,
                                filename=img, log=log, verbose=verbose)
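For MOS data the slit's pixel offset from the (hard-wired, unbinned) CCD centre is turned into the xpos argument handed to RSSModel. A short worked example; the slit position of 1400 binned pixels is illustrative, the constants come from the code above.

xbin = 2
cx = int(3162 / xbin)            # 1581: CCD centre in binned pixels
x_slit = 1400                    # hypothetical slit x position from mt.convert_fromsky
xpos = 0.015 * 2 * (cx - x_slit)
print(xpos)                      # 5.43 -> passed to RSSModel as xpos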
Example #54
def rectify(hdu, soldict, caltype='line', function='poly', order=3, inttype='interp', 
            w1=None, w2=None, dw=None, nw=None,
            blank=0, pixscale=0.0, time_interp=False, clobber=True, log=None, verbose=True):
   """Read in an image and a set of wavlength solutions.  Calculate the best
      wavelength solution for a given dataset and then apply that data set to the 
      image 

    return
   """
 
   #set the basic values
   set_w1=(w1 is None)
   set_w2=(w2 is None)
   set_dw=(dw is None)
   set_nw=(nw is None)

   #set up the time of the observation
   dateobs=saltkey.get('DATE-OBS', hdu[0])
   utctime=saltkey.get('TIME-OBS', hdu[0])
   exptime=saltkey.get('EXPTIME', hdu[0])
   instrume=saltkey.get('INSTRUME', hdu[0]).strip()
   grating=saltkey.get('GRATING', hdu[0]).strip()
   if caltype=='line':
      grang=saltkey.get('GRTILT', hdu[0])
      arang=saltkey.get('CAMANG', hdu[0])
   else:
      grang=saltkey.get('GR-ANGLE', hdu[0])
      arang=saltkey.get('AR-ANGLE', hdu[0])
   filtername=saltkey.get('FILTER', hdu[0]).strip()
   slitname=saltkey.get('MASKID', hdu[0])
   slit=st.getslitsize(slitname)
   xbin, ybin = saltkey.ccdbin( hdu[0])

   timeobs=enterdatetime('%s %s' % (dateobs, utctime))

   #check to see if there is more than one solution
   if caltype=='line':
    if len(soldict)==1:
      sol=soldict.keys()[0]
      slitid=None
      if not matchobservations(soldict[sol], instrume, grating, grang, arang, filtername, slitid):
         msg='Observations do not match setup for transformation but using the solution anyway'
         if log: log.warning(msg)

   for i in range(1,len(hdu)):
     if hdu[i].name=='SCI':
       if log: log.message('Correcting extension %i' % i)
       istart=int(0.5*len(hdu[i].data))
       #open up the data
       #set up the xarr and initial wavelength solution
       xarr=np.arange(len(hdu[i].data[istart]), dtype='int64')

       #get the slitid
       try:
          slitid=saltkey.get('SLITNAME', hdu[i])
       except:
          slitid=None
       #set up a wavelength solution
       try:
           w_arr=findsol(xarr, soldict, istart, caltype, timeobs, exptime, instrume, grating, grang, arang, filtername, 
              slit, xbin, ybin, slitid, function, order )
       except SALTSpecError, e:
           if slitid: 
              msg='SLITID %s: %s' % (slitid, e)
              if log: log.warning(msg)
              continue
           else:
              raise SALTSpecError(e)
 
       if w_arr is None: 
          w_arr=findsol(xarr, soldict, istart, 'rss', timeobs, exptime, instrume, grating, grang, arang, filtername, 
              slit, xbin, ybin, slitid, function, order )
  
       #set up the output x-axis

       if set_w1: w1=w_arr.min()
       if set_w2: w2=w_arr.max()
       if set_nw: nw=len(xarr)
       if set_dw: dw=float(w2-w1)/nw
       nw_arr=createoutputxaxis(w1, w2, nw)

       #setup the VARIANCE and BPM frames       
       if saltkey.found('VAREXT', hdu[i]):
           varext=saltkey.get('VAREXT', hdu[i])
       else:
           varext=None

       #setup the BPM frames       
       if saltkey.found('BPMEXT', hdu[i]):
           bpmext=saltkey.get('BPMEXT', hdu[i])
       else:
           bpmext=None

       #for each line in the data, determine the wavelength solution
       #for a given line in the image
       for j in range(len(hdu[i].data)):
           #find the wavelength solution for the data
           w_arr=findsol(xarr, soldict, j, caltype, timeobs, exptime, instrume, grating, grang, arang, filtername, 
              slit, xbin, ybin, slitid, function, order )

           #apply that wavelength solution to the data
           if w_arr is not None:
               try:
                  hdu[i].data[j,:]=st.interpolate(nw_arr, w_arr, hdu[i].data[j,:], inttype, left=blank, right=blank)
               except Exception, e:
                  hdu[i].data[j,:]=hdu[i].data[j,:]*0.0+blank
                  msg='In row %i, solution cannot be found due to %s' % (j, e)
                  if log: log.warning(msg)

               #correct the variance frame
               if varext:
                   try:
                       hdu[varext].data[j,:]=st.interpolate(nw_arr, w_arr, hdu[varext].data[j,:], inttype, left=blank, right=blank)
                   except Exception, e:
                       msg='In row %i, solution cannot be found due to %s' % (j, e)
                       if log: log.warning(msg)

               #correct the BPM frame
               if bpmext:
                   try:
                       hdu[bpmext].data[j,:]=st.interpolate(nw_arr, w_arr, hdu[bpmext].data[j,:], inttype, left=blank, right=blank)
                   except Exception, e:
                       msg='In row %i, solution cannot be found due to %s' % (j, e)
                       if log: log.warning(msg)
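Each image row above is resampled from its own wavelength grid w_arr onto the common output grid nw_arr by st.interpolate, with out-of-range pixels set to blank. A hedged sketch of that per-row resampling using np.interp as a stand-in (st.interpolate also supports other inttype options; only the simple linear case with an increasing w_arr is shown, and the numbers are illustrative):

import numpy as np

blank = 0
row = np.array([10.0, 12.0, 11.0, 9.0])               # counts along one image row
w_arr = np.array([4000.0, 4002.5, 4005.0, 4007.5])    # wavelength of each input pixel
nw_arr = np.linspace(3999.0, 4008.0, 10)              # common output wavelength grid

new_row = np.interp(nw_arr, w_arr, row, left=blank, right=blank)
print(new_row.round(2))   # pixels outside [4000, 4007.5] come back as blank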
Example #55
def rectify(hdu, soldict, caltype='line', function='poly', order=3, inttype='interp',
            w1=None, w2=None, dw=None, nw=None, blank=0, pixscale=0.0, time_interp=False,
            conserve=False, nearest=False, clobber=True, log=None, verbose=True):
    """Read in an image and a set of wavlength solutions.  Calculate the best
       wavelength solution for a given dataset and then apply that data set to the
       image

     return
    """

    # set the basic values
    set_w1 = (w1 is None)
    set_w2 = (w2 is None)
    set_dw = (dw is None)
    set_nw = (nw is None)

    # set up the time of the observation
    dateobs = saltkey.get('DATE-OBS', hdu[0])
    utctime = saltkey.get('TIME-OBS', hdu[0])
    exptime = saltkey.get('EXPTIME', hdu[0])
    instrume = saltkey.get('INSTRUME', hdu[0]).strip()
    grating = saltkey.get('GRATING', hdu[0]).strip()
    if caltype == 'line':
        grang = saltkey.get('GRTILT', hdu[0])
        arang = saltkey.get('CAMANG', hdu[0])
    else:
        grang = saltkey.get('GR-ANGLE', hdu[0])
        arang = saltkey.get('AR-ANGLE', hdu[0])
    filtername = saltkey.get('FILTER', hdu[0]).strip()
    slitname = saltkey.get('MASKID', hdu[0])
    slit = st.getslitsize(slitname)
    xbin, ybin = saltkey.ccdbin(hdu[0])

    timeobs = enterdatetime('%s %s' % (dateobs, utctime))

    # check to see if there is more than one solution
    if caltype == 'line':
        if len(soldict) == 1:
            sol = soldict.keys()[0]
            slitid = None
            if not matchobservations(
                    soldict[sol], instrume, grating, grang, arang, filtername, slitid):
                msg = 'Observations do not match setup for transformation but using the solution anyway'
                if log:
                    log.warning(msg)

    for i in range(1, len(hdu)):
        if hdu[i].name == 'SCI':
            if log:
                log.message('Correcting extension %i' % i)
            istart = int(0.5 * len(hdu[i].data))
            # open up the data
            # set up the xarr and initial wavelength solution
            xarr = np.arange(len(hdu[i].data[istart]), dtype='int64')

            # get the slitid
            try:
                slitid = saltkey.get('SLITNAME', hdu[i])
            except:
                slitid = None
            # set up a wavelength solution
            try:
                w_arr = findsol(xarr, soldict, istart, caltype, nearest, timeobs, exptime, instrume, grating, grang, arang, filtername,
                                slit, xbin, ybin, slitid, function, order)
            except SALTSpecError as e:
                if slitid:
                    msg = 'SLITID %s: %s' % (slitid, e)
                    if log:
                        log.warning(msg)
                    continue
                else:
                    raise SALTSpecError(e)

            if w_arr is None:
                w_arr = findsol(xarr, soldict, istart, 'rss', nearest, timeobs, exptime, instrume, grating, grang, arang, filtername,
                                slit, xbin, ybin, slitid, function, order)

            # set up the output x-axis

            if set_w1:
                w1 = w_arr.min()
            if set_w2:
                w2 = w_arr.max()
            if set_nw:
                nw = len(xarr)
            if set_dw:
                dw = float(w2 - w1) / nw
            nw_arr = createoutputxaxis(w1, w2, nw)

            # setup the VARIANCE and BPM frames
            if saltkey.found('VAREXT', hdu[i]):
                varext = saltkey.get('VAREXT', hdu[i])
            else:
                varext = None

            # setup the BPM frames
            if saltkey.found('BPMEXT', hdu[i]):
                bpmext = saltkey.get('BPMEXT', hdu[i])
            else:
                bpmext = None

            # for each line in the data, determine the wavelength solution
            # for a given line in the image
            for j in range(len(hdu[i].data)):
                # find the wavelength solution for the data
                w_arr = findsol(xarr, soldict, j, caltype, nearest, timeobs, exptime, instrume, grating, grang, arang, filtername,
                                slit, xbin, ybin, slitid, function, order)

                # apply that wavelength solution to the data
                if w_arr is not None:
                    try:
                        hdu[i].data[j, :] = st.interpolate(
                            nw_arr, w_arr, hdu[i].data[j, :],
                            inttype, left=blank, right=blank)
                    except Exception as e:
                        hdu[i].data[j, :] = hdu[i].data[j, :] * 0.0 + blank
                        msg = 'In row %i, solution cannot be found due to %s' % (j, e)
                        if log:
                            log.warning(msg)

                    # correct the variance frame
                    if varext:
                        try:
                            hdu[varext].data[j, :] = st.interpolate(
                                nw_arr, w_arr, hdu[varext].data[j, :],
                                inttype, left=blank, right=blank)
                        except Exception as e:
                            msg = 'In row %i, solution cannot be found due to %s' % (j, e)
                            if log:
                                log.warning(msg)

                    # correct the BPM frame
                    if bpmext:
                        try:
                            hdu[bpmext].data[j, :] = st.interpolate(
                                nw_arr, w_arr, hdu[bpmext].data[j, :],
                                inttype, left=blank, right=blank)
                        except Exception as e:
                            msg = 'In row %i, solution cannot be found due to %s' % (j, e)
                            if log:
                                log.warning(msg)
                else:
                    hdu[i].data[j, :] = hdu[i].data[j, :] * 0.0 + blank

            if conserve:
                hdu[i].data = hdu[i].data / dw
                if varext:
                    hdu[varext].data = hdu[varext].data / dw

            # Add WCS information
            saltkey.new('CTYPE1', 'LAMBDA', 'Coordinate Type', hdu[i])
            saltkey.new('CTYPE2', 'PIXEL', 'Coordinate Type', hdu[i])
            saltkey.new(
                'CD1_1',
                dw,
                'WCS: Wavelength Dispersion in angstrom/pixel',
                hdu[i])
            saltkey.new('CD2_1', 0.0, 'WCS: ', hdu[i])
            saltkey.new('CD1_2', 0.0, 'WCS: ', hdu[i])
            saltkey.new('CD2_2', ybin * pixscale, 'WCS: ', hdu[i])
            saltkey.new('CRPIX1', 0.0, 'WCS: X Reference pixel', hdu[i])
            saltkey.new('CRPIX2', 0.0, 'WCS: Y Reference pixel', hdu[i])
            saltkey.new('CRVAL1', w1, 'WCS: X Reference value', hdu[i])
            saltkey.new('CRVAL2', 0.0, 'WCS: Y Reference value', hdu[i])
            saltkey.new('CDELT1', 1.0, 'WCS: X pixel size', hdu[i])
            saltkey.new('CDELT2', 1.0, 'WCS: Y pixel size', hdu[i])
            saltkey.new('DC-FLAG', 0, 'Dispersion Corrected image', hdu[i])

    return hdu
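The WCS keywords written above define a linear wavelength axis, which is exactly how the extract() example that follows rebuilds it: warr = CRVAL1 + CD1_1 * pixel_index. A tiny round-trip check with illustrative values:

import numpy as np

w1, dw, nw = 4000.0, 1.0, 2048     # illustrative CRVAL1, CD1_1 and row length
xarr = np.arange(nw)
warr = w1 + dw * xarr
print(warr[0], warr[-1])           # 4000.0 6047.0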
Example #56
def extract(hdu,
            ext=1,
            method='normal',
            section=None,
            minsize=3.0,
            thresh=3.0,
            convert=True):
    """For a given image, extract a 1D spectra from the image
       and write the spectra to the output file

    """

    ap_list = []
    i = ext
    if hdu[i].name == 'SCI':
        # set up the data, variance, and bad pixel frames
        # first step is to find the region to extract
        data_arr = hdu[i].data
        try:
            var_arr = hdu[hdu[i].header['VAREXT']].data
        except:
            var_arr = None
        try:
            bpm_arr = hdu[hdu[i].header['BPMEXT']].data
        except:
            bpm_arr = None
        # the variance and bad pixel arrays are currently not used in the extraction
        var_arr = None
        bpm_arr = None

        xarr = np.arange(len(data_arr[0]))

        # convert using the WCS information
        try:
            w0 = saltkey.get('CRVAL1', hdu[i])
            dw = saltkey.get('CD1_1', hdu[i])
        except Exception as e:
            msg = 'Error on Ext %i: %s' % (i, e)
            raise SALTSpecError(msg)
        warr = w0 + dw * xarr

        # convert from air to vacuum
        if convert:
            warr = Spectrum.air2vac(warr)

        # set up the sections in case of findobj
        if section is None:
            section = findobj.findObjects(data_arr,
                                          method='median',
                                          specaxis=1,
                                          minsize=minsize,
                                          thresh=thresh,
                                          niter=5)

        # extract all of the  regions
        for sec in section:
            ap = apext.apext(warr, data_arr, ivar=var_arr)
            y1, y2 = sec
            ap.flatten(y1, y2)
            ap_list.append(ap)

    return ap_list
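The actual extraction is delegated to apext.apext(...).flatten(y1, y2), whose internals are not shown here. A hypothetical stand-in, assuming the flatten step simply sums the image rows between y1 and y2 into a 1-D spectrum; the real class may instead weight by the inverse variance that is passed in.

import numpy as np

def flatten_sketch(data_arr, y1, y2):
    # collapse rows y1..y2 of a 2-D spectrum into a single 1-D spectrum
    return data_arr[y1:y2, :].sum(axis=0)

data_arr = np.ones((100, 2048))    # illustrative 2-D spectrum
spec = flatten_sketch(data_arr, 40, 60)
print(spec.shape, spec[0])         # (2048,) 20.0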
Example #57
def make_mosaic(struct, gap, xshift, yshift, rotation, interp_type='linear',
                boundary='constant', constant=0, geotran=True, fill=False,
                cleanup=True, log=None, verbose=False):
    """Given a SALT image struct, combine each of the individual amplifiers and
        apply the geometric CCD transformations to the image
    """

    # get the name of the file
    infile = saltkey.getimagename(struct[0], base=True)
    outpath = './'

    # identify instrument
    instrume, keyprep, keygain, keybias, keyxtalk, keyslot = \
        saltkey.instrumid(struct)

    # how many amplifiers?
    nsciext = saltkey.get('NSCIEXT', struct[0])
    nextend = saltkey.get('NEXTEND', struct[0])
    nccds = saltkey.get('NCCDS', struct[0])
    amplifiers = nccds * 2

    if nextend > nsciext:
        varframe = True
    else:
        varframe = False

    # CCD geometry coefficients
    if (instrume == 'RSS' or instrume == 'PFIS'):
        xsh = [0., xshift[0], 0., xshift[1]]
        ysh = [0., yshift[0], 0., yshift[1]]
        rot = [0., rotation[0], 0., rotation[1]]
    elif instrume == 'SALTICAM':
        xsh = [0., xshift[0], 0.]
        ysh = [0., yshift[0], 0.]
        rot = [0., rotation[0], 0]

    # how many extensions?
    nextend = saltkey.get('NEXTEND', struct[0])

    # CCD on-chip binning
    xbin, ybin = saltkey.ccdbin(struct[0])

    # create temporary primary extension
    outstruct = []
    outstruct.append(struct[0])
    # define temporary FITS file store tiled CCDs

    tilefile = saltio.tmpfile(outpath)
    tilefile += 'tile.fits'
    if varframe:
        tilehdu = [None] * (3 * int(nsciext / 2) + 1)
    else:
        tilehdu = [None] * int(nsciext / 2 + 1)
    tilehdu[0] = fits.PrimaryHDU()
    #tilehdu[0].header = struct[0].header

    if log:
        log.message('', with_stdout=verbose)

    # iterate over amplifiers, stitch them to produce a file of CCD images
    for i in range(int(nsciext / 2)):
        hdu = i * 2 + 1
        # amplifier = hdu%amplifiers
        # if (amplifier == 0): amplifier = amplifiers

        # read DATASEC keywords
        datasec1 = saltkey.get('DATASEC', struct[hdu])
        datasec2 = saltkey.get('DATASEC', struct[hdu + 1])
        xdsec1, ydsec1 = saltstring.secsplit(datasec1)
        xdsec2, ydsec2 = saltstring.secsplit(datasec2)

        # read images
        imdata1 = saltio.readimage(struct, hdu)
        imdata2 = saltio.readimage(struct, hdu + 1)

        # tile 2n amplifiers to yield n CCD images
        outdata = numpy.zeros((ydsec1[1] + abs(ysh[i + 1] / ybin),
                               xdsec1[1] + xdsec2[1] + abs(xsh[i + 1] / xbin)),
                              numpy.float32)

        # set up the variance frame
        if varframe:
            vardata = outdata.copy()
            vdata1 = saltio.readimage(struct, struct[hdu].header['VAREXT'])
            vdata2 = saltio.readimage(struct, struct[hdu + 1].header['VAREXT'])

            bpmdata = outdata.copy()
            bdata1 = saltio.readimage(struct, struct[hdu].header['BPMEXT'])
            bdata2 = saltio.readimage(struct, struct[hdu + 1].header['BPMEXT'])

        x1 = xdsec1[0] - 1
        if x1 != 0:
            msg = 'The data in %s have not been trimmed prior to mosaicking.' \
                  % infile
            log.error(msg)
        if xsh[i + 1] < 0:
            x1 += abs(xsh[i + 1] / xbin)
        x2 = x1 + xdsec1[1]
        y1 = ydsec1[0] - 1
        if ysh[i + 1] < 0:
            y1 += abs(ysh[i + 1] / ybin)
        y2 = y1 + ydsec1[1]
        outdata[y1:y2, x1:x2] =\
            imdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        if varframe:
            vardata[y1:y2, x1:x2] =\
                vdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]
            bpmdata[y1:y2, x1:x2] =\
                bdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]

        x1 = x2
        x2 = x1 + xdsec2[1]
        y1 = ydsec2[0] - 1
        if ysh[i + 1] < 0:
            y1 += abs(ysh[i + 1] / ybin)
        y2 = y1 + ydsec2[1]
        outdata[y1:y2, x1:x2] =\
            imdata2[ydsec2[0] - 1:ydsec2[1], xdsec2[0] - 1:xdsec2[1]]

        if varframe:
            vardata[y1:y2, x1:x2] =\
                vdata2[ydsec2[0] - 1:ydsec2[1], xdsec2[0] - 1:xdsec2[1]]
            bpmdata[y1:y2, x1:x2] =\
                bdata2[ydsec2[0] - 1:ydsec2[1], xdsec2[0] - 1:xdsec2[1]]

        # size of new image
        naxis1 = str(xdsec1[1] + xdsec2[1])
        naxis2 = str(ydsec1[1])

        # add image and keywords to HDU list
        tilehdu[i + 1] = fits.ImageHDU(outdata)
        tilehdu[i + 1].header = struct[hdu].header
        #tilehdu[
        #    i + 1].header['DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

        if varframe:
            vext = i + 1 + int(nsciext / 2.)
            tilehdu[vext] = fits.ImageHDU(vardata)
            #tilehdu[vext].header = struct[struct[hdu].header['VAREXT']].header
            #tilehdu[vext].header[
            #    'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

            bext = i + 1 + 2 * int(nsciext / 2.)
            tilehdu[bext] = fits.ImageHDU(bpmdata)
            #tilehdu[bext].header = struct[struct[hdu].header['BPMEXT']].header
            #tilehdu[bext].header[
            #    'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'

        # image tile log message #1
        if log:
            message = os.path.basename(infile) + '[' + str(hdu) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '
            message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + ']'
            log.message(message, with_stdout=verbose, with_header=False)
            message = os.path.basename(infile) + '[' + str(hdu + 1) + ']['
            message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','
            message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '
            message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['
            message += str(xdsec1[1] + 1) + ':' + \
                str(xdsec1[1] + xdsec2[1]) + ','
            message += str(ydsec2[0]) + ':' + str(ydsec2[1]) + ']'
            log.message(message, with_stdout=verbose, with_header=False)

    # write temporary file of tiled CCDs
    hdulist = fits.HDUList(tilehdu)
    hdulist.writeto(tilefile)

    # iterate over CCDs, transform and rotate images
    yrot = [None] * 4
    xrot = [None] * 4

    tranfile = [' ']
    tranhdu = [0]
    if varframe:
        tranfile = [''] * (3 * int(nsciext / 2) + 1)
        tranhdu = [0] * (3 * int(nsciext / 2) + 1)
    else:
        tranfile = [''] * int(nsciext / 2 + 1)
        tranhdu = [0] * int(nsciext / 2 + 1)

    # this is hardwired for SALT where the second CCD is considered the
    # fiducial
    for hdu in range(1, int(nsciext / 2 + 1)):
        tranfile[hdu] = saltio.tmpfile(outpath)
        tranfile[hdu] += 'tran.fits'
        if varframe:
            tranfile[hdu + nccds] = saltio.tmpfile(outpath) + 'tran.fits'
            tranfile[hdu + 2 * nccds] = saltio.tmpfile(outpath) + 'tran.fits'

        ccd = hdu % nccds
        if (ccd == 0):
            ccd = nccds

        # correct rotation for CCD binning
        yrot[ccd] = rot[ccd] * ybin / xbin
        xrot[ccd] = rot[ccd] * xbin / ybin
        dxshift = xbin * int(float(int(gap) / xbin) + 0.5) - gap

        # transformation using geotran IRAF task
        # if (ccd == 1):
        if (ccd != 2):

            if geotran:
                message = '\nSALTMOSAIC -- geotran ' + tilefile + \
                    '[' + str(ccd) + '] ' + tranfile[hdu]
                message += ' \"\" \"\" xshift=' + \
                    str((xsh[ccd] + (2 - ccd) * dxshift) / xbin) + ' '
                message += 'yshift=' + \
                    str(ysh[ccd] / ybin) + ' xrotation=' + str(xrot[ccd]) + ' '
                message += 'yrotation=' + \
                    str(yrot[ccd]) + ' xmag=1 ymag=1 xmin=\'INDEF\' '
                message += 'xmax=\'INDEF\' ymin=\'INDEF\' ymax=\'INDEF\' '
                message += 'ncols=\'INDEF\' '
                message += 'nlines=\'INDEF\' verbose=\'no\' '
                message += 'fluxconserve=\'yes\' nxblock=2048 '
                message += 'nyblock=2048 interpolant=\'' + \
                    interp_type + '\' boundary=\'constant\' constant=0'
                log.message(message, with_stdout=verbose)

                yd, xd = tilehdu[ccd].data.shape
                ncols = 'INDEF'  # ncols=xd+abs(xsh[ccd]/xbin)
                nlines = 'INDEF'  # nlines=yd+abs(ysh[ccd]/ybin)
                geo_xshift = (xsh[ccd] + (2 - ccd) * dxshift) / xbin
                geo_yshift = ysh[ccd] / ybin
                iraf.images.immatch.geotran(tilefile + "[" + str(ccd) + "]",
                                            tranfile[hdu],
                                            "",
                                            "",
                                            xshift=geo_xshift,
                                            yshift=geo_yshift,
                                            xrotation=xrot[ccd],
                                            yrotation=yrot[ccd],
                                            xmag=1, ymag=1, xmin='INDEF',
                                            xmax='INDEF', ymin='INDEF',
                                            ymax='INDEF', ncols=ncols,
                                            nlines=nlines, verbose='no',
                                            fluxconserve='yes', nxblock=2048,
                                            nyblock=2048, interpolant=interp_type,
                                            boundary="constant", constant=0)
                if varframe:
                    var_infile = tilefile + "[" + str(ccd + nccds) + "]"
                    iraf.images.immatch.geotran(var_infile,
                                                tranfile[hdu + nccds],
                                                "",
                                                "",
                                                xshift=geo_xshift,
                                                yshift=geo_yshift,
                                                xrotation=xrot[ccd],
                                                yrotation=yrot[ccd],
                                                xmag=1, ymag=1, xmin='INDEF',
                                                xmax='INDEF', ymin='INDEF',
                                                ymax='INDEF', ncols=ncols,
                                                nlines=nlines, verbose='no',
                                                fluxconserve='yes',
                                                nxblock=2048, nyblock=2048,
                                                interpolant=interp_type,
                                                boundary="constant",
                                                constant=0)
                    bpm_infile = tilefile + "[" + str(ccd + 2 * nccds) + "]"
                    iraf.images.immatch.geotran(bpm_infile,
                                                tranfile[hdu + 2 * nccds],
                                                "",
                                                "",
                                                xshift=geo_xshift,
                                                yshift=geo_yshift,
                                                xrotation=xrot[ccd],
                                                yrotation=yrot[ccd],
                                                xmag=1, ymag=1, xmin='INDEF',
                                                xmax='INDEF', ymin='INDEF',
                                                ymax='INDEF', ncols=ncols,
                                                nlines=nlines, verbose='no',
                                                fluxconserve='yes',
                                                nxblock=2048, nyblock=2048,
                                                interpolant=interp_type,
                                                boundary="constant",
                                                constant=0)

                # open the file and copy the data to tranhdu
                tstruct = fits.open(tranfile[hdu])
                tranhdu[hdu] = tstruct[0].data
                tstruct.close()
                if varframe:
                    tranhdu[hdu + nccds] = \
                        fits.open(tranfile[hdu + nccds])[0].data
                    tranhdu[hdu + 2 * nccds] = \
                        fits.open(tranfile[hdu + 2 * nccds])[0].data

            else:
                log.message(
                    "Transform CCD #%i using dx=%s, dy=%s, rot=%s" %
                    (ccd, xsh[ccd] / 2.0, ysh[ccd] / 2.0, xrot[ccd]),
                    with_stdout=verbose, with_header=False)
                tranhdu[hdu] = geometric_transform(
                    tilehdu[ccd].data,
                    tran_func,
                    prefilter=False,
                    order=1,
                    extra_arguments=(xsh[ccd] / 2, ysh[ccd] / 2,
                                     1, 1, xrot[ccd], yrot[ccd]))
                tstruct = fits.PrimaryHDU(tranhdu[hdu])
                tstruct.writeto(tranfile[hdu])
                if varframe:
                    tranhdu[hdu + nccds] = geometric_transform(
                        tilehdu[hdu + 3].data,
                        tran_func,
                        prefilter=False,
                        order=1,
                        extra_arguments=(
                            xsh[ccd] / 2, ysh[ccd] / 2,
                            1, 1,
                            xrot[ccd], yrot[ccd]))
                    tranhdu[hdu + 2 * nccds] = geometric_transform(
                        tilehdu[hdu + 6].data,
                        tran_func,
                        prefilter=False,
                        order=1,
                        extra_arguments=(
                            xsh[ccd] / 2, ysh[ccd] / 2,
                            1, 1,
                            xrot[ccd], yrot[ccd]))

        else:
            log.message(
                "Transform CCD #%i using dx=%s, dy=%s, rot=%s" %
                (ccd, 0, 0, 0), with_stdout=verbose, with_header=False)
            tranhdu[hdu] = tilehdu[ccd].data
            if varframe:
                tranhdu[hdu + nccds] = tilehdu[ccd + nccds].data
                tranhdu[hdu + 2 * nccds] = tilehdu[ccd + 2 * nccds].data

    # open outfile
    if varframe:
        outlist = 4 * [None]
    else:
        outlist = 2 * [None]

    #outlist[0] = struct[0].copy()
    outlist[0] = fits.PrimaryHDU()
    outlist[0].header = struct[0].header

    naxis1 = int(gap / xbin * (nccds - 1))
    naxis2 = 0
    for i in range(1, nccds + 1):
        yw, xw = tranhdu[i].shape
        naxis1 += xw + int(abs(xsh[ccd] / xbin)) + 1
        naxis2 = max(naxis2, yw)
    outdata = numpy.zeros((naxis2, naxis1), numpy.float32)
    outdata.shape = naxis2, naxis1
    if varframe:
        vardata = outdata * 0
        bpmdata = outdata * 0 + 1

    # iterate over CCDs, stitch them to produce a full image
    hdu = 0
    totxshift = 0
    for hdu in range(1, nccds + 1):

        # read DATASEC keywords
        ydsec, xdsec = tranhdu[hdu].shape

        # define size and shape of final image
        # tile CCDs to yield mosaiced image
        x1 = int((hdu - 1) * (xdsec + gap / xbin)) + int(totxshift)
        x2 = xdsec + x1
        y1 = int(0)
        y2 = int(ydsec)
        outdata[y1:y2, x1:x2] = tranhdu[hdu]
        totxshift += int(abs(xsh[hdu] / xbin)) + 1
        if varframe:
            vardata[y1:y2, x1:x2] = tranhdu[hdu + nccds]
            bpmdata[y1:y2, x1:x2] = tranhdu[hdu + 2 * nccds]

    # make sure the gaps and any bad areas are flagged in the BPM
    if varframe:
        baddata = (outdata == 0)
        baddata = nd.maximum_filter(baddata, size=3)
        bpmdata[baddata] = 1

    # fill in the gaps if requested
    if fill:
        outdata = fill_gaps(outdata, 0)

    # add to the file
    outlist[1] = fits.ImageHDU(outdata)
    if varframe:
        outlist[2] = fits.ImageHDU(vardata, name='VAR')
        outlist[3] = fits.ImageHDU(bpmdata, name='BPM')

    # create the image structure
    outstruct = fits.HDUList(outlist)

    # update the header information
    # housekeeping keywords
    saltkey.put('NEXTEND', 2, outstruct[0])
    saltkey.new('EXTNAME', 'SCI', 'Extension name', outstruct[1])
    saltkey.new('EXTVER', 1, 'Extension number', outstruct[1])
    if varframe:
        saltkey.new('VAREXT', 2, 'Variance frame extension', outstruct[1])
        saltkey.new('BPMEXT', 3, 'BPM Extension', outstruct[1])

    try:
        saltkey.copy(struct[1], outstruct[1], 'CCDSUM')
    except Exception:
        pass

    # Add keywords associated with geometry
    saltkey.new('SGEOMGAP', gap, 'SALT Chip Gap', outstruct[0])
    c1str = '{:3.2f} {:3.2f} {:3.4f}'.format(xshift[0], yshift[0], rotation[0])
    saltkey.new('SGEOM1', c1str, 'SALT Chip 1 Transform', outstruct[0])
    c2str = '{:3.2f} {:3.2f} {:3.4f}'.format(xshift[1], yshift[1], rotation[1])
    saltkey.new('SGEOM2', c2str, 'SALT Chip 2 Transform', outstruct[0])

    # WCS keywords
    saltkey.new('CRPIX1', 0, 'WCS: X reference pixel', outstruct[1])
    saltkey.new('CRPIX2', 0, 'WCS: Y reference pixel', outstruct[1])
    saltkey.new(
        'CRVAL1',
        float(xbin),
        'WCS: X reference coordinate value',
        outstruct[1])
    saltkey.new(
        'CRVAL2',
        float(ybin),
        'WCS: Y reference coordinate value',
        outstruct[1])
    saltkey.new('CDELT1', float(xbin), 'WCS: X pixel size', outstruct[1])
    saltkey.new('CDELT2', float(ybin), 'WCS: Y pixel size', outstruct[1])
    saltkey.new('CTYPE1', 'pixel', 'X type', outstruct[1])
    saltkey.new('CTYPE2', 'pixel', 'Y type', outstruct[1])

    # cleanup temporary files
    if cleanup:
        for tfile in tranfile:
            if os.path.isfile(tfile):
                saltio.delete(tfile)
        if os.path.isfile(tilefile):
            status = saltio.delete(tilefile)

    # return the file
    return outstruct
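
A hedged usage sketch of make_mosaic follows. The input file name, chip gap, and the xshift/yshift/rotation coefficients are hypothetical placeholders for the values normally read from the SALT CCD geometry definition, and geotran=False selects the scipy geometric_transform path so the sketch does not depend on IRAF; logging and debug are the same pysalt helpers used in the other examples.

from astropy.io import fits

# Usage sketch: the file name and geometry coefficients below are hypothetical.
with logging('salt.log', debug) as log:
    struct = fits.open('mbxpP201301010001.fits')
    mosaic = make_mosaic(struct, gap=90,
                         xshift=[-2.4, 2.4], yshift=[-2.2, 0.4],
                         rotation=[0.0001, -0.0001],
                         geotran=False,   # scipy transform instead of IRAF geotran
                         cleanup=True, log=log, verbose=True)
    mosaic.writeto('msaltP201301010001.fits', overwrite=True)
    struct.close()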
Example #58
def bias(struct,subover=True,trim=True, subbias=False, bstruct=None, 
         median=False, function='polynomial',order=3,rej_lo=3,rej_hi=3,niter=10,
         plotover=False, log=None, verbose=True):
   """Bias subtracts the bias levels from a frame.  It will fit and subtract the overscan
      region, trim the images, and subtract a master bias if required.

      struct--image structure 
      subover--subtract the overscan region
      trim--trim the image
      subbias--subtract master bias
      bstruct--master bias image structure
      median--use the median instead of mean in image statistics
      function--form to fit to the overscan region
      order--order for the function
      rej_lo--sigma  of low points to reject in the fit
      rej_hi--sigma of high points to reject in the fit
      niter--number of iterations
      log--saltio log for recording information
      verbose--whether to print to stdout 
   """
   infile=saltkey.getimagename(struct[0])

   # how many extensions?
   nsciext = saltkey.get('NSCIEXT',struct[0])
   nextend = saltkey.get('NEXTEND',struct[0])
   nccd = saltkey.get('NCCDS',struct[0])

   # how many amplifiers?--this is hard wired
   amplifiers = 2 * nccd


   #log the process
   if subover and log:
        message = '%28s %7s %5s %4s %6s' % \
            ('HDU','Overscan','Order','RMS','Niter')
        log.message('\n     --------------------------------------------------', 
                   with_header=False, with_stdout=verbose)
        log.message(message, with_header=False, with_stdout=verbose)
        log.message('     --------------------------------------------------', 
                   with_header=False, with_stdout=verbose)

   if (plotover): 
       plt.figure(1)
       plt.axes([0.1,0.1,0.8,0.8])
       plt.xlabel('CCD Column')
       plt.ylabel('Pixel Counts (e-)')
       plt.ion()


   #loop through the extensions and subtract the bias
   for i in range(1,nsciext+1):
     if struct[i].name=='SCI':

       #get the bias section
       biassec = saltkey.get('BIASSEC',struct[i])
       y1,y2,x1,x2 = saltio.getSection(biassec, iraf_format=True)
       #get the data section
       datasec = saltkey.get('DATASEC',struct[i])
       dy1,dy2, dx1, dx2 = saltio.getSection(datasec, iraf_format=True)
  
       #setup the overscan region
       if subover:
           yarr=np.arange(y1,y2, dtype=float)
           data=struct[i].data
           odata=struct[i].data[y1:y2,x1:x2]
           if median:
              odata=np.median((struct[i].data[y1:y2,x1:x2]),axis=1)
              olevel=np.median((struct[i].data[y1:y2,x1:x2]))
              saltkey.new('OVERSCAN','%f' % (olevel),'Overscan median value', struct[i])
           else:
              odata=np.mean((struct[i].data[y1:y2,x1:x2]),axis=1)
              olevel=np.mean((struct[i].data[y1:y2,x1:x2]))
              saltkey.new('OVERSCAN','%f' % (olevel),'Overscan mean value', struct[i])

           #fit the overscan region
           ifit=saltfit.interfit(yarr, odata, function=function, \
                                 order=order, thresh=rej_hi, niter=niter)
           try:
               ifit.interfit()
               coeffs=ifit.coef
               ofit=ifit(yarr)
               omean, omed, osigma=saltstat.iterstat((odata-ofit), sig=3, niter=5)
           except ValueError:
               #catch the error if it is a zero array
               ofit=np.array(yarr)*0.0
               osigma=0.0
           except TypeError:
               #catch the error if it is a zero array
               ofit=np.array(yarr)*0.0
               osigma=0.0

           #if it hasn't been already, convert image to
           #double format
           struct[i].data = 1.0 * struct[i].data
           try:
               struct[i].header.remove('BZERO')
               struct[i].header.remove('BSCALE')
           except:
               pass


           #subtract the overscan region
           for j in range(len(struct[i].data[0])):
               struct[i].data[y1:y2,j] -= ofit

           #report the information 
           if log:
                message = '%25s[%1d] %8.2f %3d %7.2f %3d' % \
                    (infile, i, olevel, order, osigma, niter)
                log.message(message, with_stdout=verbose, with_header=False)

           #add the statistics to the image header
           saltkey.new('OVERRMS','%f' % (osigma),'Overscan RMS value', struct[i])

           #update the variance frame
           if saltkey.found('VAREXT', struct[i]):
               vhdu=saltkey.get('VAREXT', struct[i])
               try:
                   vdata=struct[vhdu].data
                   #The bias level should not be included in the noise from the signal
                   for j in range(len(struct[i].data[0])):
                       vdata[y1:y2,j] -= ofit
                   #add a bit to make sure that the minimum error is the rednoise
                   rdnoise= saltkey.get('RDNOISE',struct[i])
                   vdata[vdata<rdnoise**2]=rdnoise**2
                   struct[vhdu].data=vdata+osigma**2

               except Exception, e:
                    msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                    raise SaltError(msg)


           #plot the overscan region
           if plotover:  
              plt.plot(yarr, odata)
              plt.plot(yarr, ofit)

       #trim the data and update the headers
       if trim:
           struct[i].data=struct[i].data[dy1:dy2,dx1:dx2]
           datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
           saltkey.put('DATASEC',datasec,struct[i])

           #update the variance frame
           if saltkey.found('VAREXT', struct[i]):
               vhdu=saltkey.get('VAREXT', struct[i])
               struct[vhdu].data=struct[vhdu].data[dy1:dy2,dx1:dx2]
               datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
               saltkey.put('DATASEC',datasec,struct[vhdu])
           #update the BPM frame
           if saltkey.found('BPMEXT', struct[i]):
               bhdu=saltkey.get('BPMEXT', struct[i])
               struct[bhdu].data=struct[bhdu].data[dy1:dy2,dx1:dx2]
               datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
               saltkey.put('DATASEC',datasec,struct[bhdu])

       #subtract the master bias if necessary
       if subbias and bstruct:
           struct[i].data -= bstruct[i].data

           #update the variance frame
           if saltkey.found('VAREXT', struct[i]):
               vhdu=saltkey.get('VAREXT', struct[i])
               try:
                   vdata=struct[vhdu].data
                   struct[vhdu].data=vdata+bstruct[vhdu].data
               except Exception, e:
                    msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                    raise SaltError(msg)
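
The overscan correction above collapses the BIASSEC columns to one level per row, fits a low-order function along the rows with saltfit.interfit, and subtracts that fit from every column of the frame. A self-contained sketch of the same idea, with numpy.polyfit standing in for interfit and made-up section limits:

import numpy as np

def subtract_overscan(data, x1, x2, order=1):
    """Fit the row-averaged overscan columns data[:, x1:x2] and subtract the
    fit from every column of the frame (illustrative stand-in for the
    saltfit.interfit based fit above)."""
    yarr = np.arange(data.shape[0], dtype=float)
    odata = data[:, x1:x2].mean(axis=1)        # one overscan level per row
    coeffs = np.polyfit(yarr, odata, order)    # low-order fit along the rows
    ofit = np.polyval(coeffs, yarr)
    return data - ofit[:, np.newaxis]          # remove the fitted bias level

# Purely illustrative call on synthetic data:
frame = np.random.normal(1000.0, 5.0, size=(512, 560))
corrected = subtract_overscan(frame, x1=512, x2=560)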
Example #59
def specslit(image,
             outimage,
             outpref,
             exttype='auto',
             slitfile='',
             outputslitfile='',
             regprefix='',
             sections=3,
             width=25,
             sigma=2.2,
             thres=6,
             order=3,
             padding=5,
             yoffset=0,
             inter=False,
             clobber=True,
             logfile='salt.log',
             verbose=True):

    with logging(logfile, debug) as log:

        # check all the input and make sure that all the input needed is provided
        # by the user

        # read the image or image list and check if each in the list exist
        infiles = saltio.argunpack('Input', image)

        # unpack the outfiles
        outfiles = saltio.listparse('Outimages', outimage, outpref, infiles,
                                    '')

        # from the extraction type, check whether the input file is specified.
        # if the slitfile parameter is specified then use the slit files for
        # the extraction. if the extraction type is auto then use image for the
        # detection and the slit extraction

        if exttype == 'rsmt' or exttype == 'fits' or exttype == 'ascii' or exttype == 'ds9':
            slitfiles = saltio.argunpack('Slitfile', slitfile)
            if len(slitfiles) == 1:
                slitfiles = slitfiles * len(infiles)
            saltio.comparelists(infiles, slitfiles, 'image', 'slitfile')
        elif exttype == 'auto':
            slitfiles = infiles
            log.message(
                'Extraction type is AUTO. Slit detection will be done from image'
            )

        # read in if an optional ascii file is requested
        if len(outputslitfile) > 0:
            outslitfiles = saltio.argunpack('Outslitfiles', outputslitfile)
            saltio.comparelists(infiles, outslitfiles, 'image',
                                'outputslitfile')
        else:
            outslitfiles = [''] * len(infiles)

        # check if the width and sigma parameters were specified.
        # default is 25 and 2.2
        if width < 10.:
            msg = 'The width parameter needs be a value larger than 10'
            raise SALTSpecError(msg)

        if sigma < 0.0:
            msg = 'Sigma must be greater than zero'
            raise SALTSpecError(msg)

        # check the treshold parameter. this needs to be specified by the user
        if thres <= 0.0:
            msg = 'Threshold must be greater than zero'
            raise SALTSpecError(msg)

        # check to make sure that the sections are greater than the order
        if sections <= order:
            msg = 'Number of sections must be greater than the order for the spline fit'
            raise SALTSpecError(msg)

        # run through each of the images and extract the slits
        for img, oimg, sfile, oslit in zip(infiles, outfiles, slitfiles,
                                           outslitfiles):
            log.message('Processing image %s' % img)

            # open the image
            struct = saltio.openfits(img)
            ylen, xlen = struct[1].data.shape
            xbin, ybin = saltkey.ccdbin(struct[0], img)
            # setup the VARIANCE and BPM frames
            if saltkey.found('VAREXT', struct[1]):
                varext = saltkey.get('VAREXT', struct[1])
                varlist = []
            else:
                varext = None

            # setup the BPM frames
            if saltkey.found('BPMEXT', struct[1]):
                bpmext = saltkey.get('BPMEXT', struct[1])
                bpmlist = []
            else:
                bpmext = None

            # open the slit definition file or identify the slits in the image
            slitmask = None
            ycheck = False
            if exttype == 'rsmt':
                log.message('Using slits from %s' % sfile)
                if yoffset is None:
                    yoffset = 0
                    ycheck = True
                slitmask = mt.read_slitmask_from_xml(sfile)
                xpos = -0.3066
                ypos = 0.0117
                cx = int(xlen / 2.0)
                cy = int(ylen / 2.0) + ypos / 0.015 / ybin + yoffset
                order, slit_positions = mt.convert_slits_from_mask(
                    slitmask,
                    order=1,
                    xbin=xbin,
                    ybin=ybin,
                    pix_scale=0.1267,
                    cx=cx,
                    cy=cy)
                sections = 1
            elif exttype == 'fits':
                log.message('Using slits from %s' % sfile)
                order, slit_positions = read_slits_from_fits(sfile)
            elif exttype == 'ascii':
                log.message('Using slits from %s' % sfile)
                order, slit_positions = mt.read_slits_from_ascii(sfile)
            elif exttype == 'ds9':
                log.message('Using slits from %s' % sfile)
                order, slit_positions, slitmask = mt.read_slits_from_ds9(
                    sfile, order=order)
                slitmask = None
                sections = 1
            elif exttype == 'auto':
                log.message('Identifying slits in %s' % img)
                # identify the slits in the image
                order, slit_positions = identify_slits(struct[1].data, order,
                                                       sections, width, sigma,
                                                       thres)

                # write out the slit identifications if ofile has been supplied
                if oslit:
                    log.message('Writing slit positions to %s' % oslit)
                    mt.write_outputslitfile(slit_positions, oslit, order)

            if ycheck:
                slit_positions, dy = check_ypos(slit_positions, struct[1].data)
                log.message('Using an offset of {}'.format(dy))

            # extract the slits
            spline_x = mt.divide_image(struct[1].data, sections)
            spline_x = 0.5 * (np.array(spline_x[:-1]) + np.array(spline_x[1:]))
            extracted_spectra, spline_positions = mt.extract_slits(
                slit_positions,
                spline_x,
                struct[1].data,
                order=order,
                padding=padding)
            if varext:
                extracted_var, var_positions = mt.extract_slits(
                    slit_positions,
                    spline_x,
                    struct[varext].data,
                    order=order,
                    padding=padding)
            if bpmext:
                extracted_bpm, bpm_positions = mt.extract_slits(
                    slit_positions,
                    spline_x,
                    struct[bpmext].data,
                    order=order,
                    padding=padding)

            # write out the data to the new array
            # create the new file
            hdulist = fits.HDUList([struct[0]])

            # log the extracted spectra if needed
            log.message('', with_stdout=verbose)

            # setup output ds9 file
            if regprefix:
                regout = open(
                    regprefix + os.path.splitext(os.path.basename(img))[0] +
                    '.reg', 'w')
                regout.write('# Region file format: DS9 version 4.1\n')
                regout.write('# Filename: %s\n' % img)
                regout.write(
                    'global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\nphysical\n'
                )

            # add each
            imglist = []
            nslits = len(spline_positions)
            for i in range(nslits):
                y1 = spline_positions[i][0].min()
                y2 = spline_positions[i][1].max()
                msg = 'Extracted Spectra %i between %i to %i' % (i + 1, y1, y2)
                # log.message(msg, with_header=False, with_stdout=verbose)
                sdu = fits.ImageHDU(extracted_spectra[i],
                                    header=struct[1].header)
                if varext:
                    vdu = fits.ImageHDU(extracted_var[i],
                                        header=struct[varext].header)
                    sdu.header['VAREXT'] = i + nslits + 1
                    varlist.append(vdu)
                if bpmext:
                    bdu = fits.ImageHDU(extracted_bpm[i],
                                        header=struct[bpmext].header)
                    sdu.header['BPMEXT'] = i + 2 * nslits + 1
                    bpmlist.append(bdu)
                imglist.append(sdu)

                # add in some additional keywords
                imglist[i].header['MINY'] = (y1,
                                             'Lower Y value in original image')
                imglist[i].header['MAXY'] = (y2,
                                             'Upper Y value in original image')
                if regprefix:
                    xsize = struct[1].data.shape[1]
                    xsize = int(0.5 * xsize)
                    rtext = ''
                    if slitmask:
                        # rtext='%s, %8.7f, %8.7f, %3.2f' % (slitmask.slitlets.data[i]['name'], slitmask.slitlets.data[i]['targ_ra'], slitmask.slitlets.data[i]['targ_dec'], slitmask.slitlets.data[i]['slit_width'])
                        pass
                    regout.write('box(%i,%i, %i, %i) #text={%s}\n' %
                                 (xsize, 0.5 *
                                  (y1 + y2), 2 * xsize, y2 - y1, rtext))

                # add slit information
                if slitmask:
                    imglist[i].header['SLITNAME'] = (
                        slitmask.slitlets.data[i]['name'], 'Slit Name')
                    imglist[i].header['SLIT_RA'] = (
                        slitmask.slitlets.data[i]['targ_ra'], 'Slit RA')
                    imglist[i].header['SLIT_DEC'] = (
                        slitmask.slitlets.data[i]['targ_dec'], 'Slit DEC')
                    imglist[i].header['SLIT'] = (
                        slitmask.slitlets.data[i]['slit_width'], 'Slit Width')

            # add to the hdulist
            hdulist += imglist
            if varext:
                hdulist += varlist
            if bpmext:
                hdulist += bpmlist

            # write the slit positions to the header
            # create the binary table HDU that contains the slit positions
            tbhdu = mt.slits_HDUtable(slit_positions, order)
            bintable_hdr = tbhdu.header

            # add the extname parameter to the extension
            tbhdu.header['EXTNAME'] = 'BINTABLE'

            # record the slit table extension number in the primary header
            hdulist[0].header['SLITEXT'] = len(hdulist)
            hdulist.append(tbhdu)

            # add additional header information about the mask
            if slitmask:
                hdulist[0].header['MASKNAME'] = (slitmask.mask_name,
                                                 'SlitMask Name')
                hdulist[0].header['MASK_RA'] = (slitmask.center_ra,
                                                'SlitMask RA')
                hdulist[0].header['MASK_DEC'] = (slitmask.center_dec,
                                                 'SlitMask DEC')
                hdulist[0].header['MASK_PA'] = (slitmask.position_angle,
                                                'SlitMask Position Angle')

            # write out the image
            saltio.writefits(hdulist, oimg, clobber)