def write_new_table(self, fname):
    cols = list(self.get_columns())
    cols.extend(self.get_fiber_positions_columns())

    # Create the table HDU
    tablehdu = pf.new_table(cols)

    # Create an AstroData object to contain the table
    # and write to disk.
    new_ad = AstroData(tablehdu)
    new_ad.rename_ext('SCI', 1)
    new_ad.write(fname, clobber=True)
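
The pf.new_table factory used above was deprecated in later PyFITS releases; in modern astropy.io.fits the replacement is BinTableHDU.from_columns. A minimal sketch of the same write under astropy (the function name write_new_table_astropy is an assumption for illustration; cols is a list of Column objects as above):

from astropy.io import fits

def write_new_table_astropy(cols, fname):
    # BinTableHDU.from_columns replaces the old pyfits.new_table factory.
    tablehdu = fits.BinTableHDU.from_columns(cols)
    # overwrite= replaces the old clobber= keyword.
    fits.HDUList([fits.PrimaryHDU(), tablehdu]).writeto(fname, overwrite=True)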
Example #2
    def as_astrodata(self):
        """
            
          With each cut object in the cut_list having the SCI,DQ,VAR set,
          form an hdu and append it to adout.  Update keywords EXTNAME= 'SCI', 
          EXTVER=<footprint#>, CCDSEC, DISPAXIS, CUTSECT, CUTORDER in the header
          and reset WCS information if there was a WCS in the input AD header.

          ::

           Input:
              self.cut_list: List of Cut objects.
              self.adout:    Output AD object with MDF and
                             TRACEFP extensions.
           Output:
              adout: contains the appended HDUs.
        """

        adout = self._init_as_astrodata()

        ad = self.ad
        scihdr =        ad['SCI',1].header.copy()
        if self.has_dq:
            dqheader =  ad['DQ', 1].header.copy()
        if self.has_var:
            varheader = ad['VAR',1].header.copy()

        # Update NSCIEXT keyword to represent the current number of cuts.
        if new_pyfits_version:
            adout.phu.header.update = adout.phu.header.set
        adout.phu.header.update('NSCIEXT',len(self.cut_list)) 

        # Alias for the method renamed in PyFITS 3.1
        if new_pyfits_version:
            scihdr.update = scihdr.set
        extver = 1

        # Generate the cuts using the region's sci_cut,var_cut and
        # dq_cut
        for region,sci_cut,var_cut,dq_cut in self.cut_list: 
            rx1,rx2,ry1,ry2 = np.asarray(region) + 1   # To 1-based
            csec = '[%d:%d,%d:%d]'%(rx1,rx2,ry1,ry2)
            scihdr.update('NSCUTSEC',csec,
                          comment="Region extracted by 'cut_footprints'")
            scihdr.update('NSCUTSPC',extver,comment="Spectral order")
            form_extn_wcs(scihdr, self.wcs, region)
            new_sci_ext = AstroData(data=sci_cut,header=scihdr)
            new_sci_ext.rename_ext(name='SCI',ver=extver)
            adout.append(new_sci_ext)
            if self.has_dq:
                new_dq_ext = AstroData(data=dq_cut, header=dqheader)
                new_dq_ext.rename_ext(name='DQ',ver=extver)
                adout.append(new_dq_ext)
            if self.has_var:
                new_var_ext = AstroData(data=var_cut, header=varheader)
                new_var_ext.rename_ext(name='VAR',ver=extver)
                adout.append(new_var_ext)
            extver += 1

        return adout
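
The new_pyfits_version shim above (header.update = header.set) exists because PyFITS 3.1 turned Header.update into a dict-like method and moved the old (key, value, comment) behaviour to Header.set. A standalone sketch of the same compatibility pattern (the version test is a naive assumption for illustration):

import pyfits as pf

# Alias the renamed method so one call style works on both APIs.
new_pyfits_version = pf.__version__ >= '3.1'   # naive string compare, sketch only

hdr = pf.Header()
if new_pyfits_version:
    hdr.update = hdr.set                       # instance-level alias, as above
hdr.update('NSCIEXT', 5, comment='Number of science extensions')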
Example #3
    def as_bintable(self):
        """
        Creates a BINTABLE object from the 
        FootprintTrace object.
        
        Input:
           self.footprints: list of Footprint objects.

        Output:
           AD: HDU astrodata object with a TRACEFP bintable extension.

        **Column description**
        
        ::

         'id'       : integer reference number for footprint.
         'region'   : (x1,x2,y1,y2), window of pixel co-ords enclosing this
                      footprint. The origin of these coordinates is the
                      lower left of the original image.
         'range1'   : (x1,x2,y1,y2), range where edge_1 is valid.
                      The origin of these coordinates is the lower left of the
                      original image.
         'function1': Fit function name (default: polynomial) fitting edge_1.
         'coeff1'   : Array of coefficients, high to low order, such that
                      pol(x) = c1*x**2 + c2*x + c3   (for order 2).
         'order1'   : Order of polynomial (default: 2).
         'range2'   : ditto for edge_2.
         'function2': ditto for edge_2.
         'coeff2'   : ditto for edge_2.
         'order2'   : ditto for edge_2.

         'cutrange1'   : (x1,x2,y1,y2), range where edge_1 is valid.
                         The origin of these coordinates is the lower left
                         of the cutout region.
         'cutfunction1': Fit function name (default: polynomial).
         'cutcoeff1'   : Array of coefficients, high to low order, such that
                         pol(x) = c1*x**2 + c2*x + c3   (for order 2).
         'cutorder1'   : Order of polynomial (default: 2).
         'cutrange2'   : ditto for edge_2.
         'cutfunction2': ditto for edge_2.
         'cutcoeff2'   : ditto for edge_2.
         'cutorder2'   : ditto for edge_2.

        """
        footprints = self.footprints

        # Get n_coeff. We assume it is the same for all edges.
        n_coeff = len(footprints[0].edges[0].coefficients)
        c1 = pf.Column(name='id', format='J')
        c2 = pf.Column(name='region', format='4E')
        c3 = pf.Column(name='range1', format='4E')
        c4 = pf.Column(name='function1', format='15A')
        c5 = pf.Column(name='order1', format='J')
        c6 = pf.Column(name='coeff1', format='%dE' % n_coeff)

        c7 = pf.Column(name='range2', format='4E')
        c8 = pf.Column(name='function2', format='15A')
        c9 = pf.Column(name='order2', format='J')
        c10 = pf.Column(name='coeff2', format='%dE' % n_coeff)

        c11 = pf.Column(name='cutrange1', format='4E')
        c12 = pf.Column(name='cutfunction1', format='15A')
        c13 = pf.Column(name='cutorder1', format='J')
        c14 = pf.Column(name='cutcoeff1', format='%dE' % n_coeff)

        c15 = pf.Column(name='cutrange2', format='4E')
        c16 = pf.Column(name='cutfunction2', format='15A')
        c17 = pf.Column(name='cutorder2', format='J')
        c18 = pf.Column(name='cutcoeff2', format='%dE' % n_coeff)

        nrows = len(footprints)
        tbhdu = pf.new_table(pf.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,
                                         c11,c12,c13,c14,c15,c16,c17,c18]),
                             nrows=nrows)
        tb = tbhdu    # an alias

        # Write data to table columns

        orientation = footprints[0].edges[0].orientation
        for k,footprint in enumerate(footprints):
            edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
            tb.data.field('id')[k]        = footprint.id
            tb.data.field('region')[k]    = np.asarray(footprint.region)

            # EDGE_1 DATA with respect to original image co-ords
            range1 = np.asarray(edge1.xlim+edge1.ylim)  # (x1, x2, y1, y2)
            tb.data.field('range1')[k]    = range1
            tb.data.field('function1')[k] = edge1.function
            tb.data.field('order1')[k]    = edge1.order
            tb.data.field('coeff1')[k]    = edge1.coefficients

            # EDGE_2 DATA with respect to original image co-ords
            range2 = np.asarray(edge2.xlim+edge2.ylim)  # (x1, x2, y1, y2)
            tb.data.field('range2')[k]    = range2
            tb.data.field('function2')[k] = edge2.function
            tb.data.field('order2')[k]    = edge2.order
            tb.data.field('coeff2')[k]    = edge2.coefficients

            region_x1 = footprint.region[0]
            region_y1 = footprint.region[2]
            # Set up the coefficients of the edge fit functions. We are
            # shifting the origin, so refit.
            lcoeff=[]
            zval=[]
            for edge in [edge1, edge2]:
                xx, yy = edge.trace
                # We need to refit inside the cut region
                xmr = xx - region_x1
                ymr = yy - region_y1
                if orientation == 0:
                    z = gfit.Gfit(xmr, ymr, edge.function, edge.order)
                else:
                    z = gfit.Gfit(ymr, xmr, edge.function, edge.order)
                lcoeff.append(z.coeff)
                zval.append(z)

            xlim1 = np.asarray(edge1.xlim)
            ylim1 = np.asarray(edge1.ylim)
            xlim2 = np.asarray(edge2.xlim)
            ylim2 = np.asarray(edge2.ylim)

            # Get the maximum values from both edges, so we can zero
            # the areas outside the footprint when cutting.
            #
            if orientation == 0:
                # Choose the largest x between both edges. 
                xmax = max(xlim1[1],xlim2[1])
                xlim1[1] = xmax
                xlim2[1] = xmax
                x1,x2 = (min(0,xlim1[0]),xmax)
                # And reevaluate the y values at this xmax
                y1 = ylim1[0] - region_y1
                y2 = zval[1](xmax)[0]
            else:
                # Choose the largest y between both edges
                ymax = max(ylim1[1],ylim2[1])
                ylim1[1] = ymax
                ylim2[1] = ymax
                y1,y2 = (min(0,ylim1[0]),ymax)
                # And reevaluate the x values at this ymax
                x1 = xlim1[0] - region_x1
                x2 = zval[1](ymax)[0] 

            # --- Set edge_1 data with respect to cutout image co-ords.
            tb.data.field('cutrange1')[k]    = (x1,x2,y1,y2)
            tb.data.field('cutfunction1')[k] = edge1.function
            tb.data.field('cutorder1')[k]    = edge1.order
            tb.data.field('cutcoeff1')[k]    = lcoeff[0]


            # --- Set edge_2 data with respect to cutout image co-ords.
            # Offsets from footprint.region (x1,y1) applied to range2.
            tb.data.field('cutrange2')[k]    = (x1,x2,y1,y2)
            tb.data.field('cutfunction2')[k] = edge2.function
            tb.data.field('cutorder2')[k]    = edge2.order
            tb.data.field('cutcoeff2')[k]    = lcoeff[1]

        # Add comment to TTYPE card
        hdr = tb.header
        if new_pyfits_version:
            hdr.update = hdr.set
        hdr.update('TTYPE2',hdr['TTYPE2'],
                  comment='(x1,x2,y1,y2): footprint window of pixel co-ords.')
        hdr.update('TTYPE3',hdr['TTYPE3'],
                  comment='(x1,x2,y1,y2): Edge fit window definition.')
        hdr.update('TTYPE4',hdr['TTYPE4'], comment='Type of fitting function.')
        hdr.update('TTYPE5',hdr['TTYPE5'], comment='Number of coefficients.')
        hdr.update('TTYPE6',hdr['TTYPE6'],
                  comment='Coeff array: c[0]*x**3 + c[1]*x**2 + c[2]*x + c[3]')
        tb.header = hdr

        # Create an AD object with this
        tabad = AstroData(tbhdu)
        tabad.rename_ext("TRACEFP", 1)

        return tabad
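
Because the coefficients are stored high to low order, they can be passed straight to numpy's polyval. A hypothetical read-back sketch (the file name trace.fits is made up; the extension and column names are as defined above):

import numpy as np
import pyfits as pf

# Evaluate edge_1 of the first footprint from a written TRACEFP table.
tb = pf.open('trace.fits')['TRACEFP']
x1, x2, y1, y2 = tb.data.field('range1')[0]
coeffs = tb.data.field('coeff1')[0]   # high to low order, as documented above
xs = np.arange(x1, x2)
ys = np.polyval(coeffs, xs)           # pol(x) = c1*x**2 + c2*x + c3 for order 2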
Example #4
    def merge_catalogs(self, ref_wcs, tile, merge_extvers, tab_extname, 
                        recalculate_xy='wcs',transform_pars=None):
        """
          This function merges together separate bintable extensions (tab_extname), 
          converts the pixel coordinates to the reference extension WCS
          and remove duplicate entries based on RA and DEC. 

          NOTE: Names used here so far: *OBJCAT:* Object catalog extension name

          *Input:*

          :param ref_wcs: Pywcs object containing the WCS from the output header.
          :param merge_extvers: List of extvers to merge from the tab_extname
          :param tab_extname: Binary table extension name to be merge over all
                         its ext_ver's.
          :param transform_pars: Dictionary  with rotation angle, translation
                                 and magnification.
          :param recalculate_xy: Use reference extension WCS to recalculate the
                                 pixel coordinates. If value is 'transform' use 
                                 the tranformation linear equations.
          :type recalculate_xy: (string, default: 'wcs'). 
	           Allow values: ('wcs', 'transform')
                        
          Note
          ----
             For 'transform' mode this are the
             linear equations to use.

             X_out = X*mx*cosA - Y*mx*sinA + mx*tx
             Y_out = X*my*sinA + Y*my*cosA + my*ty

             mx,my: magnification factors.
             tx,ty: translation amount in pixels.
             A: Angle in radians.
        """

        column_names = self.column_names
        adoutput_list = []
           
        col_names = None
        col_fmts = None
        col_data = {}      # Dictionary to hold column data from all extensions
        newdata = {}
        
        # Get column names from column_names dictionary
        # EXAMPLE:
        #   column_names = 
        #      {'OBJCAT': ('X_IMAGE', 'Y_IMAGE', 'X_WORLD', 'Y_WORLD'),
        #      'REFCAT': (None, None, 'RAJ2000', 'DEJ2000') }


        for key in column_names:
            if key == tab_extname:
                Xcolname, Ycolname = column_names[key][:2]
                ra_colname, dec_colname = column_names[key][2:4]

        # Get catalog data for the extension numbers in merge_extvers list.
        do_transform = (recalculate_xy == 'transform') and (Xcolname is not None)
        if do_transform:
            block_index = self.data_index_per_block
            nbx,nby = self.geometry.mosaic_grid
        for extv in merge_extvers:
        
            inp_catalog = self.ad[tab_extname,extv]

            # Make sure there is data. 
            if inp_catalog is None:
                continue
            if inp_catalog.data is None:
                continue
            if len(inp_catalog.data)==0:
                continue
            catalog_data = True

            # Get column names and formats for the first extv
            # and copy the data into the dictionary.
            if col_names is None:
                col_names = inp_catalog.data.names
                col_fmts = inp_catalog.data.formats
                # fill out the dictionary
                for name in col_names:
                    col_data[name] = []
                xx=[]; yy=[]
            # Copy the data and append each column to the dictionary.
            for name in col_names:
                newdata[name] = inp_catalog.data.field(name)
                col_data[name] = np.append(col_data[name], newdata[name])

            if do_transform:
                # Get the block tuple where an amplifier (extv) is located.
                block = [k for k, v in block_index.iteritems() if extv-1 in v][0]
                if (extv-1) in block_index[block]:
                    # We might have more than one amplifier per block,
                    # so offset all these xx,yy to block's lower left.
                    x1,y1=[self.coords['amp_block_coord'][extv-1][k] for k in [0,2]]
                    # add it to the xx,yy
                    xx = np.append(xx,newdata[Xcolname]+x1)
                    yy = np.append(yy,newdata[Ycolname]+y1)
                    if extv%self._amps_per_block != 0:
                       continue


                # Turn tuples values (col,row) to index
                bindx = block[0]+nbx*block[1]
                nxx,nyy = self._transform_xy(bindx,xx,yy) 

                # Now change the origin of the block's (nxx,nyy) set to the
                # mosaic lower left. We find the offset of the lower-left
                # corner by adding the widths and the gaps of all the blocks
                # to the left of the current block.
                #

                if tile: gap_mode = 'tile_gaps'
                else:    gap_mode = 'transform_gaps'
                gaps = self.geometry.gap_dict[gap_mode]
                # The block size in pixels.
                blksz_x,blksz_y = self.blocksize
                col,row = block
                # the sum of the gaps to the left of the current block
                sgapx = sum([gaps[k,row][0] for k in range(col+1)])
                # the sum of the gaps below of the current block
                sgapy = sum([gaps[col,k][1] for k in range(row+1)])
                ref_x1 = int(col*blksz_x + sgapx)
                ref_x2 = ref_x1 + blksz_x
                ref_y1 = int(row*blksz_y + sgapy)
                ref_y2 = int(ref_y1 + blksz_y)

                newdata[Xcolname] = nxx+ref_x1
                newdata[Ycolname] = nyy+ref_y1
                xx = []
                yy = []

        # Eliminate possible duplicate values in the ra, dec columns
        ra,  raindx  = np.unique(col_data[ra_colname].round(decimals=7),
                        return_index=True)
        dec, decindx = np.unique(col_data[dec_colname].round(decimals=7),
                        return_index=True)

        # Duplicates are those with the same index in the raindx and decindx
        # lists. Look for elements with different indices; to do this we need
        # to sort the lists.
        raindx.sort()
        decindx.sort()

        # See if the 2 arrays have the same length
        ilen = min(len(raindx), len(decindx))

        # Get the indices from the 2 lists of the same size
        v, = np.where(raindx[:ilen] != decindx[:ilen])
        if len(v) > 0:
            # Filter out the duplicates
            try:
                for name in col_names:
                    col_data[name] = col_data[name][v]
            except:
                print 'ERROR:', len(v), name

        # Now that we have the catalog data from all extensions in the
        # dictionary, we calculate the new pixel positions w.r.t. the
        # reference WCS. Only an object table contains X,Y column
        # information; reference catalogs do not.
        #
        if (recalculate_xy == 'wcs') and (Xcolname is not None):

            xx = col_data[Xcolname]
            yy = col_data[Ycolname]
            ra = col_data[ra_colname]
            dec = col_data[dec_colname]

            # Get new pixel coordinates for all ra,dec in the dictionary.
            # Use the input wcs object.
            newx,newy = ref_wcs.wcs_sky2pix(ra,dec,1)

            # Update pixel position in the dictionary to the new values.
            col_data[Xcolname] = newx
            col_data[Ycolname] = newy

        # Create columns information
        columns = {}
        table_columns = []
        for name, fmt in zip(col_names, col_fmts):
            # Let add_catalog auto-number sources
            if name == "NUMBER":
                continue

            # Define pyfits columns
            data = columns.get(name, pf.Column(name=name, format=fmt,
                            array=col_data[name]))
            table_columns.append(data)

        # Make the output table using pyfits functions
        col_def = pf.ColDefs(table_columns)
        tb_hdu = pf.new_table(col_def)

        # Now make an AD object from this table
        adout = AstroData(tb_hdu)
        adout.rename_ext(tab_extname,1)

        # Append to any other new table we might have
        adoutput_list.append(adout)

        return adoutput_list
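
The 'transform' equations from the docstring map directly onto numpy; a minimal sketch of that mode's arithmetic (the function and argument names are assumptions taken from the docstring):

import numpy as np

def transform_xy(x, y, mx=1.0, my=1.0, tx=0.0, ty=0.0, angle=0.0):
    # angle in radians; mx,my magnifications; tx,ty translations in pixels.
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    x_out = x*mx*cos_a - y*mx*sin_a + mx*tx
    y_out = x*my*sin_a + y*my*cos_a + my*ty
    return x_out, y_out

Example #5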
    def _calculate_var(self, adinput=None, add_read_noise=False,
                       add_poisson_noise=False):
        """
        The _calculate_var helper function is used to calculate the variance
        and add a variance extension to the single input AstroData object.
        
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Get the gain and the read noise using the appropriate descriptors.
        gain_dv = adinput.gain()
        read_noise_dv = adinput.read_noise()

        # Only check read_noise here as gain descriptor is only used if units
        # are in ADU
        if read_noise_dv.is_none() and add_read_noise:
            # The descriptor functions return None if a value cannot be found
            # and stores the exception info. Re-raise the exception.
            if hasattr(adinput, "exception_info"):
                raise adinput.exception_info
            else:
                raise Errors.InputError("read_noise descriptor "
                                        "returned None...\n%s"
                                        % (read_noise_dv.info()))
            
        # Set the data type of the final variance array
        var_dtype = np.dtype(np.float32)
        
        # Loop over the science extensions in the dataset
        for ext in adinput[SCI]:
            extver = ext.extver()
            bunit  = ext.get_key_value("BUNIT")

            if bunit == "adu":
                # Get the gain value using the appropriate descriptor. The gain
                # is only used if the units are in ADU. Raise if gain is None
                gain = gain_dv.get_value(extver=extver)
                if gain is not None:
                    log.fullinfo("Gain for %s[%s,%d] = %f"
                                 % (adinput.filename, SCI, extver, gain))
                elif add_read_noise or add_poisson_noise:
                    err_msg = ("Gain for %s[%s,%d] is None. Cannot calculate "
                                "the variance properly."
                                % (adinput.filename, SCI, extver))
                    raise Errors.InputError(err_msg)
                
                units = "ADU"
            elif bunit == "electron" or bunit == "electrons":
                units = "electrons"
            else:
                # Perhaps something more sensible should be done here?
                raise Errors.InputError("No units found. Not calculating "
                                        "variance.")
            
            if add_read_noise:
                # Get the read noise value (in units of electrons) using the
                # appropriate descriptor. The read noise is only used if
                # add_read_noise is True
                read_noise = read_noise_dv.get_value(extver=extver)
                if read_noise is not None:
                    log.fullinfo("Read noise for %s[%s,%d] = %f"
                                 % (adinput.filename, SCI, extver, read_noise))
                    
                    # Determine the variance value to use when calculating the
                    # read noise component of the variance.
                    read_noise_var_value = read_noise
                    if units == "ADU":
                        read_noise_var_value = read_noise / gain
                    
                    # Add the read noise component of the variance to a zeros
                    # array that is the same size as the pixel data in the
                    # science extension
                    log.fullinfo("Calculating the read noise component of the "
                                 "variance in %s" % units)
                    var_array_rn = np.add(
                      np.zeros(ext.data.shape), (read_noise_var_value)**2)
                else:
                    log.warning("Read noise for %s[%s,%d] is None. Setting to "
                                "zero" % (adinput.filename, SCI, extver))
                    var_array_rn = np.zeros(ext.data.shape)
                    
            if add_poisson_noise:
                # Determine the variance value to use when calculating the
                # poisson noise component of the variance
                poisson_noise_var_value = ext.data
                if units == "ADU":
                    poisson_noise_var_value = ext.data / gain
                
                # Calculate the poisson noise component of the variance. Set
                # pixels that are less than or equal to zero to zero.
                log.fullinfo("Calculating the poisson noise component of "
                             "the variance in %s" % units)
                var_array_pn = np.where(
                  ext.data > 0, poisson_noise_var_value, 0)
            
            # Create the final variance array
            if add_read_noise and add_poisson_noise:
                var_array_final = np.add(var_array_rn, var_array_pn)
            
            if add_read_noise and not add_poisson_noise:
                var_array_final = var_array_rn
            
            if not add_read_noise and add_poisson_noise:
                var_array_final = var_array_pn
            
            var_array_final = var_array_final.astype(var_dtype)
            
            # If the read noise component and the poisson noise component are
            # calculated and added separately, then a variance extension will
            # already exist in the input AstroData object. In this case, just
            # add this new array to the current variance extension
            if adinput[VAR, extver]:
                
                # If both the read noise component and the poisson noise
                # component have been calculated, don't add to the variance
                # extension
                if add_read_noise and add_poisson_noise:
                    raise Errors.InputError(
                        "Cannot add read noise component and poisson noise "
                        "component to variance extension as the variance "
                        "extension already exists")
                else:
                    log.fullinfo("Combining the newly calculated variance "
                                 "with the current variance extension "
                                 "%s[%s,%d]" % (adinput.filename, VAR, extver))
                    adinput[VAR, extver].data = np.add(
                      adinput[VAR, extver].data,
                      var_array_final).astype(var_dtype)
            else:
                # Create the variance AstroData object
                var = AstroData(data=var_array_final)
                var.rename_ext(VAR, ver=extver)
                var.filename = adinput.filename
                
                # Call the _update_var_header helper function to update the
                # header of the variance extension with some useful keywords
                var = self._update_var_header(sci=ext, var=var, bunit=bunit)
                
                # Append the variance AstroData object to the input AstroData
                # object. 
                log.fullinfo("Adding the [%s,%d] extension to the input "
                             "AstroData object %s" % (VAR, extver,
                                                      adinput.filename))
                adinput.append(moredata=var)
        
        return adinput
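
For BUNIT='adu' the loop above amounts to var = (read_noise/gain)**2 + data/gain, with the Poisson term zeroed for non-positive pixels. A condensed sketch of that arithmetic (the function name is an assumption):

import numpy as np

def variance_adu(data, read_noise, gain):
    # Read-noise term in ADU**2 plus Poisson term for positive pixels only.
    var_rn = np.zeros(data.shape) + (read_noise / gain)**2
    var_pn = np.where(data > 0, data / gain, 0)
    return (var_rn + var_pn).astype(np.float32)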
Example #6
def test_method_rename_ext_2():
    ad = AstroData(TESTFILE)      
    with pytest.raises(SingleHDUMemberExcept):
        ad.rename_ext("FOO")
Example #7
def test_method_rename_ext_4():
    ad = AstroData(TESTFILE2)
    ad.rename_ext("FOO", ver=99)
    assert ad.extname() == "FOO"
    assert ad.extver() == 99
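Example #8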
    def makeFringeFrame(self,rc):

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "makeFringeFrame", 
                                 "starting"))

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Check for at least 3 input frames
        adinput = rc.get_inputs_as_astrodata()
        if len(adinput)<3:
            log.stdinfo('Fewer than 3 frames provided as input. ' +
                        'Not making fringe frame.')

            # Report the empty list to the reduction context
            rc.report_output(adoutput_list)
        
        else:
            rc.run("correctBackgroundToReferenceImage"\
                       "(remove_zero_level=True)")

            # If needed, do a rough median on all frames, subtract,
            # and then redetect to help distinguish sources from fringes
            sub_med = rc["subtract_median_image"]
            if sub_med:
                adinput = rc.get_inputs_as_astrodata()

                # Get data by science extension
                data = {}
                for ad in adinput:
                    for sciext in ad["SCI"]:
                        key = (sciext.extname(),sciext.extver())
                        if key in data:
                            data[key].append(sciext.data)
                        else:
                            data[key] = [sciext.data]


                # Make a median image for each extension
                import pyfits as pf
                median_ad = AstroData()
                median_ad.filename = gt.filename_updater(
                    adinput=adinput[0], suffix="_stack_median", strip=True)
                for key in data:
                    med_data = np.median(np.dstack(data[key]),axis=2)
                    hdr = pf.Header()
                    ext = AstroData(data=med_data, header=hdr)
                    ext.rename_ext(key)
                    median_ad.append(ext)

                # Subtract the median image
                rc["operand"] = median_ad
                rc.run("subtract")

                # Redetect to get a good object mask
                rc.run("detectSources")

                # Add the median image back in to the input
                rc.run("add")

            # Add the object mask into the DQ plane
            rc.run("addObjectMaskToDQ")
            
            # Stack frames with masking from DQ plane
            rc.run("stackFrames(operation=%s)" % rc["operation"])

        yield rc
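
The median image above is built per (EXTNAME, EXTVER) key by stacking the 2-D planes along a third axis and taking the median. A minimal standalone sketch of that step:

import numpy as np

def median_image(planes):
    # Median-combine a list of equally shaped 2-D arrays, as done per
    # science extension in makeFringeFrame above.
    return np.median(np.dstack(planes), axis=2)

Example #9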
    def addReferenceCatalog(self, rc):
        """
        The reference catalog is a dictionary in jhk_catalog.py


        Append the catalog as a FITS table with extenstion name
        'REFCAT', containing the following columns:

        - 'Id'       : Unique ID. Simple running number
        - 'Name'     : SDSS catalog source name
        - 'RAJ2000'  : RA as J2000 decimal degrees
        - 'DEJ2000'  : Dec as J2000 decimal degrees
        - 'J'     : SDSS u band magnitude
        - 'e_umag'   : SDSS u band magnitude error estimage
        - 'H'     : SDSS g band magnitude
        - 'e_gmag'   : SDSS g band magnitude error estimage
        - 'rmag'     : SDSS r band magnitude
        - 'e_rmag'   : SDSS r band magnitude error estimage
        - 'K'     : SDSS i band magnitude
        - 'e_imag'   : SDSS i band magnitude error estimage

        :param source: Source catalog to query. This used as the catalog
                       name on the vizier server
        :type source: string

        :param radius: The radius of the cone to query in the catalog, 
                       in degrees. Default is 4 arcmin
        :type radius: float
        """

        import pyfits as pf

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "addReferenceCatalog", "starting"))

        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["addReferenceCatalog"]

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Get the necessary parameters from the RC
        source = rc["source"]
        radius = rc["radius"]

        # Get Local JHK catalog as a dictionary

        jhk = Lookups.get_lookup_table("Gemini/NIRI/jhk_catalog", "jhk") 

        # Form arrays from the input dict
        ra=[]; dec=[]; vals=[]
        for key in jhk.keys():
            ra.append(key[0])
            dec.append(key[1])
            vals.append(jhk[key])
        # Sort by RA
        order = np.argsort(ra)
        ra,dec = map(np.asarray, (ra,dec))
        ra = ra[order]
        dec = dec[order]
        vals = [vals[k] for k in order]
        # Get the magnitudes and errs from each record (j,je,h,he,k,ke,name)
        vals = np.asarray([vals[k][:6] for k in range(len(ra))])
        # Separate mags into J,H,K mags arrays for clarity
        irmag={}
        irmag['Jmag']=     vals[:,0]
        irmag['Jmag_err']= vals[:,1]
        irmag['Hmag']=     vals[:,2]
        irmag['Hmag_err']= vals[:,3]
        irmag['Kmag']=     vals[:,4]
        irmag['Kmag_err']= vals[:,5]

        #print 'JMAG00:',[(irmag['Jmag'][i],irmag['Jmag_err'][i]) 
        #                for i in range(5)]

        # Loop over each input AstroData object in the input list
        adinput = rc.get_inputs_as_astrodata()
        for ad in adinput:

            try:
                input_ra = ad.ra().as_pytype()
                input_dec = ad.dec().as_pytype()
            except:
                if "qa" in rc.context:
                    log.warning("No RA/Dec in header of %s; cannot find "\
                                "reference sources" % ad.filename)
                    adoutput_list.append(ad)
                    continue
                else:
                    raise

            table_name = 'jhk.tab'
            # Loop through the science extensions
            for sciext in ad['SCI']:
                extver = sciext.extver()

                # Did we get anything?
                if (1): # We do have a dict with ra,dec
                    # Create one table per extension

                    # Create a running id number
                    refid=range(1, len(ra)+1)

                    # Make the pyfits columns and table
                    c1 = pf.Column(name="Id",format="J",array=refid)
                    c3 = pf.Column(name="RAJ2000",format="D",unit="deg",array=ra)
                    c4 = pf.Column(name="DEJ2000",format="D",unit="deg",array=dec)
                    c5 = pf.Column(name="Jmag",format="E",array=irmag['Jmag'])
                    c6 = pf.Column(name="e_Jmag",format="E",array=irmag['Jmag_err'])
                    c7 = pf.Column(name="Hmag",format="E",array=irmag['Hmag'])
                    c8 = pf.Column(name="e_Hmag",format="E",array=irmag['Hmag_err'])
                    c9 = pf.Column(name="Kmag",format="E",array=irmag['Kmag'])
                    c10= pf.Column(name="e_Kmag",format="E",array=irmag['Kmag_err'])
                    col_def = pf.ColDefs([c1,c3,c4,c5,c6,c7,c8,c9,c10])
                    tb_hdu = pf.new_table(col_def)

                    # Add comments to the REFCAT header to describe it.
                    tb_hdu.header.add_comment('Source catalog derived from the %s'
                                         ' catalog on vizier' % table_name)

                    tb_ad = AstroData(tb_hdu)
                    tb_ad.rename_ext('REFCAT', extver)

                    if(ad['REFCAT',extver]):
                        log.fullinfo("Replacing existing REFCAT in %s" % ad.filename)
                        ad.remove(('REFCAT', extver))
                    else:
                        log.fullinfo("Adding REFCAT to %s" % ad.filename)
                    ad.append(tb_ad)

            # Match the object catalog against the reference catalog
            # Update the refid and refmag columns in the object catalog
            if ad.count_exts("OBJCAT")>0:
                ad = _match_objcat_refcat(adinput=ad)[0]
            else:
                log.warning("No OBJCAT found; not matching OBJCAT to REFCAT")

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)

            # Append the output AstroData object to the list 
            # of output AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
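
A hypothetical check of the appended table (the extension name and columns are as created in the code above):

# Usage sketch, assuming `ad` has been through addReferenceCatalog:
refcat = ad['REFCAT', 1]
print 'First five J magnitudes:', refcat.data.field('Jmag')[:5]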
Example #11
def test_method_rename_ext_5():  # TypeError w/ only 'ver=' param
    ad = AstroData(TESTFILE2)
    with pytest.raises(TypeError):
        ad.rename_ext(ver=2)
Example #12
def test_method_rename_ext_3():
    ad = AstroData(TESTFILE2)  # Single 'SCI' ext
    ad.rename_ext("FOO")
    assert ad.extname() == "FOO"
Example #14
def test_method_rename_ext_1():  # Raise on multi-ext
    ad = AstroData(TESTFILE)
    with pytest.raises(SingleHDUMemberExcept):
        ad.rename_ext("SCI", ver=99)
    def getRefs(self):
        """ Run the add reference catalog. Actually adding the 
            Bintable to the input ad object.
 
        """

        from pyfits import Column

        log = self.log

        extname = 'REFCAT'

        outad = self.outad

        # Select catalog and format the output data
        usecols,formats,band,delimiter = self.selStdsCatalog()
        refid,ra,dec,fmag = self.readStds(usecols, formats, delimiter)

        # Loop through the SCI extensions
        for scix in outad['SCI']:

            xtver = scix.extver()

            # x,y are the coordinates of the reference stars within the 
            # input image field.
            g,x,y = self.search4standards(ra, dec, xtver)

            log.info("Found %d standards for field in  %s['SCI',%d]"%\
                (len(g[0]),outad.filename,xtver))

            # g: index array with the index of the standards within the field.
            if len(g[0])>0:
                nlines = len(ra)

                # If extension already exists, just update
                if outad[extname,xtver]:
                    log.info('Table already exists, updating values.')
                    tdata = outad[extname,xtver].data
                    theader = outad[extname, xtver].header
                    tdata.field('refid')[:] = refid[g]
                    tdata.field('ra')[:]    = ra[g]
                    tdata.field('dec')[:]   = dec[g]
                    tdata.field('x')[:]     = x
                    tdata.field('y')[:]     = y
                    tdata.field('refmag')[:]  = fmag[g]
                else:
                    c1 = Column(name='refid', format='22A', array=refid[g])
                    c2 = Column(name='ra',    format='E', array=ra[g])
                    c3 = Column(name='dec',   format='E', array=dec[g])
                    c4 = Column(name='x',     format='E', array=x)
                    c5 = Column(name='y',     format='E', array=y)
                    # band:       1-char:  'u','g','r','i' or 'z'
                    c6 = Column(name='refmag', unit=band, format='E', array=fmag[g])
                    colsdef = pf.ColDefs([c1,c2,c3,c4,c5,c6])

                    tbhdu = pf.new_table(colsdef)         # Creates a BINTABLE

                    # pyfits to AstroData
                    tabad = AstroData(tbhdu)

                    # Add or append keywords EXTNAME, EXTVER
                    tabad.rename_ext(extname, xtver)
                
                    outad.append(tabad)
            else:
                log.warning( 'No standard stars were found for this field.')

        return outad
Example #16
    def runDS(self):        
        """ Do the actual object detection.
            - Create the table OBJCAT
            - return the output Astrodata object

        """
        log = self.log
        extname = 'OBJCAT'

        outad = self.outad

        for scix in outad['SCI']:

            xtver = scix.extver()
  
            # Mask the non-illuminated regions.
            if outad['BPM',xtver]:
                sdata = scix.data
                bpmdata = outad['BPM',xtver].data
                if bpmdata.shape != sdata.shape:       # bpmdata is already trimmed.
                    try:                               # See if DATASEC is in the header
                        dsec = scix.data_section()
                    except:
                        log.error("*** ERROR: DATASEC not found in SCI header.")
                        log.error("*** Cannot mask SCI: size(BPM) != size(SCI)."+\
                                   " bpmsize: "+str(bpmdata.shape))
                        log.error("*** SCI data not masked.")
                    else:
                        # Trim the number of columns to match the bpm data.
                        s,e = map(int, dsec.split(',')[0][1:].split(':'))
                        sdata = sdata[:,s-1:e]
                else:
                    bpmdata = np.where(bpmdata==0,1,0)
                    scix.data = sdata*bpmdata
              
            self.findObjects(scix)

            sciHeader = scix.header
            if len(self.x) == 0:
                log.warning( " **** WARNING: No objects were detected: Table OBJCAT, not created")
                continue
       
            wcs = pywcs.WCS(sciHeader)

            # Convert pixel coordinates to world coordinates
            # The second argument is "origin" -- in this case we're declaring we
            # have 1-based (Fortran-like) coordinates.

            xy = np.array(zip(self.x, self.y),np.float32)
            radec = wcs.wcs_pix2sky(xy, 1)

            ra,dec = radec[:,0],radec[:,1]

            nobjs = len(ra)
            log.info("Found %d sources for field in  %s['SCI',%d]"%\
                (nobjs ,outad.filename,xtver))
            
            if outad[extname,xtver]:
                log.info('Table already exists, updating values.')
                tdata = outad[extname, xtver].data
                theader = outad[extname, xtver].header
                tdata.field('id')[:]    = range(len(ra))
                tdata.field('x')[:]     = self.x
                tdata.field('y')[:]     = self.y
                tdata.field('ra')[:]    = ra
                tdata.field('dec')[:]   = dec
                tdata.field('flux')[:]  = self.flux
            else:
                #colsdef = self.define_Table_cols(ra, dec, flux, ellip, fwhm)
                colsdef = self.define_Table_cols(ra, dec, self.flux)
                tbhdu = pf.new_table(colsdef)         # Creates a BINTABLE

                th = tbhdu.header
                
                tabad = AstroData(tbhdu)
                tabad.rename_ext("OBJCAT", xtver)
            
                outad.append(tabad)

        return outad
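
The DATASEC handling in runDS slices the x-range out of a section string of the form '[x1:x2,y1:y2]'. A small sketch of just that parsing step (the helper name is an assumption):

def parse_datasec_x(dsec):
    # Return the 1-based inclusive x-limits from '[x1:x2,y1:y2]',
    # exactly as runDS does before trimming the SCI columns.
    s, e = map(int, dsec.split(',')[0][1:].split(':'))
    return s, e

# e.g. parse_datasec_x('[1:2048,1:4608]') -> (1, 2048)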
 def addMDF(self, rc):
     """
     This primitive is used to add an MDF extension to the input AstroData
     object. If only one MDF is provided, that MDF will be added to all input
     AstroData object(s). If more than one MDF is provided, the number of
     MDF AstroData objects must match the number of input AstroData objects.
     If no MDF is provided, the primitive will attempt to determine an
     appropriate MDF.
     
     :param mdf: The file name of the MDF(s) to be added to the input(s)
     :type mdf: string
     
     """
     # Instantiate the log
     log = logutils.get_logger(__name__)
     
     # Log the standard "starting primitive" debug message
     log.debug(gt.log_message("primitive", "addMDF", "starting"))
     
     # Define the keyword to be used for the time stamp for this primitive
     timestamp_key = self.timestamp_keys["addMDF"]
     
     # Initialize the list of output AstroData objects
     adoutput_list = []
     
     # Get the input AstroData objects
     adinput = rc.get_inputs_as_astrodata()
     
     # Loop over each input AstroData object in the input list
     for ad in adinput:
         
         # Check whether the addMDF primitive has been run previously
         if ad.phu_get_key_value(timestamp_key):
             log.warning("No changes will be made to %s, since it has "
                         "already been processed by addMDF" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check whether the input is spectroscopic data
         if "SPECT" not in ad.types:
             log.stdinfo("%s is not spectroscopic data, so no MDF will be "
                         "added" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check whether an MDF extension already exists in the input
         # AstroData object
         if ad["MDF"]:
             log.warning("An MDF extension already exists in %s, so no MDF "
                         "will be added" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Parameters specified on the command line to reduce are converted
         # to strings, including None
         if rc["mdf"] and rc["mdf"] != "None":
             # The user supplied an input to the mdf parameter
             mdf = rc["mdf"]
         else:
             # The user did not supply an input to the mdf parameter, so try
             # to find an appropriate one. Get the dictionary containing the
             # list of MDFs for all instruments and modes.
             all_mdf_dict = Lookups.get_lookup_table("Gemini/MDFDict",
                                                     "mdf_dict")
             
             # The MDFs are keyed by the instrument and the MASKNAME. Get
             # the instrument and the MASKNAME values using the appropriate
             # descriptors 
             instrument = ad.instrument()
             mask_name = ad.phu_get_key_value("MASKNAME")
             
             # Create the key for the lookup table
             if instrument is None or mask_name is None:
                 log.warning("Unable to create the key for the lookup "
                             "table (%s), so no MDF will be added"
                             % ad.exception_info)
                 
                 # Append the input AstroData object to the list of output
                 # AstroData objects without further processing
                 adoutput_list.append(ad)
                 continue
             
             key = "%s_%s" % (instrument, mask_name)
             
             # Get the appropriate MDF from the look up table
             if key in all_mdf_dict:
                 mdf = lookup_path(all_mdf_dict[key])
             else:
                 # The MASKNAME keyword defines the actual name of an MDF
                 if not mask_name.endswith(".fits"):
                     mdf = "%s.fits" % mask_name
                 else:
                     mdf = str(mask_name)
                 
                 # Check if the MDF exists in the current working directory
                 if not os.path.exists(mdf):
                     log.warning("The MDF %s was not found in the current "
                                 "working directory, so no MDF will be "
                                 "added" % mdf)

                     # Append the input AstroData object to the list of
                     # output AstroData objects without further processing
                     adoutput_list.append(ad)
                     continue
         
         # Ensure that the MDFs are AstroData objects
         if isinstance(mdf, AstroData):
             mdf_ad = mdf
         else:
             mdf_ad = AstroData(mdf)
         
         if mdf_ad is None:
             log.warning("Cannot convert %s into an AstroData object, so "
                         "no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check if the MDF is a single extension fits file
         if len(mdf_ad) > 1:
             log.warning("The MDF %s is not a single extension fits file, "
                         "so no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
             
         # Name the extension appropriately
         mdf_ad.rename_ext("MDF", 1)
         
         # Append the MDF AstroData object to the input AstroData object
         log.fullinfo("Adding the MDF %s to the input AstroData object "
                      "%s" % (mdf_ad.filename, ad.filename))
         ad.append(moredata=mdf_ad)
         
         # Add the appropriate time stamps to the PHU
         gt.mark_history(adinput=ad, keyword=timestamp_key)
         
         # Change the filename
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Append the output AstroData object to the list of output
         # AstroData objects
         adoutput_list.append(ad)
     
     # Report the list of output AstroData objects to the reduction context
     rc.report_output(adoutput_list)
     
     yield rc
    def addDQ(self, rc):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).
        
        :param bpm: The file name, including the full path, of the BPM(s) to be
                    used to flag bad pixels in the DQ extension. If only one
                    BPM is provided, that BPM will be used to flag bad pixels
                    in the DQ extension for all input AstroData object(s). If
                    more than one BPM is provided, the number of BPMs must
                    match the number of input AstroData objects. If no BPM is
                    provided, the primitive will attempt to determine an
                    appropriate BPM.
        :type bpm: string or list of strings
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "addDQ", "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["addDQ"]
        
        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Set the data type of the data quality array.
        # It can be uint8 for now; it will get converted up as we assign
        # higher bit values, so we shouldn't need to force it up to 16bpp yet.
        dq_dtype = np.dtype(np.uint8)
        #dq_dtype = np.dtype(np.uint16)
        
        # Get the input AstroData objects
        adinput = rc.get_inputs_as_astrodata()
        
        # Loop over each input AstroData object in the input list
        for ad in adinput:
            
            # Check whether the addDQ primitive has been run previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has "
                            "already been processed by addDQ" % ad.filename)
                
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Parameters specified on the command line to reduce are converted
            # to strings, including None
            ##M What about if a user doesn't want to add a BPM at all?
            ##M Are None's not converted to Nonetype from the command line?
            if rc["bpm"] and rc["bpm"] != "None":
                # The user supplied an input to the bpm parameter
                bpm = rc["bpm"]
            else:
                # The user did not supply an input to the bpm parameter, so try
                # to find an appropriate one. Get the dictionary containing the
                # list of BPMs for all instruments and modes.
                all_bpm_dict = Lookups.get_lookup_table("Gemini/BPMDict",
                                                        "bpm_dict")
                
                # Call the _get_bpm_key helper function to get the key for the
                # lookup table 
                key = self._get_bpm_key(ad)
                
                # Get the appropriate BPM from the look up table
                if key in all_bpm_dict:
                    bpm = lookup_path(all_bpm_dict[key])
                else:
                    bpm = None
                    log.warning("No BPM found for %s, no BPM will be "
                                "included" % ad.filename)

            # Ensure that the BPMs are AstroData objects
            bpm_ad = None
            if bpm is not None:
                log.fullinfo("Using %s as BPM" % str(bpm))
                if isinstance(bpm, AstroData):
                    bpm_ad = bpm
                else:
                    bpm_ad = AstroData(bpm)
                    ##M Do we want to fail here depending on context?
                    if bpm_ad is None:
                        log.warning("Cannot convert %s into an AstroData "
                                    "object, no BPM will be added" % bpm)

            final_bpm = None
            if bpm_ad is not None:
                # Clip the BPM data to match the size of the input AstroData
                # object science and pad with overscan region, if necessary
                final_bpm = gt.clip_auxiliary_data(adinput=ad, aux=bpm_ad,
                                                   aux_type="bpm")[0]

            # Get the non-linear level and the saturation level using the
            # appropriate descriptors - Individual values get checked in the
            # next loop 
            non_linear_level_dv = ad.non_linear_level()
            saturation_level_dv = ad.saturation_level()

            # Loop over each science extension in each input AstroData object
            for ext in ad[SCI]:
                
                # Retrieve the extension number for this extension
                extver = ext.extver()
                
                # Check whether an extension with the same name as the DQ
                # AstroData object already exists in the input AstroData object
                if ad[DQ, extver]:
                    log.warning("A [%s,%d] extension already exists in %s"
                                % (DQ, extver, ad.filename))
                    continue
                
                # Get the non-linear level and the saturation level for this
                # extension
                non_linear_level = non_linear_level_dv.get_value(extver=extver)
                saturation_level = saturation_level_dv.get_value(extver=extver)

                # To store individual arrays created for each of the DQ bit
                # types
                dq_bit_arrays = []

                # Create an array that contains pixels that have a value of 2
                # when that pixel is in the non-linear regime in the input
                # science extension
                if non_linear_level is not None:
                    non_linear_array = None
                    if saturation_level is not None:
                        # Test the saturation level against non_linear level
                        # They can be the same or the saturation level can be
                        # greater than but not less than the non-linear level.
                        # If they are the same then only flag saturated pixels
                        # below. This just means not creating an unnecessary
                        # intermediate array.
                        if saturation_level > non_linear_level:
                            log.fullinfo("Flagging pixels in the DQ extension "
                                         "corresponding to non linear pixels "
                                         "in %s[%s,%d] using non linear "
                                         "level = %.2f" % (ad.filename, SCI,
                                                           extver,
                                                           non_linear_level))

                            non_linear_array = np.where(
                                ((ext.data >= non_linear_level) &
                                (ext.data < saturation_level)), 2, 0)
                            
                        elif saturation_level < non_linear_level:
                            log.warning("%s[%s,%d] saturation_level value is "
                                        "less than the non_linear_level; not "
                                        "flagging non linear pixels" %
                                        (ad.filename, SCI, extver))
                        else:
                            log.fullinfo("Saturation and non-linear values "
                                         "for %s[%s,%d] are the same. Only "
                                         "flagging saturated pixels."
                                         % (ad.filename, SCI, extver))
                            
                    else:
                        log.fullinfo("Flagging pixels in the DQ extension "
                                     "corresponding to non linear pixels "
                                     "in %s[%s,%d] using non linear "
                                     "level = %.2f" % (ad.filename, SCI, extver,
                                                       non_linear_level))

                        non_linear_array = np.where(
                            (ext.data >= non_linear_level), 2, 0)
                    
                    dq_bit_arrays.append(non_linear_array)

                # Create an array that contains pixels that have a value of 4
                # when that pixel is saturated in the input science extension
                if saturation_level is not None:
                    saturation_array = None
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to saturated pixels in "
                                 "%s[%s,%d] using saturation level = %.2f" %
                                 (ad.filename, SCI, extver, saturation_level))
                    saturation_array = np.where(
                        ext.data >= saturation_level, 4, 0)
                    dq_bit_arrays.append(saturation_array)
                
                # BPMs have an EXTNAME equal to DQ
                bpmname = None
                if final_bpm is not None:
                    bpm_array = None
                    bpmname = os.path.basename(final_bpm.filename)
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to bad pixels in %s[%s,%d] "
                                 "using the BPM %s[%s,%d]" %
                                 (ad.filename, SCI, extver, bpmname, DQ, extver))
                    bpm_array = final_bpm[DQ, extver].data
                    dq_bit_arrays.append(bpm_array)
                
                # Create a single DQ extension from the three arrays (BPM,
                # non-linear and saturated)
                if not dq_bit_arrays:
                    # The BPM, non-linear and saturated arrays were not
                    # created. Create a single DQ array with all pixels set
                    # equal to 0 
                    log.fullinfo("The BPM, non-linear and saturated arrays "
                                 "were not created. Creating a single DQ "
                                 "array with all the pixels set equal to zero")
                    final_dq_array = np.zeros(ext.data.shape, dtype=dq_dtype)

                else:
                    final_dq_array = self._bitwise_OR_list(dq_bit_arrays)
                    final_dq_array = final_dq_array.astype(dq_dtype)
                
                # Create a data quality AstroData object
                dq = AstroData(data=final_dq_array)
                dq.rename_ext(DQ, ver=extver)
                dq.filename = ad.filename
                
                # Call the _update_dq_header helper function to update the
                # header of the data quality extension with some useful
                # keywords
                dq = self._update_dq_header(sci=ext, dq=dq, bpmname=bpmname)
                
                # Append the DQ AstroData object to the input AstroData object
                log.fullinfo("Adding extension [%s,%d] to %s"
                             % (DQ, extver, ad.filename))
                ad.append(moredata=dq)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)
            
            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)
            
            # Append the output AstroData object to the list of output
            # AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)
        
        yield rc
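
For reference, the DQ bit-plane logic above reduces to a few numpy calls. A
minimal sketch, assuming a 16-bit DQ dtype and the bit values used above
(2 = non-linear, 4 = saturated); the toy array and threshold values are
illustrative only, not taken from any real detector:

import numpy as np

dq_dtype = np.uint16                        # assumed DQ data type
sci = np.array([[100., 900., 1200.],
                [50., 1500., 700.]])        # toy science pixels
non_linear_level = 800.0                    # illustrative thresholds
saturation_level = 1000.0

# Bit 2: at or above the non-linear level but below saturation.
non_linear = np.where((sci >= non_linear_level) &
                      (sci < saturation_level), 2, 0)
# Bit 4: at or above the saturation level.
saturated = np.where(sci >= saturation_level, 4, 0)

# Equivalent of _bitwise_OR_list: OR the individual bit planes together.
final_dq = (non_linear | saturated).astype(dq_dtype)
# final_dq -> [[0 2 4]
#              [0 4 0]]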
Example #21
def test_method_rename_ext_5():    # TypeError w/ only 'ver=' param
    ad = AstroData(TESTFILE2)
    with pytest.raises(TypeError):
        ad.rename_ext(ver=2)
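
The supported call passes the extension name as well, either positionally or
via 'name='. A hedged companion sketch, assuming (as the failing test above
implies) that TESTFILE2 holds a single-extension file:

def test_method_rename_ext_with_name():
    ad = AstroData(TESTFILE2)
    ad.rename_ext('SCI', ver=2)    # name plus version: no TypeError
    assert ad.extver() == 2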
Example #22
    def as_astrodata(self, extname=None, tile=False, block=None, return_ROI=True,
                    return_associated_bintables=True, return_non_associations=True,
                    update_catalog_method='wcs'):
        """

          Returns an AstroData object containing, by default, the mosaicked
          IMAGE extensions, the merged associated BINTABLEs, and all other
          non-associated extensions of any other type. WCS information in
          the headers of the IMAGE extensions and any pixel coordinates in
          BINTABLEs will be updated appropriately.

          :param extname: If None, mosaic all IMAGE extensions; otherwise
              mosaic only the given extname. This becomes the ref_extname.

          :type extname: (string). Default is None.

          :param tile: (boolean). If True, the mosaics returned are not 
              corrected for shifting and rotation.

          :param block: See description below in method 'mosaic_image_data'.

          :param return_ROI: (True). Returns the minimum frame size calculated
              from the location of the amplifiers in a given block. If False uses
              the blocksize value.

          :param return_associated_bintables: (True). If a bintable is
              associated with the ref_extname, it is returned as a merged
              table in the output AD. If False, such tables are not returned
              in the output AD.

          :param return_non_associations: (True). Specifies whether to return
              extensions that are not deemed to be associated with the ref_extname.

          :param update_catalog_method: ('wcs'). Specifies whether the X
              and Y pixel coordinates of any source positions in the BINTABLEs
              are to be recalculated using the output WCS and the sources'
              R.A. and Dec. values within the table. If set to 'transform', the
              updated X and Y pixel coordinates will be determined using the
              transformations used to mosaic the pixel data. In the case of
              tiling, a shift is technically being applied and therefore
              update_catalog_method='wcs' should be set internally (not yet
              implemented).

          :type update_catalog_method: (string). Possible values are 
                                                 'wcs' or 'transform'.
                     
        """
        # If extname is None, create mosaics of all image data in ad, merge
        # the bintables if they are associated with the image extensions,
        # and append all non-associated extensions to adout. Appending
        # these extensions to the output AD is controlled by
        # return_associated_bintables and return_non_associations.

        # Make blank ('') same as None; i.e. handle all extensions.
        if extname == '': extname = None
        if (extname is not None) and (extname not in self.extnames):
            raise ValueError("as_astrodata: Extname '" + extname +
                             "' not found in AD object.")

        adin = self.ad      # alias
        
        # Load input data if data_list attribute is not defined. 
        #if not hasattr(self, "data_list"):
        #    self.data_list = self.get_data_list(extname)

        adout = AstroData()               # Prepare output AD
        adout.phu = adin.phu.copy()       # Use input AD phu as output phu

        adout.phu.header.update('TILED', ['FALSE', 'TRUE'][tile],
                 'False: Image Mosaicked, True: tiled')

        # Set up extname lists with all the extension names that are going to 
        # be mosaiced and table extension names to associate.
        #
        if extname is None:                     # Let's work through all extensions
            if self.associated_im_extns:
                extname_list = self.associated_im_extns
            else:
                extname_list = self.im_extnames
        else:
            self.ref_extname = extname          # Redefine reference extname
            if extname in self.associated_im_extns:
                self.associated_im_extns = [extname]    # We need this extname only
                extname_list = [extname]
            elif extname in self.non_associated_extns: 
                # Extname is not in associated lists; so clear these lists.
                extname_list = []                       
                self.associated_im_extns = []
                self.associated_tab_extns = []
            elif extname in self.associated_tab_extns:
                # Extname is an associated bintable.
                extname_list = []                       
                self.associated_im_extns = []
                self.associated_tab_extns = [extname]
            else:
                extname_list = [extname]

        # ------ Create mosaic ndarrays, update the output WCS, create an 
        #        AstroData object and append to the output list. 
        
        # Put the extension names in the order 'SCI', 'VAR', 'DQ', ...
        svdq = [k for k in ['SCI','VAR','DQ'] if k in extname_list]
        # ... then append the remaining extension names.
        extname_list = svdq + list(set(extname_list)-set(svdq))
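        # e.g. extname_list -> ['SCI', 'VAR', 'DQ', 'OBJMASK'] when a
        # hypothetical 'OBJMASK' extension is also present.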

        for extn in extname_list:
            # Mosaic the IMAGE extensions now.
            mosarray = self.mosaic_image_data(extn, tile=tile, block=block,
                                              return_ROI=return_ROI)
            # Create the mosaic FITS header using the reference 
            # extension header.
            header = self.mosaic_header(mosarray.shape,block,tile)

            # Generate a WCS object to be used when merging the object
            # catalog tables, updating the objects' pixel coordinates
            # with respect to the new CRPIX1/2.
            ref_wcs = pywcs.WCS(header)

            # Setup output AD 
            new_ext = AstroData(data=mosarray,header=header)

            # Reset extver to 1.
            new_ext.rename_ext(name=extn,ver=1)
            adout.append(new_ext)

        if return_associated_bintables:
            # If we have associated bintables with image extensions, then
            # merge the tables.
            for tab_extn in self.associated_tab_extns:
                # adout will get the merged table.
                new_tab = self.merge_table_data(ref_wcs, tile, tab_extn, block,
                                                update_catalog_method)
                adout.append(new_tab[0])
        
        # If we have extension names with no associated table extension
        # names, mosaic them as well.
        #
        if return_non_associations:
            for extn in self.non_associated_extns:
                # Now get the list of extver to append
                if extn in self.im_extnames:   #  Image extensions
                    # We need to mosaic image extensions having more
                    # than one extver.
                    #
                    if adin.count_exts(extn) > 1:
                        mosarray = self.mosaic_image_data(extn,
                                    tile=tile,block=block,
                                    return_ROI=return_ROI)

                        # Get reference extension header
                        header = self.mosaic_header(mosarray.shape,block,tile)
                        new_ext = AstroData(data=mosarray,header=header)

                        # Reset extver to 1.
                        new_ext.rename_ext(name=extn,ver=1)
                        adout.append(new_ext)
                    else:
                        self.log.warning("as_astrodata: extension '" + extn +
                                         "' has only one version; appending as is.")
                        adout.append(adin[extn])

                if extn in self.tab_extnames:   # We have a list of extvers
                    for extv in self.tab_extnames[extn]:
                        adout.append(adin[extn,extv]) 
        # Rediscover the AstroData type classifications.
        adout.refresh_types()
        return adout
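
A hedged usage sketch for this method follows. The class name (MosaicAD
here) and its construction are assumptions, since the snippet shows only the
method; the keyword arguments match the signature documented above, and the
input filename is hypothetical:

from astrodata import AstroData

ad = AstroData('gmos_image.fits')        # hypothetical input file
mos = MosaicAD(ad)                       # assumed constructor

# Default call: mosaic all IMAGE extensions, merge associated bintables,
# and keep non-associated extensions.
adout = mos.as_astrodata()

# Tile only the SCI extensions, skipping shift/rotation corrections.
sci_tiles = mos.as_astrodata(extname='SCI', tile=True)

adout.write('mosaic.fits', clobber=True)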