def _update_dq_header(self, sci=None, dq=None, bpmname=None):
    """
    Stamp the DQ-specific keywords on a data quality extension and copy
    bookkeeping keywords over from the matching science extension.

    :param sci: science extension whose header keywords are propagated
    :param dq: data quality extension to update (modified in place)
    :param bpmname: name of the bad pixel mask; written as BPMNAME when
                    not None
    :return: the updated dq extension
    """
    # The DQ plane is a bit mask, so its physical units are "bit"
    gt.update_key(adinput=dq, keyword="BUNIT", value="bit", comment=None,
                  extname=DQ)

    # Record which bad pixel mask was applied, when one was supplied
    if bpmname is not None:
        gt.update_key(adinput=dq, keyword="BPMNAME", value=bpmname,
                      comment=None, extname=DQ)

    # Keywords propagated verbatim from the science extension.
    # These should probably be done using descriptors (?)
    copy_keywords = (
        "AMPNAME", "BIASSEC", "CCDNAME", "CCDSEC", "CCDSIZE", "CCDSUM",
        "CD1_1", "CD1_2", "CD2_1", "CD2_2", "CRPIX1", "CRPIX2",
        "CRVAL1", "CRVAL2", "CTYPE1", "CTYPE2", "DATASEC", "DETSEC",
        "EXPTIME", "GAIN", "GAINSET", "NONLINEA", "RDNOISE", "SATLEVEL",
        "LOWROW", "LOWCOL", "HIROW", "HICOL")

    # Provenance comment attached to every copied keyword
    provenance = "Copied from ['%s',%d]" % (SCI, sci.extver())

    for kw in copy_keywords:
        # Propagate only keywords actually present in the science header
        value = sci.get_key_value(key=kw)
        if value is None:
            continue
        gt.update_key(adinput=dq, keyword=kw, value=value,
                      comment=provenance, extname=DQ)

    return dq
def standardizeGeminiHeaders(self, rc):
    """
    This primitive is used to make the changes and additions to the
    keywords in the headers of Gemini data.
    """
    # NOTE(review): a second definition of standardizeGeminiHeaders
    # appears later in this file (using the keyword_comments/primname
    # gt API) and will shadow this one if both sit in the same class —
    # confirm which copy is the intended one.

    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "standardizeGeminiHeaders",
                             "starting"))

    # Keyword used to time-stamp datasets processed by this primitive
    timestamp_key = self.timestamp_keys["standardizeGeminiHeaders"]

    # Collect the output AstroData objects here
    outputs = []

    for ad in rc.get_inputs_as_astrodata():
        # Datasets already stamped by this primitive are passed through
        # unchanged (but still reported as outputs)
        if ad.phu_get_key_value(timestamp_key):
            log.warning("No changes will be made to %s, since it has "
                        "already been processed by "
                        "standardizeGeminiHeaders" % ad.filename)
            outputs.append(ad)
            continue

        log.status("Updating keywords that are common to all Gemini data")

        # Preserve the original file name before it is rewritten below
        ad.store_original_name()

        # Number of science extensions
        gt.update_key(adinput=ad, keyword="NSCIEXT",
                      value=ad.count_exts("SCI"), comment=None,
                      extname="PHU")
        # Total number of extensions
        gt.update_key(adinput=ad, keyword="NEXTEND", value=len(ad),
                      comment=None, extname="PHU")
        # Physical units (assuming raw data has units of ADU)
        gt.update_key(adinput=ad, keyword="BUNIT", value="adu",
                      comment=None, extname="SCI")

        # Time-stamp the PHU and rename the output file
        gt.mark_history(adinput=ad, keyword=timestamp_key)
        ad.filename = gt.filename_updater(adinput=ad,
                                          suffix=rc["suffix"],
                                          strip=True)
        outputs.append(ad)

    # Report the list of output AstroData objects to the reduction
    # context
    rc.report_output(outputs)

    yield rc
def standardizeGeminiHeaders(self, rc):
    """
    This primitive is used to make the changes and additions to the
    keywords in the headers of Gemini data.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(
        gt.log_message("primitive", "standardizeGeminiHeaders",
                       "starting"))

    # Keyword used to time-stamp datasets processed by this primitive
    timestamp_key = self.timestamp_keys["standardizeGeminiHeaders"]

    # Collect the output AstroData objects here
    adoutput_list = []

    for ad in rc.get_inputs_as_astrodata():
        # Pass already-processed datasets straight through to the output
        if ad.phu_get_key_value(timestamp_key):
            log.warning("No changes will be made to %s, since it has "
                        "already been processed by "
                        "standardizeGeminiHeaders" % ad.filename)
            adoutput_list.append(ad)
            continue

        log.status("Updating keywords that are common to all Gemini data")

        # Preserve the original file name before it is rewritten below
        ad.store_original_name()

        # Keywords common to all Gemini data: number of science
        # extensions, total number of extensions, and the physical
        # units of the raw pixel data (assumed to be ADU)
        header_updates = (
            ("NSCIEXT", ad.count_exts("SCI"), "PHU"),
            ("NEXTEND", len(ad), "PHU"),
            ("BUNIT", "adu", "SCI"),
        )
        for keyword, value, extname in header_updates:
            gt.update_key(adinput=ad, keyword=keyword, value=value,
                          comment=None, extname=extname,
                          keyword_comments=self.keyword_comments)

        # Time-stamp the PHU and rename the output file
        gt.mark_history(adinput=ad, primname=self.myself(),
                        keyword=timestamp_key)
        ad.filename = gt.filename_updater(adinput=ad,
                                          suffix=rc["suffix"],
                                          strip=True)
        adoutput_list.append(ad)

    # Report the list of output AstroData objects to the reduction
    # context
    rc.report_output(adoutput_list)

    yield rc
def ADUToElectrons(self, rc):
    """
    This primitive will convert the units of the pixel data extensions
    of the input AstroData object from ADU to electrons by multiplying
    by the gain.

    The science extensions are multiplied by the gain and (per the
    original implementation's note) the variance extensions by the gain
    squared; BUNIT is updated accordingly.
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "ADUToElectrons", "starting"))

    # Define the keyword to be used for the time stamp for this primitive
    timestamp_key = self.timestamp_keys["ADUToElectrons"]

    # Initialize the list of output AstroData objects
    adoutput_list = []

    # Loop over each input AstroData object in the input list
    for ad in rc.get_inputs_as_astrodata():

        # Check whether the ADUToElectrons primitive has been run
        # previously; if so, pass the dataset through unchanged
        if ad.phu_get_key_value(timestamp_key):
            log.warning("No changes will be made to %s, since it has "
                        "already been processed by ADUToElectrons"
                        % ad.filename)
            adoutput_list.append(ad)
            continue

        # Get the gain value using the appropriate descriptor
        gain = ad.gain()

        # Fail early with a clear message if the gain cannot be
        # determined (same DV None-check used by stackFrames), instead
        # of failing obscurely inside get_value()/mult() below
        if gain.is_none():
            raise Errors.InputError("Cannot convert %s from ADU to "
                                    "electrons: gain is None"
                                    % ad.filename)

        # Multiply the pixel data in the science extension by the gain
        # and the pixel data in the variance extension by the gain
        # squared
        log.status("Converting %s from ADU to electrons by multiplying by "
                   "the gain" % (ad.filename))
        for ext in ad[SCI]:
            extver = ext.extver()
            log.stdinfo(" gain for [%s,%d] = %s"
                        % (SCI, extver, gain.get_value(extver=extver)))
        ad = ad.mult(gain)

        # The pixel data now has units of electrons, so update the
        # physical units keyword accordingly
        gt.update_key(adinput=ad, keyword="BUNIT", value="electron",
                      comment=None, extname=SCI)
        if ad[VAR]:
            gt.update_key(adinput=ad, keyword="BUNIT",
                          value="electron*electron", comment=None,
                          extname=VAR)

        # Add the appropriate time stamps to the PHU
        gt.mark_history(adinput=ad, keyword=timestamp_key)

        # Change the filename
        ad.filename = gt.filename_updater(adinput=ad,
                                          suffix=rc["suffix"],
                                          strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        adoutput_list.append(ad)

    # Report the list of output AstroData objects to the reduction
    # context
    rc.report_output(adoutput_list)

    yield rc
def stackFrames(self, rc):
    """
    This primitive will stack each science extension in the input
    dataset. New variance extensions are created from the stacked
    science extensions and the data quality extensions are propagated
    through to the final file.

    :param operation: type of combining operation to use.
    :type operation: string, options: 'average', 'median'.

    :param reject_method: type of rejection algorithm
    :type reject_method: string, options: 'avsigclip', 'minmax', None

    :param mask: Use the data quality extension to mask bad pixels?
    :type mask: bool

    :param nlow: number of low pixels to reject (used with
                 reject_method=minmax)
    :type nlow: int

    :param nhigh: number of high pixels to reject (used with
                  reject_method=minmax)
    :type nhigh: int
    """
    # Instantiate the log
    log = logutils.get_logger(__name__)

    # Log the standard "starting primitive" debug message
    log.debug(gt.log_message("primitive", "stackFrames", "starting"))

    # Define the keyword to be used for the time stamp for this primitive
    timestamp_key = self.timestamp_keys["stackFrames"]

    # Initialize the list of output AstroData objects
    ad_output_list = []

    # Get the input AstroData objects
    ad_input_list = rc.get_inputs_as_astrodata()

    # Ensure that each input AstroData object has been prepared
    for ad in ad_input_list:
        if not "PREPARED" in ad.types:
            raise Errors.InputError("%s must be prepared" % ad.filename)

    if len(ad_input_list) <= 1:
        # Nothing to combine: pass the inputs straight through
        log.stdinfo("No stacking will be performed, since at least two "
                    "input AstroData objects are required for stackFrames")
        # Set the list of input AstroData objects to the list of output
        # AstroData objects without further processing
        ad_output_list = ad_input_list
    else:
        # Get the gain and read noise from the first AstroData object in
        # the input list using the appropriate descriptors
        # Determine the average gain from the input AstroData objects and
        # add in quadrature the read noise
        gain_dict = {}
        read_noise_dict = {}
        gain_dvs = [ad.gain() for ad in ad_input_list]
        read_noise_dvs = [ad.read_noise() for ad in ad_input_list]

        # Check for Nones: refuse to stack if any descriptor value is
        # missing, since the GAIN/RDNOISE keywords below would be wrong
        if True in [gain_dv.is_none() for gain_dv in gain_dvs]:
            raise Errors.InputError("One or more gain DVs are None")
        if True in [read_noise_dv.is_none()
                    for read_noise_dv in read_noise_dvs]:
            raise Errors.InputError("One or more read noise DVs are None")

        # Sum the values per extension version: gains are summed
        # linearly (averaged below), read noise is accumulated in
        # quadrature.
        # NOTE(review): the extver loop iterates over the ext_vers of
        # the FIRST input only — assumes all inputs share the same set
        # of extension versions; confirm upstream guarantees this.
        for extver in gain_dvs[0].ext_vers():
            for gain_dv in gain_dvs:
                if extver not in gain_dict:
                    gain_dict.update({extver: 0})
                gain_dict[extver] += gain_dv.get_value(extver)
            for read_noise_dv in read_noise_dvs:
                if extver not in read_noise_dict:
                    read_noise_dict.update({extver: 0})
                read_noise_dict[extver] += read_noise_dv.get_value(
                    extver)**2
        for key in gain_dict.keys():
            gain_dict[key] /= len(ad_input_list)
            read_noise_dict[key] = math.sqrt(read_noise_dict[key])

        # Instantiate ETI and then run the task (the task reads its
        # inputs and parameters from the reduction context rc)
        gemcombine_task = eti.gemcombineeti.GemcombineETI(rc)
        ad_output = gemcombine_task.run()

        # Revert the BUNIT for the variance extension (gemcombine sets it
        # to the same value as the science extension)
        bunit = ad_output[SCI,1].get_key_value("BUNIT")
        if ad_output[VAR]:
            for ext in ad_output[VAR]:
                if bunit is not None:
                    gt.update_key(adinput=ext, keyword="BUNIT",
                                  value="%s*%s" % (bunit, bunit),
                                  comment=None, extname=VAR)

        # Revert the dtype and BUNIT for the data quality extension
        # (gemcombine sets them to int32 and the same value as the science
        # extension, respectively)
        if ad_output[DQ]:
            for ext in ad_output[DQ]:
                ext.data = ext.data.astype(np.int16)
                if bunit is not None:
                    gt.update_key(adinput=ext, keyword="BUNIT",
                                  value="bit", comment=None,
                                  extname=DQ)

        # Gemcombine sets the GAIN keyword to the sum of the gains;
        # reset it to the average instead. Set the RDNOISE to the
        # sum in quadrature of the input read noise. Set the keywords in
        # the variance and data quality extensions to be the same as the
        # science extensions.
        for ext in ad_output:
            extver = ext.extver()
            gain = gain_dict[extver]
            read_noise = read_noise_dict[extver]
            gt.update_key(adinput=ext, keyword="GAIN", value=gain,
                          comment=None, extname="pixel_exts")
            gt.update_key(adinput=ext, keyword="RDNOISE",
                          value=read_noise, comment=None,
                          extname="pixel_exts")
        # PHU-level GAIN/RDNOISE taken from extension version 1.
        # NOTE(review): assumes extver 1 exists in the combined output —
        # KeyError here otherwise; confirm with gemcombine's output.
        gain = gain_dict[1]
        read_noise = read_noise_dict[1]
        gt.update_key(adinput=ad_output, keyword="GAIN", value=gain,
                      comment=None, extname="PHU")
        gt.update_key(adinput=ad_output, keyword="RDNOISE",
                      value=read_noise, comment=None, extname="PHU")

        suffix = rc["suffix"]

        # The ORIGNAME keyword should not be updated in this way, since it
        # defeats the point of having the ORIGNAME keyword.
        # Add suffix to the ORIGNAME to prevent future stripping
        #ad_output.phu_set_key_value("ORIGNAME",
        #    gt.filename_updater(adinput=adinput[0],
        #                        suffix=suffix,strip=True),
        #    comment=self.keyword_comments["ORIGNAME"])

        # Add suffix to the datalabel to distinguish from the reference
        # frame
        orig_datalab = ad_output.phu_get_key_value("DATALAB")
        new_datalab = "%s%s" % (orig_datalab, suffix)
        gt.update_key(adinput=ad_output, keyword="DATALAB",
                      value=new_datalab, comment=None, extname="PHU")

        # Add the appropriate time stamps to the PHU
        gt.mark_history(adinput=ad_output, keyword=timestamp_key)

        # Change the filename
        ad_output.filename = gt.filename_updater(adinput=ad_output,
                                                 suffix=suffix,
                                                 strip=True)

        # Append the output AstroData object to the list of output
        # AstroData objects
        ad_output_list.append(ad_output)

    # Report the list of output AstroData objects to the reduction
    # context
    rc.report_output(ad_output_list)

    yield rc