Example #1
    def standardizeObservatoryHeaders(self, adinputs=None, **params):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of Gemini data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardize"
                            "ObservatoryHeaders".format(ad.filename))
                continue

            # Update various header keywords
            log.status("Updating keywords that are common to all Gemini data")
            ad.phu.set('NSCIEXT', len(ad), self.keyword_comments['NSCIEXT'])
            ad.hdr.set('BUNIT', 'adu', self.keyword_comments['BUNIT'])

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
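
A note on the pattern above: gt.mark_history writes a timestamp keyword into the PHU, which is exactly what the `ad.phu.get(timestamp_key)` guard checks on a later run. A minimal, self-contained sketch of that guard-and-stamp idea, using a plain dict header and a hypothetical `work` callable (not the gt implementation itself):

from datetime import datetime, timezone

def mark_once(header, key, work):
    """Illustrative guard: skip when `key` is already present, otherwise run
    the step and record a UT timestamp (mirrors the mark_history pattern)."""
    if header.get(key):
        return False
    work()
    header[key] = datetime.now(timezone.utc).isoformat()
    return True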
Example #2
    def _markAsCalibration(self,
                           adinputs=None,
                           suffix=None,
                           update_datalab=True,
                           primname=None,
                           keyword=None):
        """
        Updates filenames, datalabels (if asked) and adds header keyword
        prior to storing AD objects as calibrations
        """
        for ad in adinputs:
            # In 'sq' mode, only add the mode to the filename when calibrations
            # are being uploaded.
            if self.mode == 'sq' and (not self.upload
                                      or 'calibs' not in self.upload):
                proc_suffix = ""
            else:
                proc_suffix = f"_{self.mode}"

            if suffix:
                proc_suffix += suffix
            ad.update_filename(suffix=proc_suffix, strip=True)
            if update_datalab:
                _update_datalab(ad, suffix, self.keyword_comments)
            gt.mark_history(adinput=ad, primname=primname, keyword=keyword)
            ad.phu.set('PROCMODE', self.mode)
        return adinputs
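
The filename suffix here is just the mode prefix plus the calibration suffix, applied in a single update_filename call. A toy illustration of the composition logic only (no AstroData involved):

def compose_suffix(mode, suffix, upload_calibs=True):
    """Illustrative: '' for sq mode without calib upload, '_<mode>' otherwise,
    followed by the calibration suffix (e.g. '_flat')."""
    proc = "" if (mode == "sq" and not upload_calibs) else f"_{mode}"
    return proc + (suffix or "")

print(compose_suffix("ql", "_flat"))          # _ql_flat
print(compose_suffix("sq", "_flat", False))   # _flat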
Example #3
    def standardizeObservatoryHeaders(self, adinputs=None, **params):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of Gemini data.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardize"
                            "ObservatoryHeaders".format(ad.filename))
                continue

            # Update various header keywords
            log.status("Updating keywords that are common to all Gemini data")
            ad.phu.set('NSCIEXT', len(ad), self.keyword_comments['NSCIEXT'])
            ad.hdr.set('BUNIT', 'adu', self.keyword_comments['BUNIT'])
            for ext in ad:
                if 'RADECSYS' in ext.hdr:
                    ext.hdr['RADESYS'] = (ext.hdr['RADECSYS'],
                                          ext.hdr.comments['RADECSYS'])
                    del ext.hdr['RADECSYS']

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
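
The RADECSYS -> RADESYS block above is the standard FITS keyword migration, preserving both value and comment. The same pattern on a bare astropy.io.fits header, for reference (illustrative only; the primitive operates on AstroData extension headers):

from astropy.io import fits

hdr = fits.Header()
hdr['RADECSYS'] = ('FK5', 'Reference frame')    # deprecated spelling

if 'RADECSYS' in hdr:
    # Copy value and comment to the modern keyword, then drop the old one
    hdr['RADESYS'] = (hdr['RADECSYS'], hdr.comments['RADECSYS'])
    del hdr['RADECSYS']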
Example #4
    def trimOverscan(self, adinputs=None, suffix=None):
        """
        The trimOverscan primitive trims the overscan region from the input
        AstroData object and updates the headers.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key) is not None:
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by trimOverscan'.
                            format(ad.filename))
                continue

            ad = gt.trim_to_data_section(ad,
                                    keyword_comments=self.keyword_comments)

            # Set keyword, timestamp, and update filename
            ad.phu.set('TRIMMED', 'yes', self.keyword_comments['TRIMMED'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
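
gt.trim_to_data_section does the real work here; conceptually it slices each array down to its 1-indexed, inclusive FITS data section and updates the keywords. A minimal sketch of just the slicing step, assuming a section string such as '[1:2048,1:4608]' (hypothetical helper, not the gempy function):

import re
import numpy as np

def _slice_from_fits_section(section):
    """Convert a 1-indexed, inclusive FITS section like '[1:2048,1:4608]'
    into a NumPy (y, x) slice. Illustrative helper only."""
    x1, x2, y1, y2 = map(int, re.findall(r'\d+', section))
    return np.s_[y1 - 1:y2, x1 - 1:x2]

data = np.zeros((4644, 2080))                 # raw frame including overscan
trimmed = data[_slice_from_fits_section('[1:2048,1:4608]')]
print(trimmed.shape)                          # (4608, 2048)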
Example #5
    def standardizeWCS(self,
                       adinputs=None,
                       suffix=None,
                       reference_extension=None):
        """
        This primitive updates the WCS attribute of each NDAstroData extension
        in the input AstroData objects. For spectroscopic data, it means
        replacing an imaging WCS with an approximate spectroscopic WCS.
        For multi-extension ADs, it means prepending a tiling and/or mosaic
        transform before the pixel->world transform, and giving all extensions
        copies of the reference extension's pixel->world transform.

        Parameters
        ----------
        suffix: str/None
            suffix to be added to output files
        reference_extension: int/None
            reference extension whose WCS is inherited by others
        """
        log = self.log
        timestamp_key = self.timestamp_keys[self.myself()]
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        for ad in adinputs:
            # TODO: work towards making this "if 'SPECT' in ad.tags",
            # which is why it's here and not in primitives_gmos_spect
            if {'GMOS', 'SPECT', 'LS'}.issubset(ad.tags):
                log.stdinfo(f"Adding spectroscopic WCS to {ad.filename}")
                add_longslit_wcs(ad)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #6
    def trimOverscan(self, adinputs=None, suffix=None):
        """
        The trimOverscan primitive trims the overscan region from the input
        AstroData object and updates the headers.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key) is not None:
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by trimOverscan'.format(
                                ad.filename))
                continue

            ad = gt.trim_to_data_section(
                ad, keyword_comments=self.keyword_comments)

            # Set keyword, timestamp, and update filename
            ad.phu.set('TRIMMED', 'yes', self.keyword_comments['TRIMMED'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #7
 def storeProcessedFlat(self, rc):
     # Instantiate the log
     log = logutils.get_logger(__name__)
     
     # Log the standard "starting primitive" debug message
     log.debug(gt.log_message("primitive", "storeProcessedFlat",
                              "starting"))
     
     # Loop over each input AstroData object in the input list
     for ad in rc.get_inputs_as_astrodata():
         
         # Updating the file name with the suffix for this primitive and
         # then report the new file to the reduction context
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Adding a PROCFLAT time stamp to the PHU
         gt.mark_history(adinput=ad, keyword="PROCFLAT")
         
         # Refresh the AD types to reflect new processed status
         ad.refresh_types()
     
     # Upload to cal system
     rc.run("storeCalibration")
     
     yield rc
Example #8
    def storeProcessedScience(self, adinputs=None, suffix=None):
        if self.mode not in ['sq', 'ql', 'qa']:
            self.log.warning('Mode %s not recognized in storeProcessedScience, '
                             'not saving anything' % self.mode)
            return adinputs

        for ad in adinputs:
            gt.mark_history(adinput=ad, primname=self.myself(), keyword="PROCSCI")
            ad.update_filename(suffix=suffix, strip=True)
            ad.phu.set('PROCMODE', self.mode)

            if self.mode != 'qa' and self.upload and 'science' in self.upload:
                old_filename = ad.filename
                ad.update_filename(suffix=f"_{self.mode}" + (suffix or ""),
                                   strip=True)
                ad.write(overwrite=True)
                try:
                    upload_calibration(ad.filename, is_science=True)
                except Exception:
                    self.log.warning("Unable to upload file to science system")
                else:
                    msg = "File {} uploaded to fitsstore."
                    self.log.stdinfo(msg.format(os.path.basename(ad.filename)))
                # Rename file on disk to avoid writing twice
                os.replace(ad.filename, old_filename)
                ad.filename = old_filename
            else:
                ad.write(overwrite=True)

        return adinputs
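
The upload branch writes the file once under a mode-tagged name, uploads it, then renames it back on disk so the data are never written twice. A stripped-down sketch of that write/upload/rename pattern, where the `upload` argument stands in for upload_calibration:

import os

def write_and_upload(ad, mode, suffix, upload):
    """Illustrative sketch: write under a temporary mode-tagged name,
    upload, then restore the original filename on disk."""
    old_filename = ad.filename
    ad.update_filename(suffix=f"_{mode}" + (suffix or ""), strip=True)
    ad.write(overwrite=True)
    try:
        upload(ad.filename)                     # e.g. upload_calibration(...)
    finally:
        os.replace(ad.filename, old_filename)   # rename, never write twice
        ad.filename = old_filename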
Example #9
    def prepare(self, adinputs=None, **params):
        """
        Validate and standardize the datasets to ensure compatibility
        with the subsequent primitives.  The outputs, if written to
        disk will be given the suffix "_prepared".

        Currently, there are no input parameters associated with
        this primitive.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", "prepare", "starting"))
        timestamp_key = self.timestamp_keys["prepare"]
        sfx = params["suffix"]
        for primitive in ('validateData', 'standardizeStructure',
                          'standardizeHeaders'):
            passed_params = self._inherit_params(params, primitive)
            adinputs = getattr(self, primitive)(adinputs, **passed_params)

        for ad in adinputs:
            gt.mark_history(ad, self.myself(), timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)
        return adinputs
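
prepare is essentially a dispatcher: it looks up each named sub-primitive with getattr and feeds the outputs of one step into the next, forwarding only the parameters each step accepts. The dispatch idea reduced to plain Python (hypothetical names, not the _inherit_params machinery):

def run_chain(obj, inputs, steps, params):
    """Call obj.<step>(inputs, **step_params) for each step in order,
    feeding each step's output into the next (illustrative only)."""
    for step in steps:
        method = getattr(obj, step)
        inputs = method(inputs, **params.get(step, {}))
    return inputs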
Example #10
    def standardizeInstrumentHeaders(self, adinputs=None, suffix=None):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of NIRI data, specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        # Instantiate the log
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by "
                            "standardizeInstrumentHeaders".format(ad.filename))
                continue

            # Standardize the headers of the input AstroData object. Update the
            # keywords in the headers that are specific to NIRI.
            log.status("Updating keywords that are specific to NIRI")

            # Filter name (required for IRAF?)
            ad.phu.set('FILTER', ad.filter_name(stripID=True, pretty=True),
                       self.keyword_comments['FILTER'])

            # Pixel scale (CJS: I'm putting this in the extension too!)
            pixel_scale = ad.pixel_scale()
            ad.phu.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])
            ad.hdr.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])

            for desc in ('read_noise', 'gain', 'non_linear_level',
                         'saturation_level'):
                kw = ad._keyword_for(desc)
                ad.hdr.set(kw, getattr(ad, desc)()[0], self.keyword_comments[kw])
                try:
                    ad.phu.remove(kw)
                except (KeyError, AttributeError):
                    pass

            # The exposure time keyword in raw data gives the exptime of each
            # coadd, but the data have been summed, not averaged, so it needs to
            # be reset to COADDS*EXPTIME. The descriptor always returns that
            # value, regardless of whether the data are prepared or unprepared.
            kw = ad._keyword_for('exposure_time')
            ad.phu.set(kw, ad.exposure_time(), self.keyword_comments[kw])

            if 'SPECT' in ad.tags:
                kw = ad._keyword_for('dispersion_axis')
                ad.hdr.set(kw, 1, self.keyword_comments[kw])

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #11
    def standardizeInstrumentHeaders(self, adinputs=None, suffix=None):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of NIRI data, specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        # Instantiate the log
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by "
                            "standardizeInstrumentHeaders".format(ad.filename))
                continue

            # Standardize the headers of the input AstroData object. Update the
            # keywords in the headers that are specific to NIRI.
            log.status("Updating keywords that are specific to NIRI")

            # Filter name (required for IRAF?)
            ad.phu.set('FILTER', ad.filter_name(stripID=True, pretty=True),
                       self.keyword_comments['FILTER'])

            # Pixel scale (CJS: I'm putting this in the extension too!)
            pixel_scale = ad.pixel_scale()
            ad.phu.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])
            ad.hdr.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])

            for desc in ('read_noise', 'gain', 'non_linear_level',
                         'saturation_level'):
                kw = ad._keyword_for(desc)
                ad.hdr.set(kw, getattr(ad, desc)()[0], self.keyword_comments[kw])
                try:
                    ad.phu.remove(kw)
                except (KeyError, AttributeError):
                    pass

            # The exposure time keyword in raw data gives the exptime of each
            # coadd, but the data have been summed, not averaged, so it needs to
            # be reset to COADDS*EXPTIME. The descriptor always returns that
            # value, regardless of whether the data are prepared or unprepared.
            kw = ad._keyword_for('exposure_time')
            ad.phu.set(kw, ad.exposure_time(), self.keyword_comments[kw])

            if 'SPECT' in ad.tags:
                kw = ad._keyword_for('dispersion_axis')
                ad.hdr.set(kw, 1, self.keyword_comments[kw])

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #12
    def normalizeFlat(self, rc):
        """
        This primitive normalizes each science extension of the input
        AstroData object by its mean
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "normalizeFlat", "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["normalizeFlat"]

        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():
            
            # Check whether the normalizeFlat primitive has been run previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has " \
                            "already been processed by normalizeFlat" \
                            % (ad.filename))
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Loop over each science extension in each input AstroData object
            for ext in ad[SCI]:
                
                # Normalise the input AstroData object. Calculate the mean
                # value of the science extension
                mean = np.mean(ext.data, dtype=np.float64)
                # Divide the science extension by the mean value of the science
                # extension
                log.fullinfo("Normalizing %s[%s,%d] by dividing by the mean " \
                             "= %f" % (ad.filename, ext.extname(),
                                       ext.extver(), mean))
                ext = ext.div(mean)

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"], 
                                              strip=True)

            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(ad)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
Example #13
    def mosaicDetectors(self, adinputs=None, **params):
        """
        This primitive does a full mosaic of all the arrays in an AD object.
        An appropriate geometry_conf.py module containing geometric information
        is required.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files.
        sci_only: bool
            mosaic only SCI image data. Default is False
        order: int (1-5)
            order of spline interpolation
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        order = params['order']
        attributes = ['data'] if params['sci_only'] else None
        geotable = import_module('.geometry_conf', self.inst_lookups)

        adoutputs = []
        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by mosaicDetectors".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to mosaic".format(ad.filename))
                adoutputs.append(ad)
                continue

            # If there's an overscan section, we must trim it before mosaicking
            try:
                overscan_kw = ad._keyword_for('overscan_section')
            except AttributeError:  # doesn't exist for this AD, so carry on
                pass
            else:
                if overscan_kw in ad.hdr:
                    ad = gt.trim_to_data_section(ad, self.keyword_comments)

            adg = transform.create_mosaic_transform(ad, geotable)
            ad_out = adg.transform(attributes=attributes, order=order,
                                   process_objcat=False)

            ad_out.orig_filename = ad.filename
            gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)

        return adoutputs
Example #14
    def skyCorrectNodAndShuffle(self, adinputs=None, suffix=None):
        """
        Perform sky correction on GMOS N&S images by taking each image and
        subtracting from it a shifted version of the same image.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            # Check whether the myScienceStep primitive has been run previously
            if ad.phu.get(timestamp_key):
                log.warning(
                    "No changes will be made to {}, since it has "
                    "already been processed by skyCorrectNodShuffle".format(
                        ad.filename))
                continue

            # Determine N&S offset in (binned) pixels
            shuffle = ad.shuffle_pixels() // ad.detector_y_bin()
            a_nod_count, b_nod_count = ad.nod_count()

            ad_nodded = deepcopy(ad)

            # Shuffle B position data up for all extensions (SCI, DQ, VAR)
            for ext, ext_nodded in zip(ad, ad_nodded):
                #TODO: Add DQ=16 to top and bottom?
                # Set image initially to zero
                ext_nodded.multiply(0)
                # Then replace with the upward-shifted data
                for attr in ('data', 'mask', 'variance'):
                    getattr(ext_nodded,
                            attr)[shuffle:] = getattr(ext, attr)[:-shuffle]

            # Normalize if the A and B nod counts differ
            if a_nod_count != b_nod_count:
                log.stdinfo(
                    "{} A and B nod counts differ...normalizing".format(
                        ad.filename))
                ad.multiply(0.5 * (a_nod_count + b_nod_count) / a_nod_count)
                ad_nodded.multiply(0.5 * (a_nod_count + b_nod_count) /
                                   b_nod_count)

            # Subtract nodded image from image to complete the process
            ad.subtract(ad_nodded)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
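
The core of the correction is shifting a zeroed copy of each plane upward by the shuffle distance and subtracting it from the original. That array operation in isolation, assuming a positive shuffle in pixels:

import numpy as np

def shift_and_subtract(data, shuffle):
    """Subtract a copy of `data` shifted upward by `shuffle` rows; rows that
    shift in from the bottom stay at zero (mirrors the loop above)."""
    shifted = np.zeros_like(data)
    shifted[shuffle:] = data[:-shuffle]
    return data - shifted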
Example #15
    def subtract(self,rc):
        # This is a bare-bones primitive interface to the ad sub
        # function from the arith module.  The value, dictionary,
        # or AD instance to be subtracted from the input is stored in
        # rc["operand"]

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "subtract", "starting"))

        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["subtract"]

        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Get data to be subtracted from the RC
        operand = rc["operand"]
        if operand is None:
            log.stdinfo("No operand to subtract; no changes will be "\
                            "made to input")
        elif isinstance(operand, AstroData):
            log.stdinfo("Subtracting %s from input" % 
                        (operand.filename))
        else:
            log.stdinfo("Subtracting %s from input" % 
                        (repr(operand)))
            
        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            if operand is not None:
                # Subtract operand from data
                ad.sub(operand)

                # Add the appropriate time stamps to the PHU
                gt.mark_history(adinput=ad, keyword=timestamp_key)

                # Change the filename
                ad.filename = gt.filename_updater(
                    adinput=ad, suffix=rc["suffix"], strip=True)

            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
Example #16
    def scaleByIntensity(self, adinputs=None, **params):
        """
        This primitive scales input images to the mean value of the first
        image. It is intended to be used to scale flats to the same
        level before stacking.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        ref_mean = None
        for ad in adinputs:
            # If this input hasn't been tiled at all, tile it
            ad_for_stats = self.tileArrays([deepcopy(ad)], tile_all=False)[0] \
                if len(ad)>3 else ad

            # Use CCD2, or the entire mosaic if we can't find a second extn
            try:
                data = ad_for_stats[1].data
            except IndexError:
                data = ad_for_stats[0].data

            # Take off 5% of the width as a border
            xborder = max(int(0.05 * data.shape[1]), 20)
            yborder = max(int(0.05 * data.shape[0]), 20)
            log.fullinfo("Using data section [{}:{},{}:{}] from CCD2 for "
                         "statistics".format(xborder, data.shape[1] - xborder,
                                             yborder, data.shape[0] - yborder))

            stat_region = data[yborder:-yborder, xborder:-xborder]
            mean = np.mean(stat_region)

            # Set reference level to the first image's mean
            if ref_mean is None:
                ref_mean = mean
            scale = ref_mean / mean

            # Log and save the scale factor, and multiply by it
            log.fullinfo("Relative intensity for {}: {:.3f}".format(
                ad.filename, scale))
            ad.phu.set("RELINT",
                       scale,
                       comment=self.keyword_comments["RELINT"])
            ad.multiply(scale)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
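
The statistics come from the central region of CCD2 with a 5% (at least 20-pixel) border trimmed from each edge, and every frame is multiplied by ref_mean/mean so all inputs match the first one. A sketch of the border-and-mean calculation on bare arrays:

import numpy as np

def central_mean(data, frac=0.05, min_border=20):
    """Mean of the array with a fractional border (>= min_border pixels)
    trimmed from every edge; mirrors the statistics region above."""
    yb = max(int(frac * data.shape[0]), min_border)
    xb = max(int(frac * data.shape[1]), min_border)
    return np.mean(data[yb:-yb, xb:-xb])

frames = [np.random.normal(100.0, 1.0, (512, 512)) for _ in range(3)]
ref = central_mean(frames[0])
scales = [ref / central_mean(f) for f in frames]   # first scale is 1.0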
Example #17
    def stackFrames(self, adinputs=None, **params):
        """
        Combines all the extensions in slit-viewer frame(s) into a
        single-extension AD instance.

        This primitive wraps the higher level
        :meth:`geminidr.core.primitives_stack.Stack.stackFrames` primitive,
        but rather than stacking separate files to form a combined file,
        it is used to stack the extensions within each slit viewer 'image'
        (collection of exposures).

        This primitive can accept the same parameter set as
        :meth:`geminidr.core.primitives_stack.Stack.stackFrames`.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Keep hold of the first SLITV input's filename so the stacked
        # output has a sensible name.
        first_filename = None

        # CJS: This could be rewritten to produce one output AD for each
        # input AD by combining the extensions from each input separately.
        # That behaviour would not need the addToList/getList primitives.
        # But this is a better match to how the original code behaved.
        adoutputs = []
        extinputs = []
        for ad in adinputs:
            # CJS: Worth doing this check, I feel
            if 'SLITV' not in ad.tags:
                log.warning(
                    "{} is not a slit-viewer image. Continuing.".format(
                        ad.filename))
                adoutputs.append(ad)
                continue

            if not first_filename:
                first_filename = ad.phu['ORIGNAME'] or ad.filename
            # DQ plane is still needed so call stackFrames for ease
            # CJS: This is ugly but should go with pythonic stacking
            for index, ext in enumerate(ad, start=1):
                adext = deepcopy(ext)
                filename = filename_updater(ad, suffix='{:04d}'.format(index))
                adext.filename = filename
                adext.phu['ORIGNAME'] = filename
                extinputs.append(adext)

        adout = super(GHOSTSlit, self).stackFrames(extinputs, **params)[0]
        if first_filename:
            adout.phu['ORIGNAME'] = first_filename
        gt.mark_history(adout, primname=self.myself(), keyword=timestamp_key)
        adoutputs.append(adout)
        return adoutputs
Example #18
    def standardizeInstrumentHeaders(self, adinputs=None, **params):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of F2 data, specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by "
                            "standardizeInstrumentHeaders".format(ad.filename))
                continue

            # Standardize the headers of the input AstroData object. Update the
            # keywords in the headers that are specific to FLAMINGOS-2.
            log.status("Updating keywords that are specific to FLAMINGOS-2")

            # Filter name (required for IRAF?)
            ad.phu.set('FILTER', ad.filter_name(stripID=True, pretty=True),
                       self.keyword_comments['FILTER'])

            # Pixel scale (CJS: I'm putting this in the extension too!)
            pixel_scale = ad.pixel_scale()
            ad.phu.set('PIXSCALE', pixel_scale,
                       self.keyword_comments['PIXSCALE'])
            ad.hdr.set('PIXSCALE', pixel_scale,
                       self.keyword_comments['PIXSCALE'])

            for desc in ('read_noise', 'gain', 'non_linear_level',
                         'saturation_level'):
                kw = ad._keyword_for(desc)
                ad.hdr.set(kw,
                           getattr(ad, desc)()[0], self.keyword_comments[kw])
                try:
                    ad.phu.remove(kw)
                except (KeyError, AttributeError):
                    pass

            if 'SPECT' in ad.tags:
                kw = ad._keyword_for('dispersion_axis')
                ad.hdr.set(kw, 2, self.keyword_comments[kw])

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
Example #19
    def determineWavelengthSolution(self, rc):

        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Define the keyword to be used for the time stamp
        timestamp_key = self.timestamp_keys["determineWavelengthSolution"]

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "determineWavelengthSolution",
                                 "starting"))
                
        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            # Instantiate ETI and then run the task 
            # Run in a try/except because gswavelength sometimes fails
            # badly, and we want to be able to continue without
            # wavelength calibration in the QA case
            gswavelength_task = eti.gswavelengtheti.GswavelengthETI(rc,ad)
            try:
                adout = gswavelength_task.run()
            except Errors.OutputError:
                gswavelength_task.clean()
                if "qa" in rc.context:
                    log.warning("gswavelength failed for input " + ad.filename)
                    adoutput_list.append(ad)
                    continue
                else:
                    raise Errors.ScienceError("gswavelength failed for input "+
                                              ad.filename + ". Try interactive"+
                                              "=True")

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=adout, keyword=timestamp_key)

            # Change the filename
            adout.filename = gt.filename_updater(
                adinput=adout, suffix=rc["suffix"], strip=True)
            
            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(adout)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
Example #20
    def skyCorrectFromSlit(self, rc):

        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Define the keyword to be used for the time stamp
        timestamp_key = self.timestamp_keys["skyCorrectFromSlit"]

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "skyCorrectFromSlit", "starting"))
                
        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            try:
                xbin = ad.detector_x_bin().as_pytype()
                ybin = ad.detector_y_bin().as_pytype()
                bin_factor = xbin*ybin
                roi = ad.detector_roi_setting().as_pytype()
            except:
                bin_factor = 1
                roi = "unknown"

            if bin_factor<=2 and roi=="Full Frame" and "qa" in rc.context:
                log.warning("Frame is too large to subtract sky efficiently; not "\
                            "subtracting sky for %s" % ad.filename)
                adoutput_list.append(ad)
                continue

            # Instantiate ETI and then run the task 
            gsskysub_task = eti.gsskysubeti.GsskysubETI(rc,ad)
            adout = gsskysub_task.run()

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=adout, keyword=timestamp_key)

            # Change the filename
            adout.filename = gt.filename_updater(
                adinput=adout, suffix=rc["suffix"], strip=True)
            
            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(adout)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
Example #21
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of the illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        if illum_mask is None:
            illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            if illum is None:
                # So it can be zipped with the AD
                final_illum = [None] * len(ad)
            else:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask = iext if ext.mask is None else ext.mask | iext

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
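
Flagging unilluminated pixels is a bitwise OR of a single DQ bit into the existing mask, or the creation of a mask where none exists. The same operation on plain arrays; the bit value and dtype are written out explicitly here as assumptions, since DQ.unilluminated and DQ.datatype are package constants:

import numpy as np

UNILLUMINATED = 64          # assumed bit value, for illustration only

def add_illum_bit(mask, illum):
    """OR the unilluminated bit into `mask` wherever `illum` is positive."""
    bit = np.where(illum > 0, UNILLUMINATED, 0).astype(np.uint16)
    return bit if mask is None else (mask | bit)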
Example #22
    def wcalResampleToLinearCoords(self,rc):

        """ Uses the Wavecal fit_image solution
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Define the keyword to be used for the time stamp
        timestamp_key = self.timestamp_keys["wcalResampleToLinearCoords"]

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "wcalResampleToLinearCoords", 
                                 "starting"))
                
        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            # Check for a wavelength solution
            if ad["WAVECAL"] is None:
                if "qa" in rc.context:
                    log.warning("No wavelength solution found for %s" %
                                ad.filename)

                    adout=ad   # Don't do anything 
                else:
                    raise Errors.InputError("No wavelength solution found "\
                                            "for %s" % ad.filename)
            else:
                # Wavelength solution found. 
                wc = Wavecal(ad)
                wc.read_wavecal_table()
                adout = wc.resample_image_asAstrodata()

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=adout, keyword=timestamp_key)

            # Change the filename
            adout.filename = gt.filename_updater(
                adinput=adout, suffix=rc["suffix"], strip=True)
            
            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(adout)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
Example #23
 def validateData(self, rc):
     """
     This primitive is used to validate NIRI data, specifically.
     
     :param repair: Set to True to repair the data, if necessary. Note: this
                    feature does not work yet. 
     :type repair: Python boolean
     """
     # Instantiate the log
     log = logutils.get_logger(__name__)
     
     # Log the standard "starting primitive" debug message
     log.debug(gt.log_message("primitive", "validateData", "starting"))
     
     # Define the keyword to be used for the time stamp for this primitive
     timestamp_key = self.timestamp_keys["validateData"]
     
     # Initialize the list of output AstroData objects
     adoutput_list = []
     
     # Loop over each input AstroData object in the input list
     for ad in rc.get_inputs_as_astrodata():
         
         # Check whether the validateData primitive has been run previously
         if ad.phu_get_key_value(timestamp_key):
             log.warning("No changes will be made to %s, since it has "
                         "already been processed by validateData"
                         % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Validate the input AstroData object.
         log.status("No validation required for %s" % ad.filename)
         
         # Add the appropriate time stamps to the PHU
         gt.mark_history(adinput=ad, keyword=timestamp_key)
         
         # Change the filename
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Append the output AstroData object to the list of output
         # AstroData objects
         adoutput_list.append(ad)
     
     # Report the list of output AstroData objects to the reduction context
     rc.report_output(adoutput_list)
     
     yield rc
Example #24
 def _markAsCalibration(self, adinputs=None, suffix=None, update_datalab=True,
                       primname=None, keyword=None):
     """
     Updates filenames, datalabels (if asked) and adds header keyword
     prior to storing AD objects as calibrations
     """
     for ad in adinputs:
         if suffix:
             ad.update_filename(suffix=suffix, strip=True)
         if update_datalab:
             _update_datalab(ad, suffix, self.keyword_comments)
         gt.mark_history(adinput=ad, primname=primname, keyword=keyword)
     return adinputs
Example #25
    def standardizeInstrumentHeaders(self, adinputs=None, **params):
        """
        This primitive is used to make the changes and additions to the
        keywords in the headers of F2 data, specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by "
                            "standardizeInstrumentHeaders".format(ad.filename))
                continue

            # Standardize the headers of the input AstroData object. Update the
            # keywords in the headers that are specific to FLAMINGOS-2.
            log.status("Updating keywords that are specific to FLAMINGOS-2")

            # Filter name (required for IRAF?)
            ad.phu.set('FILTER', ad.filter_name(stripID=True, pretty=True),
                       self.keyword_comments['FILTER'])

            # Pixel scale (CJS: I'm putting this in the extension too!)
            pixel_scale = ad.pixel_scale()
            ad.phu.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])
            ad.hdr.set('PIXSCALE', pixel_scale, self.keyword_comments['PIXSCALE'])

            for desc in ('read_noise', 'gain', 'non_linear_level',
                         'saturation_level'):
                kw = ad._keyword_for(desc)
                ad.hdr.set(kw, getattr(ad, desc)()[0], self.keyword_comments[kw])
                try:
                    ad.phu.remove(kw)
                except (KeyError, AttributeError):
                    pass

            if 'SPECT' in ad.tags:
                kw = ad._keyword_for('dispersion_axis')
                ad.hdr.set(kw, 2, self.keyword_comments[kw])

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
Example #26
    def validateData(self, adinputs=None, suffix=None):
        """
        This is the data validation primitive. It checks that the instrument
        matches the primitivesClass and that there are the correct number
        of extensions.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        timestamp_key = self.timestamp_keys[self.myself()]
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by validateData".format(
                                ad.filename))
                continue

            # Check that the input is appropriate for this primitivesClass
            # Only the instrument is checked
            inst_name = ad.instrument(generic=True)
            if inst_name not in self.tagset:
                prim_class_name = self.__class__.__name__
                raise OSError("Input file {} is {} data and not suitable for "
                              "{} class".format(ad.filename, inst_name,
                                                prim_class_name))

            # Report if this is an image without square binned pixels
            if 'IMAGE' in ad.tags:
                xbin = ad.detector_x_bin()
                ybin = ad.detector_y_bin()
                if xbin != ybin:
                    log.warning("Image {} is {} x {} binned data".format(
                        ad.filename, xbin, ybin))

            if self._has_valid_extensions(ad):
                log.fullinfo("The input file has been validated: {} contains "
                             "{} extension(s)".format(ad.filename, len(ad)))
            else:
                raise OSError("The {} extension(s) in {} does not match the "
                              "number of extensions expected in raw {} "
                              "data.".format(len(ad), ad.filename, inst_name))

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #27
    def makeIRAFCompatible(self, adinputs=None):
        """
        Add keywords to make the pipeline-processed file compatible
        with the tasks in the Gemini IRAF package.
        """
        log = self.log
        log.debug(gt.log_message('primitive', self.myself(), 'starting'))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            irafcompat.pipeline2iraf(ad)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)

        return adinputs
Example #28
    def makeIRAFCompatible(self, adinputs=None):
        """
        Add keywords to make the pipeline-processed file compatible
        with the tasks in the Gemini IRAF package.
        """
        log = self.log
        log.debug(gt.log_message('primitive', self.myself(), 'starting'))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            irafcompat.pipeline2iraf(ad)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)

        return adinputs
Example #29
    def validateData(self, adinputs=None, suffix=None):
        """
        This is the data validation primitive. It checks that the instrument
        matches the primitivesClass and that there are the correct number
        of extensions.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        timestamp_key = self.timestamp_keys[self.myself()]
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by validateData".
                            format(ad.filename))
                continue

            # Check that the input is appropriate for this primitivesClass
            # Only the instrument is checked
            inst_name = ad.instrument(generic=True)
            if inst_name not in self.tagset:
                prim_class_name = self.__class__.__name__
                raise IOError("Input file {} is {} data and not suitable for "
                    "{} class".format(ad.filename, inst_name, prim_class_name))

            # Report if this is an image without square binned pixels
            if 'IMAGE' in ad.tags:
                xbin = ad.detector_x_bin()
                ybin = ad.detector_y_bin()
                if xbin != ybin:
                    log.warning("Image {} is {} x {} binned data".
                                format(ad.filename, xbin, ybin))

            if self._has_valid_extensions(ad):
                log.fullinfo("The input file has been validated: {} contains "
                             "{} extension(s)".format(ad.filename, len(ad)))
            else:
                raise IOError("The {} extension(s) in {} does not match the "
                              "number of extensions expected in raw {} "
                              "data.".format(len(ad), ad.filename, inst_name))

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
Example #30
 def _markAsCalibration(self, adinputs=None, suffix=None, update_datalab=True,
                        primname=None, keyword=None):
     """
     Updates filenames, datalabels (if asked) and adds header keyword
     prior to storing AD objects as calibrations
     """
     for ad in adinputs:
         # Reset the suffix for each AD so it doesn't accumulate across inputs
         proc_suffix = f"_{self.mode}"
         if suffix:
             proc_suffix += suffix
         ad.update_filename(suffix=proc_suffix, strip=True)
         if update_datalab:
             _update_datalab(ad, suffix, self.keyword_comments)
         gt.mark_history(adinput=ad, primname=primname, keyword=keyword)
         ad.phu.set('PROCMODE', self.mode)
     return adinputs
Example #31
    def addVAR(self, adinputs=None, **params):
        """
        This primitive adds noise components to the VAR plane of each extension
        of each input AstroData object (creating the VAR plane if necessary).
        The calculations for these components are abstracted out to separate
        methods that operate on an individual AD object in-place.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        read_noise: bool (optional, default: False)
            add the read noise component?
        poisson_noise: bool (optional, default: False)
            add the Poisson noise component?
        """
        log = self.log
        log.debug(gt.log_message("primitive", "addVAR", "starting"))
        timestamp_key = self.timestamp_keys["addVAR"]
        read_noise = params['read_noise']
        poisson_noise = params['poisson_noise']
        suffix = params['suffix']

        if read_noise:
            if poisson_noise:
                log.stdinfo('Adding the read noise component and the Poisson '
                            'noise component of the variance')
            else:
                log.stdinfo('Adding the read noise component of the variance')
        else:
            if poisson_noise:
                log.stdinfo(
                    'Adding the Poisson noise component of the variance')
            else:
                log.warning(
                    'Cannot add a variance extension since no variance '
                    'component has been selected')
                return adinputs

        for ad in adinputs:
            if read_noise:
                self._addReadNoise(ad)
            if poisson_noise:
                self._addPoissonNoise(ad)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
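
For data in ADU, the two components are typically the squared read noise converted to ADU and a Poisson term derived from the counts themselves. A hedged sketch of how such a plane could be assembled (not the package's _addReadNoise/_addPoissonNoise implementations):

import numpy as np

def variance_plane(data_adu, gain, read_noise_e, read=True, poisson=True):
    """Illustrative variance estimate in ADU**2: a read-noise term plus a
    Poisson term from the (assumed bias-subtracted) counts."""
    var = np.zeros_like(data_adu, dtype=np.float32)
    if read:
        var += (read_noise_e / gain) ** 2
    if poisson:
        var += np.clip(data_adu, 0, None) / gain
    return var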
Example #32
    def myNewPrimitive(self, adinputs=None, **params):
        """
        Description...

        Parameters
        ----------
        suffix: str
            suffix to be added to output files

        param2: blah
            blah, blah

        Returns
        -------

        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Get params out
        param2 = params['param2']

        # Initialize the list of output AstroData objects
        # It is also possible to modify adinputs in place.
        adoutputs = []

        for ad in adinputs:

            # Do whatever checks on the input are necessary, for example:
            # Check whether this primitive has been run already.
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has"
                            "already been processed by myNewPrimitive".format(
                                ad.filename))
                continue

            # -----------------------
            # DR algorithm goes here, producing the output object `ad_out`
            # (which may simply be `ad` modified in place)
            # -----------------------

            # Timestamp
            gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)

            adoutputs.append(ad_out)

        return adoutputs
Example #33
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of the illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        if illum_mask is None:
            illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]

        for ad, illum in zip(*gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addIllumMaskToDQ'
                            .format(ad.filename))
                continue

            if illum is None:
                # So it can be zipped with the AD
                final_illum = [None] * len(ad)
            else:
                log.fullinfo("Using {} as illumination mask".format(illum.filename))
                final_illum = gt.clip_auxiliary_data(ad, aux=illum, aux_type='bpm',
                                          return_dtype=DQ.datatype)

            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask = iext if ext.mask is None else ext.mask | iext

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
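For reference, the bit arithmetic in the loop above reduces to OR-ing an "unilluminated" bit into any existing mask. A self-contained numpy sketch of that operation follows; the bit value 64 is assumed here purely for illustration and may differ from DQ.unilluminated.

import numpy as np

UNILLUMINATED = np.uint16(64)            # assumed bit value, for illustration only

illum = np.array([[0, 1], [0, 3]])       # >0 means the pixel is not illuminated
existing_mask = np.array([[0, 0], [1, 0]], dtype=np.uint16)

iext = np.where(illum > 0, UNILLUMINATED, 0).astype(np.uint16)
combined = existing_mask | iext          # preserves any bits already set
print(combined.tolist())                 # [[0, 64], [1, 64]]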
Example #34
    def standardizeStructure(self, adinputs=None, **params):
        """
        This primitive is used to standardize the structure of F2 data,
        specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        attach_mdf: bool
            attach an MDF to the AD objects? (ignored if not tagged as SPECT)
        mdf: str
            full path of the MDF to attach
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        adoutputs = []
        for ad, mdf in zip(*gt.make_lists(adinputs, params['mdf'])):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardizeStructure".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            # Attach an MDF to each input AstroData object
            if params["attach_mdf"] and 'SPECT' in ad.tags:
                ad = self.addMDF([ad], mdf=mdf)[0]

            # Raw FLAMINGOS-2 pixel data have three dimensions (2048x2048x1).
            # Remove the single length dimension from the pixel data.
            # CD3_3 keyword must also be removed or alignAndStack complains.
            ad = remove_single_length_dimension(ad)

            # Need to change dtype from int32 to float32, or else numpy will
            # promote to float64. There's no VAR or DQ at this stage.
            ad[0].data = ad[0].data.astype(np.float32)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            adoutputs.append(ad)

        return adoutputs
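A hedged, numpy-only sketch of the reshaping step that a helper like remove_single_length_dimension is assumed to perform; the real helper would also delete the now-meaningless WCS keyword (CD3_3), which is only noted in a comment here.

import numpy as np

def drop_single_length_axis(data):
    """Remove a single-length axis from a raw frame,
    e.g. (2048, 2048, 1) or (1, 2048, 2048) -> (2048, 2048)."""
    if data.ndim == 3 and 1 in data.shape:
        data = np.squeeze(data)
        # In the real pipeline the matching WCS keyword (e.g. CD3_3)
        # would also be removed from the header at this point.
    return data

raw = np.zeros((2048, 2048, 1), dtype=np.int32)
print(drop_single_length_axis(raw).shape)   # (2048, 2048)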
Example #35
    def addVAR(self, adinputs=None, **params):
        """
        This primitive adds noise components to the VAR plane of each extension
        of each input AstroData object (creating the VAR plane if necessary).
        The calculations for these components are abstracted out to separate
        methods that operate on an individual AD object in-place.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        read_noise: bool
            add the read noise component?
        poisson_noise: bool
            add the Poisson noise component?
        """
        log = self.log
        log.debug(gt.log_message("primitive", "addVAR", "starting"))
        timestamp_key = self.timestamp_keys["addVAR"]
        read_noise = params['read_noise']
        poisson_noise = params['poisson_noise']
        suffix = params['suffix']

        if read_noise:
            if poisson_noise:
                log.stdinfo('Adding the read noise component and the Poisson '
                            'noise component of the variance')
            else:
                log.stdinfo('Adding the read noise component of the variance')
        else:
            if poisson_noise:
                log.stdinfo('Adding the Poisson noise component of the variance')
            else:
                log.warning('Cannot add a variance extension since no variance '
                            'component has been selected')
                return adinputs

        for ad in adinputs:
            if read_noise:
                self._addReadNoise(ad)
            if poisson_noise:
                self._addPoissonNoise(ad)
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
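The helpers _addReadNoise and _addPoissonNoise are not shown above. As a rough standalone sketch of the variance arithmetic they are assumed to perform, for data in ADU and with made-up gain and read-noise values:

import numpy as np

def add_variance(data_adu, gain=2.0, read_noise_e=4.0,
                 read_noise=True, poisson_noise=True):
    """Return a variance plane in ADU**2 for data in ADU (illustrative only)."""
    var = np.zeros_like(data_adu, dtype=np.float32)
    if read_noise:
        var += (read_noise_e / gain) ** 2            # constant read-noise term
    if poisson_noise:
        var += np.clip(data_adu, 0, None) / gain     # shot noise: N_e / gain**2 in ADU**2
    return var

frame = np.full((4, 4), 100.0, dtype=np.float32)
print(add_variance(frame)[0, 0])    # 4.0 (read noise) + 50.0 (Poisson) = 54.0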
    def correctImageOrientation(self, adinputs=None, debug_level=0, **params):
        """
        Correct image orientation to proper echelle format.

        Flips the image so that the lower-left corner holds the bluest wavelength
        and the upper-right corner the reddest wavelength.
        Echelle orders go from left to right.

        Args:
            adinputs (list of AstroData): input 2D echelle spectra (this primitive
                previously operated on a single np.ndarray image)
            debug_level (int): debug level
        Returns:
            list of AstroData: images with the correct orientation
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        adoutputs = []
        for ad in adinputs:
            adout = copy.deepcopy(ad)
            if debug_level > 0:
                plt.figure()
                plt.title('Original Image')
                plt.imshow(ad.data[0], origin='lower')
                plt.show()

            if ad.image_orientation(
            )['vertical orientation flip'] and ad.image_orientation(
            )['horizontal orientation flip']:  # flip up-down
                adout[0].data = np.fliplr(np.flipud(ad[0].data))

            if debug_level > 0:
                plt.figure()
                plt.title('Orientation Corrected image')
                plt.imshow(adout.data[0], origin='lower')
                plt.show()
            adout.update_filename(suffix=params['suffix'], strip=True)
            adoutputs.append(adout)

        gt.mark_history(adoutputs,
                        primname=self.myself(),
                        keyword=timestamp_key)

        return adoutputs
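A hedged, array-only sketch of the flip operation, treating the two axes independently (the primitive above flips only when both orientation flags are set) and leaving aside the image_orientation descriptor:

import numpy as np

def correct_orientation(img, flip_vertical, flip_horizontal):
    """Flip a 2-D echelle frame so the bluest order ends up at lower left."""
    if flip_vertical:
        img = np.flipud(img)
    if flip_horizontal:
        img = np.fliplr(img)
    return img

img = np.arange(6).reshape(2, 3)
print(correct_orientation(img, True, True))   # same as np.rot90(img, 2)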
Example #37
    def prepare(self, adinputs=None, **params):
        """
        Validate and standardize the datasets to ensure compatibility
        with the subsequent primitives.  The outputs, if written to
        disk, will be given the suffix "_prepared".

        Currently, there are no input parameters associated with
        this primitive.

        Parameters
        ----------
        adinputs : None or list
            Input files that will be prepared. If `None`, it runs on the
            list of AstroData objects in the main stream.
        suffix: str
            Suffix to be added to output files (Default: "_prepared").
        """
        log = self.log
        log.debug(gt.log_message("primitive", "prepare", "starting"))

        filenames = [ad.filename for ad in adinputs]
        paths = [ad.path for ad in adinputs]

        timestamp_key = self.timestamp_keys["prepare"]
        sfx = params["suffix"]
        for primitive in ('validateData', 'standardizeStructure',
                          'standardizeHeaders', 'standardizeWCS'):
            # No need to call standardizeWCS if all adinputs are single-extension
            # images (tags should be the same for all adinputs)
            if ('WCS' not in primitive or 'SPECT' in adinputs[0].tags
                    or any(len(ad) > 1 for ad in adinputs)):
                passed_params = self._inherit_params(params, primitive)
                adinputs = getattr(self, primitive)(adinputs, **passed_params)

        for ad in adinputs:
            gt.mark_history(ad, self.myself(), timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)
        for ad, filename, path in zip(adinputs, filenames, paths):
            if path:
                add_provenance(ad, filename, md5sum(path) or "", self.myself())
        return adinputs
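prepare ends by recording provenance keyed on an MD5 checksum of the raw file. Below is a minimal sketch of such a checksum helper; the name matches the md5sum call above, but this is a generic hashlib implementation and not necessarily the pipeline's own utility.

import hashlib

def md5sum(path, blocksize=1 << 20):
    """Return the hex MD5 digest of a file, reading it in chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as fobj:
        for chunk in iter(lambda: fobj.read(blocksize), b""):
            digest.update(chunk)
    return digest.hexdigest()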
    def makeFlat(self, rc):

        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Define the keyword to be used for the time stamp
        timestamp_key = self.timestamp_keys["makeFlat"]

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "makeFlat", "starting"))
                
        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Check if inputs prepared
        for ad in rc.get_inputs_as_astrodata():
            if "PREPARED" not in ad.types:
                raise Errors.InputError("%s must be prepared" % ad.filename)

        # Instantiate ETI and then run the task 
        gsflat_task = eti.gsflateti.GsflatETI(rc)
        adout = gsflat_task.run()

        # Set any zero-values to 1 (to avoid dividing by zero)
        for sciext in adout["SCI"]:
            sciext.data[sciext.data==0] = 1.0

        # Blank out any position or program information in the
        # header (spectroscopy flats are often taken with science data)
        adout = gt.convert_to_cal_header(adinput=adout,caltype="flat")[0]

        # Add the appropriate time stamps to the PHU
        gt.mark_history(adinput=adout, keyword=timestamp_key)

        adoutput_list.append(adout)

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
    def markAsPrepared(self, rc):
        """
        This primitive is used to add a time stamp keyword to the PHU of the
        AstroData object and update the AstroData type, allowing the output
        AstroData object to be recognised as PREPARED.

        """
        # Instantiate the log
        log = logutils.get_logger(__name__)

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "markAsPrepared", "starting"))

        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["prepare"]

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Update the AstroData type so that the AstroData object is
            # recognised as being prepared
            ad.refresh_types()

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)

            # Append the output AstroData object to the list of output
            # AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)

        yield rc
Example #40
    def storeProcessedScience(self, adinputs=None, suffix=None):
        if self.mode not in ['sq', 'ql', 'qa']:
            self.log.warning(
                f'Mode {self.mode} not recognized in storeProcessedScience, not saving anything'
            )
            return adinputs

        for ad in adinputs:
            gt.mark_history(adinput=ad,
                            primname=self.myself(),
                            keyword="PROCSCI")
            ad.update_filename(suffix=suffix, strip=True)
            ad.phu.set('PROCMODE', self.mode)

            if self.mode != 'qa' and self.upload and 'science' in self.upload:
                old_filename = ad.filename
                ad.update_filename(suffix=f"_{self.mode}" + suffix, strip=True)
                self.caldb.store_calibration(ad, caltype="processed_science")
                ad.filename = old_filename

        return adinputs
    def storeProcessedFringe(self, rc):
        # Instantiate the log
        log = logutils.get_logger(__name__)

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "storeProcessedFringe",
                                 "starting"))

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            # Updating the file name with the suffix for this primitive and
            # then report the new file to the reduction context
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)

            # Sanitize the headers of the file so that it looks like
            # a public calibration file rather than a science file
            ad = gt.convert_to_cal_header(adinput=ad, caltype="fringe")[0]

            # Adding a PROCFRNG time stamp to the PHU
            gt.mark_history(adinput=ad, keyword="PROCFRNG")

            # Refresh the AD types to reflect new processed status
            ad.refresh_types()

            adoutput_list.append(ad)

        # Upload to cal system
        rc.run("storeCalibration")

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
Example #42
    def standardizeStructure(self, adinputs=None, **params):
        """
        This primitive is used to standardize the structure of GMOS data,
        specifically.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        attach_mdf: bool
            attach an MDF to the AD objects? (ignored if not tagged as SPECT)
        mdf: str
            full path of the MDF to attach
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        adoutputs = []
        # If attach_mdf=False, this just zips up the ADs with a list of Nones,
        # which has no side-effects.
        for ad, mdf in zip(*gt.make_lists(adinputs, params['mdf'])):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by standardizeStructure".
                            format(ad.filename))
                adoutputs.append(ad)
                continue

            # Attach an MDF to each input AstroData object
            if params["attach_mdf"] and 'SPECT' in ad.tags:
                ad = self.addMDF([ad], mdf=mdf)[0]

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            adoutputs.append(ad)
        return adoutputs
Example #43
    def approximateWaveCal(self, rc):

        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Define the keyword to be used for the time stamp
        timestamp_key = self.timestamp_keys["appwave"]

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "appwave", "starting"))
                
        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Loop over each input AstroData object in the input list
        for ad in rc.get_inputs_as_astrodata():

            applyApproxWaveCal(ad)

            adout = ad

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=adout, keyword=timestamp_key)

            # Change the filename
            adout.filename = gt.filename_updater(
                adinput=adout, suffix=rc["suffix"], strip=True)
            
            # Append the output AstroData object to the list
            # of output AstroData objects
            adoutput_list.append(adout)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
Example #44
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, getProcessedBias will be called
        to ensure a bias exists for every adinput.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_bias: bool
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if not do_bias:
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            self.getProcessedBias(adinputs, refresh=False)
            bias_list = self._get_cal(adinputs, 'processed_bias')
        else:
            bias_list = bias

        # Provide a bias AD object for every science frame
        for ad, bias in zip(
                *gt.make_lists(adinputs, bias_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by biasCorrect".format(
                                ad.filename))
                continue

            if bias is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    raise OSError('No processed bias listed for {}'.format(
                        ad.filename))

            try:
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)

            log.fullinfo('Subtracting this bias from {}:\n{}'.format(
                ad.filename, bias.filename))
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename,
                       self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
            if bias.path:
                add_provenance(ad, bias.filename,
                               md5sum(bias.path) or "", self.myself())

            timestamp = datetime.now()
        return adinputs
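The docstring notes that the variance and data quality planes are updated by the subtraction. For independent inputs this amounts to adding variances and OR-ing DQ bits, which the AstroData arithmetic handles automatically; here is a plain numpy sketch of the same propagation.

import numpy as np

def subtract_bias(sci, var, dq, bias_sci, bias_var, bias_dq):
    """Propagate science, variance and DQ through a bias subtraction."""
    out_sci = sci - bias_sci
    out_var = var + bias_var          # variances add for a difference
    out_dq = dq | bias_dq             # any bad bit in either input survives
    return out_sci, out_var, out_dq

sci = np.full((2, 2), 1000.0)
var = np.full((2, 2), 25.0)
dq = np.zeros((2, 2), dtype=np.uint16)
b_sci = np.full((2, 2), 500.0)
b_var = np.full((2, 2), 4.0)
b_dq = np.array([[0, 1], [0, 0]], dtype=np.uint16)
print(subtract_bias(sci, var, dq, b_sci, b_var, b_dq)[1][0, 0])  # 29.0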
    def addDQ(self, rc):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).
        
        :param bpm: The file name, including the full path, of the BPM(s) to be
                    used to flag bad pixels in the DQ extension. If only one
                    BPM is provided, that BPM will be used to flag bad pixels
                    in the DQ extension for all input AstroData object(s). If
                    more than one BPM is provided, the number of BPMs must
                    match the number of input AstroData objects. If no BPM is
                    provided, the primitive will attempt to determine an
                    appropriate BPM.
        :type bpm: string or list of strings
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "addDQ", "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["addDQ"]
        
        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Set the data type of the data quality array
        # It can be uint8 for now; it will get promoted as higher bit values
        # are assigned, so there is no need to force it up to 16 bits yet.
        dq_dtype = np.dtype(np.uint8)
        #dq_dtype = np.dtype(np.uint16)
        
        # Get the input AstroData objects
        adinput = rc.get_inputs_as_astrodata()
        
        # Loop over each input AstroData object in the input list
        for ad in adinput:
            
            # Check whether the addDQ primitive has been run previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has "
                            "already been processed by addDQ" % ad.filename)
                
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Parameters specified on the command line to reduce are converted
            # to strings, including None
            ##M What about if a user doesn't want to add a BPM at all?
            ##M Are None's not converted to Nonetype from the command line?
            if rc["bpm"] and rc["bpm"] != "None":
                # The user supplied an input to the bpm parameter
                bpm = rc["bpm"]
            else:
                # The user did not supply an input to the bpm parameter, so try
                # to find an appropriate one. Get the dictionary containing the
                # list of BPMs for all instruments and modes.
                all_bpm_dict = Lookups.get_lookup_table("Gemini/BPMDict",
                                                        "bpm_dict")
                
                # Call the _get_bpm_key helper function to get the key for the
                # lookup table 
                key = self._get_bpm_key(ad)
                
                # Get the appropriate BPM from the look up table
                if key in all_bpm_dict:
                    bpm = lookup_path(all_bpm_dict[key])
                else:
                    bpm = None
                    log.warning("No BPM found for %s, no BPM will be "
                                "included" % ad.filename)

            # Ensure that the BPMs are AstroData objects
            bpm_ad = None
            if bpm is not None:
                log.fullinfo("Using %s as BPM" % str(bpm))
                if isinstance(bpm, AstroData):
                    bpm_ad = bpm
                else:
                    bpm_ad = AstroData(bpm)
                    ##M Do we want to fail here depending on context?
                    if bpm_ad is None:
                        log.warning("Cannot convert %s into an AstroData "
                                    "object, no BPM will be added" % bpm)

            final_bpm = None
            if bpm_ad is not None:
                # Clip the BPM data to match the size of the input AstroData
                # object science and pad with overscan region, if necessary
                final_bpm = gt.clip_auxiliary_data(adinput=ad, aux=bpm_ad,
                                                   aux_type="bpm")[0]

            # Get the non-linear level and the saturation level using the
            # appropriate descriptors - Individual values get checked in the
            # next loop 
            non_linear_level_dv = ad.non_linear_level()
            saturation_level_dv = ad.saturation_level()

            # Loop over each science extension in each input AstroData object
            for ext in ad[SCI]:
                
                # Retrieve the extension number for this extension
                extver = ext.extver()
                
                # Check whether an extension with the same name as the DQ
                # AstroData object already exists in the input AstroData object
                if ad[DQ, extver]:
                    log.warning("A [%s,%d] extension already exists in %s"
                                % (DQ, extver, ad.filename))
                    continue
                
                # Get the non-linear level and the saturation level for this
                # extension
                non_linear_level = non_linear_level_dv.get_value(extver=extver)
                saturation_level = saturation_level_dv.get_value(extver=extver)

                # To store individual arrays created for each of the DQ bit
                # types
                dq_bit_arrays = []

                # Create an array that contains pixels that have a value of 2
                # when that pixel is in the non-linear regime in the input
                # science extension
                if non_linear_level is not None:
                    non_linear_array = None
                    if saturation_level is not None:
                        # Test the saturation level against non_linear level
                        # They can be the same or the saturation level can be
                        # greater than but not less than the non-linear level.
                        # If they are the same then only flag saturated pixels
                        # below. This just means not creating an unnecessary
                        # intermediate array.
                        if saturation_level > non_linear_level:
                            log.fullinfo("Flagging pixels in the DQ extension "
                                         "corresponding to non linear pixels "
                                         "in %s[%s,%d] using non linear "
                                         "level = %.2f" % (ad.filename, SCI,
                                                           extver,
                                                           non_linear_level))

                            non_linear_array = np.where(
                                ((ext.data >= non_linear_level) &
                                (ext.data < saturation_level)), 2, 0)
                            
                        elif saturation_level < non_linear_level:
                            log.warning("%s[%s,%d] saturation_level value is"
                                        "less than the non_linear_level not"
                                        "flagging non linear pixels" %
                                        (ad.filname, SCI, extver))
                        else:
                            log.fullinfo("Saturation and non-linear values "
                                         "for %s[%s,%d] are the same. Only "
                                         "flagging saturated pixels."
                                         % (ad.filename, SCI, extver))
                            
                    else:
                        log.fullinfo("Flagging pixels in the DQ extension "
                                     "corresponding to non linear pixels "
                                     "in %s[%s,%d] using non linear "
                                     "level = %.2f" % (ad.filename, SCI, extver,
                                                       non_linear_level))

                        non_linear_array = np.where(
                            (ext.data >= non_linear_level), 2, 0)
                    
                    dq_bit_arrays.append(non_linear_array)

                # Create an array that contains pixels that have a value of 4
                # when that pixel is saturated in the input science extension
                if saturation_level is not None:
                    saturation_array = None
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to saturated pixels in "
                                 "%s[%s,%d] using saturation level = %.2f" %
                                 (ad.filename, SCI, extver, saturation_level))
                    saturation_array = np.where(
                        ext.data >= saturation_level, 4, 0)
                    dq_bit_arrays.append(saturation_array)
                
                # BPMs have an EXTNAME equal to DQ
                bpmname = None
                if final_bpm is not None:
                    bpm_array = None
                    bpmname = os.path.basename(final_bpm.filename)
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to bad pixels in %s[%s,%d] "
                                 "using the BPM %s[%s,%d]" %
                                 (ad.filename, SCI, extver, bpmname, DQ, extver))
                    bpm_array = final_bpm[DQ, extver].data
                    dq_bit_arrays.append(bpm_array)
                
                # Create a single DQ extension from the three arrays (BPM,
                # non-linear and saturated)
                if not dq_bit_arrays:
                    # The BPM, non-linear and saturated arrays were not
                    # created. Create a single DQ array with all pixels set
                    # equal to 0 
                    log.fullinfo("The BPM, non-linear and saturated arrays "
                                 "were not created. Creating a single DQ "
                                 "array with all the pixels set equal to zero")
                    final_dq_array = np.zeros(ext.data.shape).astype(dq_dtype)

                else:
                    final_dq_array = self._bitwise_OR_list(dq_bit_arrays)
                    final_dq_array = final_dq_array.astype(dq_dtype)
                
                # Create a data quality AstroData object
                dq = AstroData(data=final_dq_array)
                dq.rename_ext(DQ, ver=extver)
                dq.filename = ad.filename
                
                # Call the _update_dq_header helper function to update the
                # header of the data quality extension with some useful
                # keywords
                dq = self._update_dq_header(sci=ext, dq=dq, bpmname=bpmname)
                
                # Append the DQ AstroData object to the input AstroData object
                log.fullinfo("Adding extension [%s,%d] to %s"
                             % (DQ, extver, ad.filename))
                ad.append(moredata=dq)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)
            
            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)
            
            # Append the output AstroData object to the list of output
            # AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)
        
        yield rc
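The core of addDQ is the assembly of one DQ array from up to three bit planes. A compact numpy sketch of that assembly, using the bit values quoted in the docstring (1 = bad pixel, 2 = non-linear, 4 = saturated):

import numpy as np

def build_dq(sci, bpm=None, non_linear_level=None, saturation_level=None):
    """Combine BPM, non-linearity and saturation flags into a single DQ plane."""
    dq = np.zeros(sci.shape, dtype=np.uint16)
    if bpm is not None:
        dq |= np.where(bpm > 0, 1, 0).astype(np.uint16)
    if non_linear_level is not None:
        upper = saturation_level if saturation_level is not None else np.inf
        dq |= np.where((sci >= non_linear_level) & (sci < upper), 2, 0).astype(np.uint16)
    if saturation_level is not None:
        dq |= np.where(sci >= saturation_level, 4, 0).astype(np.uint16)
    return dq

sci = np.array([[10., 45000.], [62000., 70000.]])
print(build_dq(sci, non_linear_level=40000., saturation_level=65000.).tolist())
# [[0, 2], [2, 4]]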
Example #46
    def subtractOverscan(self, adinputs=None, **params):
        """
        This primitive subtracts the overscan level from the image. The
        level for each row (currently the primitive requires that the overscan
        region be a vertical strip) is determined in one of the following
        ways, according to the *function* and *order* parameters:

        "poly":   a polynomial of degree *order* (1=linear, etc)
        "spline": using *order* equally-sized cubic spline pieces or, if
                  order=None or 0, a spline that provides a reduced chi^2=1
        "none":   no function is fit, and the value for each row is determined
                  by the overscan pixels in that row

        The fitting is done iteratively but, in the first instance, a running
        median of the rows is calculated and rows that deviate from this median
        are rejected (and used in place of the actual value if function="none")

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        niterate: int
            number of rejection iterations
        high_reject: float/None
            number of standard deviations above which to reject high pixels
        low_reject: float/None
            number of standard deviations below which to reject low pixels
        nbiascontam: int/None
            number of columns adjacent to the illuminated region to reject
        function: str/None
            function to fit ("poly" | "spline" | "none")
        order: int/None
            order of polynomial fit or number of spline pieces
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        niterate = params["niterate"]
        lo_rej = params["low_reject"]
        hi_rej = params["high_reject"]
        order = params["order"] or 0  # None is the same as 0
        func = (params["function"] or 'none').lower()
        nbiascontam = params["nbiascontam"]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning(
                    "No changes will be made to {}, since it has "
                    "already been processed by subtractOverscan".format(
                        ad.filename))
                continue

            osec_list = ad.overscan_section()
            dsec_list = ad.data_section()
            for ext, osec, dsec in zip(ad, osec_list, dsec_list):
                x1, x2, y1, y2 = osec.x1, osec.x2, osec.y1, osec.y2
                if x1 > dsec.x1:  # Bias on right
                    x1 += nbiascontam
                    x2 -= 1
                else:  # Bias on left
                    x1 += 1
                    x2 -= nbiascontam

                row = np.arange(y1, y2)
                data = np.mean(ext.data[y1:y2, x1:x2], axis=1)
                # Weights are used to determine the number of spline pieces;
                # each weight is the inverse standard error of the row mean
                wt = np.sqrt(x2 - x1) / ext.read_noise()
                if ext.is_in_adu():
                    wt *= ext.gain()

                medboxsize = 2  # really 2n+1 = 5
                for iter in range(niterate + 1):
                    # The UnivariateSpline will make reduced-chi^2=1 so it will
                    # fit bad rows. Need to mask these before starting, so use a
                    # running median. Probably a good starting point for all fits.
                    if iter == 0 or func == 'none':
                        medarray = np.full((medboxsize * 2 + 1, y2 - y1),
                                           np.nan)
                        for i in range(-medboxsize, medboxsize + 1):
                            mx1 = max(i, 0)
                            mx2 = min(y2 - y1, y2 - y1 + i)
                            medarray[medboxsize + i,
                                     mx1:mx2] = data[:mx2 - mx1]
                        runmed = np.ma.median(np.ma.masked_where(
                            np.isnan(medarray), medarray),
                                              axis=0)
                        residuals = data - runmed
                        sigma = np.sqrt(x2 - x1) / wt  # read noise

                    mask = np.logical_or(
                        residuals > hi_rej * sigma if hi_rej is not None else
                        False, residuals < -lo_rej * sigma
                        if lo_rej is not None else False)

                    # Don't clip any pixels if iter==0
                    if func == 'none' and iter < niterate:
                        # Replace bad data with running median
                        data = np.where(mask, runmed, data)
                    elif func != 'none':
                        if func == 'spline':
                            if order:
                                # Equally-spaced knots (like IRAF)
                                knots = np.linspace(row[0], row[-1],
                                                    order + 1)[1:-1]
                                bias = LSQUnivariateSpline(
                                    row[~mask], data[~mask], knots)
                            else:
                                bias = UnivariateSpline(row[~mask],
                                                        data[~mask],
                                                        w=[wt] * np.sum(~mask))
                        else:
                            bias_init = models.Chebyshev1D(degree=order,
                                                           c0=np.median(
                                                               data[~mask]))
                            fit_f = fitting.LinearLSQFitter()
                            bias = fit_f(bias_init, row[~mask], data[~mask])

                        residuals = data - bias(row)
                        sigma = np.std(residuals[~mask])

                # using "-=" won't change from int to float
                if func != 'none':
                    data = bias(np.arange(0, ext.data.shape[0]))
                ext.data = ext.data - np.tile(
                    data, (ext.data.shape[1], 1)).T.astype(np.float32)

                ext.hdr.set('OVERSEC',
                            '[{}:{},{}:{}]'.format(x1 + 1, x2, y1 + 1, y2),
                            self.keyword_comments['OVERSEC'])
                ext.hdr.set('OVERSCAN', np.mean(data),
                            self.keyword_comments['OVERSCAN'])
                ext.hdr.set('OVERRMS', sigma, self.keyword_comments['OVERRMS'])

            # Timestamp, and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
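As a rough illustration of the function='poly' branch, the sketch below averages the overscan columns of each row, fits a low-order polynomial along the rows, and subtracts the evaluated model from the whole frame; unlike the primitive above it does no iterative rejection.

import numpy as np

def subtract_overscan_poly(frame, overscan_cols, order=1):
    """Fit a polynomial to the per-row overscan mean and subtract it."""
    rows = np.arange(frame.shape[0])
    level = frame[:, overscan_cols].mean(axis=1)      # one bias value per row
    coeffs = np.polynomial.polynomial.polyfit(rows, level, order)
    model = np.polynomial.polynomial.polyval(rows, coeffs)
    return frame - model[:, np.newaxis]               # broadcast down the columns

rng = np.random.default_rng(0)
frame = 500.0 + rng.normal(0, 2, size=(64, 80))       # fake frame with a 500 ADU bias
corrected = subtract_overscan_poly(frame, overscan_cols=slice(72, 80))
print(round(corrected.mean(), 1))                      # close to 0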
Example #47
    def splitBundle(self, adinputs=None, **params):
        """
        Break a GHOST observation bundle into individual exposures.

        This primitive breaks up a GHOST observation bundle into 3 files: one
        containing the Red camera frame, one containing the Blue camera frame,
        and another containing the Slit Viewer (SV) frames.

        The Red and Blue output files are MEFs because each amp quadrant is in
        its own extension. The SV output file contains all SV exposures taken
        during the observation run; it is single-extension for zero-duration
        BIAS observations, but may also be a MEF for other observation types,
        whose exposures are longer.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message('primitive', self.myself(), 'starting'))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by {}".format(
                                ad.filename, self.myself()))
                continue

            log.stdinfo("Unbundling {}:".format(ad.filename))

            # as a special case, write all slitv extns to a single file
            # TODO: may need to make multiple SV files, not one per SV exposure
            # but one per RED/BLUE exposure which contains all SV exposures that
            # overlap with the RED/BLUE one in time (check with Jon)
            extns = [
                x for x in ad
                if (x.hdr.get('CAMERA').lower().startswith('slit')) and (
                    len(x.data) > 0)
            ]
            if len(extns) > 0:
                _write_newfile(extns, '_slit', ad, log)

            # now do non-slitv extensions
            extns = [
                x for x in ad
                if not x.hdr.get('CAMERA').lower().startswith('slit')
            ]
            key = lambda x: '_' + x.hdr.get('CAMERA').lower() + str(
                x.hdr.get('EXPID'))
            extns = sorted(extns, key=key)
            for k, g in itertools.groupby(extns, key=key):
                _write_newfile(list(g), k, ad, log)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params['suffix'], strip=True)

        # returning [] avoids writing copy of bundle out to CWD, which then begs
        # the question: why then bother updating the timestamp & filename?
        return []  #adinputs
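The grouping of non-slit extensions uses the itertools.groupby idiom, which only groups adjacent items and therefore needs the preceding sort on the same key. A tiny standalone demonstration with stand-in (CAMERA, EXPID) records:

import itertools

# Stand-in records for extensions: (CAMERA, EXPID)
extns = [("RED", 2), ("BLUE", 1), ("RED", 1), ("BLUE", 1)]

key = lambda x: "_" + x[0].lower() + str(x[1])   # same shape of key as above
extns = sorted(extns, key=key)                   # groupby needs sorted input
for k, group in itertools.groupby(extns, key=key):
    print(k, list(group))
# _blue1 [('BLUE', 1), ('BLUE', 1)]
# _red1 [('RED', 1)]
# _red2 [('RED', 2)]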
Example #48
    def tileArrays(self, adinputs=None, **params):
        """
        This primitive combines extensions by tiling (no interpolation).
        The array_section() and detector_section() descriptors are used
        to derive the geometry of the tiling, so outside help (from the
        instrument's geometry_conf module) is only required if there are
        multiple arrays being tiled together, as the gaps need to be
        specified.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        tile_all: bool
            tile to a single extension, rather than one per array?
            (array=physical detector)
        sci_only: bool
            tile only the data plane?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        tile_all = params['tile_all']
        attributes = ['data'] if params["sci_only"] else None

        adoutputs = []
        for ad in adinputs:
            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to tile".format(ad.filename))
                adoutputs.append(ad)
                continue

            # Get information to calculate the output geometry
            # TODO: Think about arbitrary ROIs
            array_info = gt.array_information(ad)
            detshape = array_info.detector_shape
            if not tile_all and set(array_info.array_shapes) == {(1, 1)}:
                log.warning("{} has nothing to tile, as tile_all=False but "
                            "each array has only one amplifier.")
                adoutputs.append(ad)
                continue

            blocks = [
                Block(ad[arrays], shape=shape) for arrays, shape in zip(
                    array_info.extensions, array_info.array_shapes)
            ]
            offsets = [
                ad[exts[0]].array_section() for exts in array_info.extensions
            ]

            if tile_all and detshape != (1, 1):  # We need gaps!
                geotable = import_module('.geometry_conf', self.inst_lookups)
                chip_gaps = geotable.tile_gaps[ad.detector_name()]
                try:
                    xgap, ygap = chip_gaps
                except TypeError:  # single number, applies to both
                    xgap = ygap = chip_gaps
                transforms = []
                for i, (origin,
                        offset) in enumerate(zip(array_info.origins, offsets)):
                    xshift = (origin[1] + offset.x1 + xgap *
                              (i % detshape[1])) // ad.detector_x_bin()
                    yshift = (origin[0] + offset.y1 + ygap *
                              (i // detshape[1])) // ad.detector_y_bin()
                    transforms.append(
                        Transform(models.Shift(xshift) & models.Shift(yshift)))
                adg = AstroDataGroup(blocks, transforms)
                adg.set_reference()
                ad_out = adg.transform(attributes=attributes,
                                       process_objcat=True)
            else:
                # ADG.transform() produces full AD objects so we start with
                # the first one, and then append the single extensions created
                # by later calls to it.
                for i, block in enumerate(blocks):
                    # Simply create a single tiled array
                    adg = AstroDataGroup([block])
                    adg.set_reference()
                    if i == 0:
                        ad_out = adg.transform(attributes=attributes,
                                               process_objcat=True)
                    else:
                        ad_out.append(
                            adg.transform(attributes=attributes,
                                          process_objcat=True)[0])

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.orig_filename = ad.filename
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)
        return adoutputs
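When tile_all=True and there is more than one physical array, the chip gaps from geometry_conf are inserted between the tiles. A simplified numpy sketch of placing two equal-sized detector blocks onto a single canvas with a fixed gap (the gap value is made up):

import numpy as np

def tile_with_gap(blocks, xgap=10):
    """Place equal-sized 2-D blocks side by side, separated by xgap columns."""
    ny, nx = blocks[0].shape
    canvas = np.zeros((ny, len(blocks) * nx + (len(blocks) - 1) * xgap),
                      dtype=blocks[0].dtype)
    for i, block in enumerate(blocks):
        x0 = i * (nx + xgap)
        canvas[:, x0:x0 + nx] = block
    return canvas

a = np.ones((4, 6))
b = 2 * np.ones((4, 6))
print(tile_with_gap([a, b]).shape)   # (4, 22)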
Example #49
    def mosaicDetectors(self, adinputs=None, **params):
        """
        This primitive does a full mosaic of all the arrays in an AD object.
        An appropriate geometry_conf.py module containing geometric information
        is required.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files.
        sci_only: bool
            mosaic only SCI image data. Default is False
        order: int (1-5)
            order of spline interpolation
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        suffix = params['suffix']
        order = params['order']
        attributes = ['data'] if params['sci_only'] else None
        geotable = import_module('.geometry_conf', self.inst_lookups)

        adoutputs = []
        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by mosaicDetectors".format(
                                ad.filename))
                adoutputs.append(ad)
                continue

            if len(ad) == 1:
                log.warning("{} has only one extension, so there's nothing "
                            "to mosaic".format(ad.filename))
                adoutputs.append(ad)
                continue

            # If there's an overscan section, we must trim it before mosaicking
            try:
                overscan_kw = ad._keyword_for('overscan_section')
            except AttributeError:  # doesn't exist for this AD, so carry on
                pass
            else:
                if overscan_kw in ad.hdr:
                    ad = gt.trim_to_data_section(ad, self.keyword_comments)

            # Create the blocks (individual physical detectors)
            array_info = gt.array_information(ad)
            blocks = [
                Block(ad[arrays], shape=shape) for arrays, shape in zip(
                    array_info.extensions, array_info.array_shapes)
            ]
            offsets = [
                ad[exts[0]].array_section() for exts in array_info.extensions
            ]

            detname = ad.detector_name()
            xbin, ybin = ad.detector_x_bin(), ad.detector_y_bin()
            geometry = geotable.geometry[detname]
            default_shape = geometry.get('default_shape')
            adg = AstroDataGroup()

            for block, origin, offset in zip(blocks, array_info.origins,
                                             offsets):
                # Origins are in (x, y) order in LUT
                block_geom = geometry[origin[::-1]]
                nx, ny = block_geom.get('shape', default_shape)
                nx /= xbin
                ny /= ybin
                shift = block_geom.get('shift', (0, 0))
                rot = block_geom.get('rotation', 0.)
                mag = block_geom.get('magnification', (1, 1))
                transform = Transform()

                # Shift the Block's coordinates based on its location within
                # the full array, to ensure any rotation takes place around
                # the true centre.
                if offset.x1 != 0 or offset.y1 != 0:
                    transform.append(
                        models.Shift(float(offset.x1) / xbin)
                        & models.Shift(float(offset.y1) / ybin))

                if rot != 0 or mag != (1, 1):
                    # Shift to centre, do whatever, and then shift back
                    transform.append(
                        models.Shift(-0.5 * (nx - 1)) & models.Shift(-0.5 *
                                                                     (ny - 1)))
                    if rot != 0:
                        # Cope with non-square pixels by scaling in one
                        # direction to make them square before applying the
                        # rotation, and then reversing that.
                        if xbin != ybin:
                            transform.append(
                                models.Identity(1) & models.Scale(ybin / xbin))
                        transform.append(models.Rotation2D(rot))
                        if xbin != ybin:
                            transform.append(
                                models.Identity(1) & models.Scale(xbin / ybin))
                    if mag != (1, 1):
                        transform.append(
                            models.Scale(mag[0]) & models.Scale(mag[1]))
                    transform.append(
                        models.Shift(0.5 * (nx - 1)) & models.Shift(0.5 *
                                                                    (ny - 1)))
                transform.append(
                    models.Shift(float(shift[0]) / xbin)
                    & models.Shift(float(shift[1]) / ybin))
                adg.append(block, transform)

            adg.set_reference()
            ad_out = adg.transform(attributes=attributes,
                                   order=order,
                                   process_objcat=False)

            ad_out.orig_filename = ad.filename
            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)
            adoutputs.append(ad_out)

        return adoutputs
Example #50
    def nonlinearityCorrect(self, adinputs=None, suffix=None):
        """
        Run on raw or nprepared Gemini NIRI data, this script calculates and
        applies a per-pixel linearity correction based on the counts in the
        pixel, the exposure time, the read mode, the bias level and the ROI.
        Pixels over the maximum correctable value are set to BADVAL unless
        given the force flag. Note that you may use glob expansion in infile,
        however, any pattern matching characters (*,?) must be either quoted
        or escaped with a backslash. Do we need a badval parameter that defines
        a value to assign to uncorrectable pixels, or do we want to just add
        those pixels to the DQ plane with a specific value?

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        # Instantiate the log
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad in adinputs:
            if ad.phu.get(timestamp_key):
                log.warning(
                    "No changes will be made to {}, since it has "
                    "already been processed by nonlinearityCorrect".format(
                        ad.filename))
                continue

            in_adu = ad.is_in_adu()
            total_exptime = ad.exposure_time()
            coadds = ad.coadds()
            # Check the raw exposure time (i.e., per coadd). First, convert
            # the total exposure time returned by the descriptor back to
            # the raw exposure time
            exptime = total_exptime / coadds
            if exptime > 600.:
                log.warning(
                    "Exposure time {} for {} is outside the range "
                    "used to derive correction.", format(exptime, ad.filename))

            for ext, coeffs in zip(ad, ad.nonlinearity_coeffs()):
                if coeffs is None:
                    log.warning("No nonlinearity coefficients found for "
                                "{}:{} - no correction applied".format(
                                    ad.filename, ext.hdr['EXTVER']))
                    continue

                raw_mean_value = np.mean(ext.data) / coadds
                log.fullinfo("The mean value of the raw pixel data in " \
                             "{} is {:.8f}".format(ext.filename, raw_mean_value))

                log.fullinfo(
                    "Coefficients used = {:.12f} {:.9e} {:.9e}".format(
                        coeffs.time_delta, coeffs.gamma, coeffs.eta))

                # Convert back to ADU per exposure if data are in electrons
                conv_factor = (1 if in_adu else ext.gain()) * coadds

                raw_pixel_data = ext.data / conv_factor  # ADU per 1 coadd
                # Create a new array that contains the corrected pixel data
                corrected_pixel_data = raw_pixel_data * (
                    1 + raw_pixel_data *
                    (coeffs.gamma + coeffs.eta * raw_pixel_data)) * conv_factor

                # Try to do something useful with the VAR plane, if it exists
                # Since the data are fairly pristine, VAR will simply be the
                # Poisson noise (divided by gain if in ADU), possibly plus RN**2
                # So making an additive correction will sort this out,
                # irrespective of whether there's read noise
                conv_factor = ext.gain() if in_adu else 1
                if ext.variance is not None:
                    ext.variance += (corrected_pixel_data -
                                     ext.data) / conv_factor
                # Now update the SCI extension
                ext.data = corrected_pixel_data

                # Correct for the exposure time issue by scaling the counts
                # to the nominal exposure time
                ext.multiply(exptime / (exptime + coeffs.time_delta))

                # Determine the mean of the corrected pixel data
                corrected_mean_value = np.mean(ext.data) / coadds
                log.fullinfo("The mean value of the corrected pixel data in "
                             "{} is {:.8f}".format(ext.filename,
                                                   corrected_mean_value))

                # Correct the exposure time by adding coeff1 * coadds
                total_exptime = total_exptime + coeffs.time_delta * coadds
                log.fullinfo(
                    "The true total exposure time = {}".format(total_exptime))

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
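The per-pixel correction applied above is a cubic polynomial in the raw ADU-per-coadd signal, corrected = raw * (1 + raw * (gamma + eta * raw)), followed by a rescaling for the effective exposure time. A standalone sketch of just the polynomial step, with purely illustrative coefficients:

import numpy as np

def correct_nonlinearity(raw_adu, gamma, eta):
    """Apply corrected = raw * (1 + raw * (gamma + eta * raw)) per pixel."""
    return raw_adu * (1.0 + raw_adu * (gamma + eta * raw_adu))

raw = np.array([1000.0, 10000.0, 20000.0])
# Illustrative coefficients only; real values come from nonlinearity_coeffs()
print(correct_nonlinearity(raw, gamma=1.0e-6, eta=5.0e-12))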
    def slitIllumCorrect(self,
                         adinputs=None,
                         slit_illum=None,
                         do_illum=True,
                         suffix="_illumCorrected"):
        """
        This primitive will divide each SCI extension of the inputs by those
        of the corresponding slit illumination image. If the inputs contain
        VAR or DQ frames, those will also be updated accordingly due to the
        division on the data.

        Parameters
        ----------
        adinputs : list of AstroData
            Data to be corrected.
        slit_illum : str or AstroData
            Slit illumination path or AstroData object.
        do_illum: bool, optional
            Perform slit illumination correction? (Default: True)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        qecorr_key = self.timestamp_keys['QECorrect']

        if not do_illum:
            log.warning("Slit Illumination correction has been turned off.")
            return adinputs

        if slit_illum is None:
            raise NotImplementedError
        else:
            slit_illum_list = slit_illum

        # Provide a slit illumination AstroData object for every science frame
        ad_outputs = []
        for ad, slit_illum_ad in zip(
                *gt.make_lists(adinputs, slit_illum_list, force_ad=True)):

            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by flatCorrect".format(
                                ad.filename))
                continue

            if slit_illum_ad is None:
                if self.mode in ['sq']:
                    raise OSError(
                        "No processed slit illumination listed for {}".format(
                            ad.filename))
                else:
                    log.warning("No changes will be made to {}, since no slit "
                                "illumination has been specified".format(
                                    ad.filename))
                    continue

            gt.check_inputs_match(ad, slit_illum_ad, check_shape=False)

            if not all(
                [e1.shape == e2.shape for (e1, e2) in zip(ad, slit_illum_ad)]):
                slit_illum_ad = gt.clip_auxiliary_data(adinput=[ad],
                                                       aux=[slit_illum_ad])[0]

            log.info("Dividing the input AstroData object {} by this \n"
                     "slit illumination file:  \n{}".format(
                         ad.filename, slit_illum_ad.filename))

            ad_out = deepcopy(ad)
            ad_out.divide(slit_illum_ad)

            # Update the header and filename, copying QECORR keyword from flat
            ad_out.phu.set("SLTILLIM", slit_illum_ad.filename,
                           self.keyword_comments["SLTILLIM"])

            try:
                qecorr_value = slit_illum_ad.phu[qecorr_key]
            except KeyError:
                pass
            else:
                log.fullinfo(
                    "Copying {} keyword from slit illumination".format(
                        qecorr_key))
                ad_out.phu.set(qecorr_key, qecorr_value,
                               slit_illum_ad.phu.comments[qecorr_key])

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)

            if slit_illum_ad.path:
                add_provenance(ad_out, slit_illum_ad.filename,
                               md5sum(slit_illum_ad.path) or "", self.myself())

            ad_outputs.append(ad_out)

        return ad_outputs
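slitIllumCorrect leaves the variance and DQ propagation to AstroData arithmetic (ad_out.divide). A toy numpy sketch of the first-order variance propagation for such a quotient, with invented pixel values:

import numpy as np

sci = np.array([[100.0, 120.0], [90.0, 110.0]])
var = sci.copy()                                  # Poisson-like variance
illum = np.array([[0.95, 1.00], [1.05, 1.00]])    # slit illumination, close to 1
illum_var = np.full_like(illum, 1.0e-4)

out = sci / illum
# First-order variance of a quotient a/b
out_var = var / illum**2 + sci**2 * illum_var / illum**4

print(out)
print(out_var)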
Example #52
    def updateWCS(self, rc):
        """
        This primitive applies a previously calculated WCS correction.
        The solution should be stored in the RC as a dictionary, with
        astrodata instances as the keys and pywcs.WCS objects as the
        values.
        """

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"], logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "updateWCS", "starting"))

        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["updateWCS"]

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Get the necessary parameters from the RC
        wcs = rc["wcs"]
        if wcs is None:
            log.warning("No new WCS supplied; no correction will be " "performed.")
        else:

            # Loop over each input AstroData object in the input list
            for ad in rc.get_inputs_as_astrodata():

                ad_wcs = None
                if isinstance(wcs, dict):
                    try:
                        ad_wcs = wcs[ad]
                    except KeyError:
                        ad_wcs = wcs
                elif isinstance(wcs, pywcs.WCS):
                    ad_wcs = wcs

                if ad_wcs is None:
                    log.warning("No new WCS supplied for %s; " "no correction will be performed" % ad.filename)
                    adoutput_list.append(ad)
                    continue

                for ext in ad:
                    extname = ext.extname()
                    extver = ext.extver()

                    ext_wcs = None
                    if isinstance(ad_wcs, dict):
                        try:
                            ext_wcs = ad_wcs[extver]
                        except KeyError:
                            pass
                    elif isinstance(ad_wcs, pywcs.WCS):
                        ext_wcs = ad_wcs

                    if ext_wcs is None:
                        log.warning(
                            "No new WCS supplied for %s[%s,%d]; "
                            "no correction will be performed" % (ad.filename, extname, extver)
                        )
                        continue
                    elif not isinstance(ext_wcs, pywcs.WCS):
                        raise Errors.InputError(
                            "Parameter wcs must be "
                            "either a pywcs.WCS object "
                            "or a dictionary of pywcs.WCS "
                            "objects"
                        )

                    # If image extension, correct the header values
                    if extname in ["SCI", "VAR", "DQ"]:
                        log.fullinfo(
                            "Correcting CRVAL, CRPIX, and CD in "
                            "image extension headers for %s[%s,%d]" % (ad.filename, extname, extver)
                        )
                        log.fullinfo("CRVAL: " + repr(ext_wcs.wcs.crval))
                        log.fullinfo("CRPIX: " + repr(ext_wcs.wcs.crpix))
                        log.fullinfo("CD: " + repr(ext_wcs.wcs.cd))

                        ext.set_key_value("CRVAL1", ext_wcs.wcs.crval[0], comment=keyword_comments["CRVAL1"])
                        ext.set_key_value("CRVAL2", ext_wcs.wcs.crval[1], comment=keyword_comments["CRVAL2"])
                        ext.set_key_value("CRPIX1", ext_wcs.wcs.crpix[0], comment=keyword_comments["CRPIX1"])
                        ext.set_key_value("CRPIX2", ext_wcs.wcs.crpix[1], comment=keyword_comments["CRPIX2"])
                        ext.set_key_value("CD1_1", ext_wcs.wcs.cd[0, 0], comment=keyword_comments["CD1_1"])
                        ext.set_key_value("CD1_2", ext_wcs.wcs.cd[0, 1], comment=keyword_comments["CD1_2"])
                        ext.set_key_value("CD2_1", ext_wcs.wcs.cd[1, 0], comment=keyword_comments["CD2_1"])
                        ext.set_key_value("CD2_2", ext_wcs.wcs.cd[1, 1], comment=keyword_comments["CD2_2"])

                    # If objcat, fix the RA/Dec columns
                    elif extname == "OBJCAT":
                        log.fullinfo(
                            "Correcting RA, Dec columns in OBJCAT "
                            "extension for %s[%s,%d]" % (ad.filename, extname, extver)
                        )
                        for row in ext.data:
                            xy = np.array([row["X_IMAGE"], row["Y_IMAGE"]])
                            radec = ext_wcs.wcs_pix2sky([xy], 1)[0]
                            # FIXME - is it correct to set origin to 1 here?
                            # Also we should be setting ra_dec_order=True, but
                            # that breaks with the wcs missing the lattype
                            # property
                            row["X_WORLD"] = radec[0]
                            row["Y_WORLD"] = radec[1]

                # Add the appropriate time stamps to the PHU
                gt.mark_history(adinput=ad, keyword=timestamp_key)

                # Change the filename
                ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"], strip=True)

                adoutput_list.append(ad)

            # Report the list of output AstroData objects to the reduction
            # context
            rc.report_output(adoutput_list)

        yield rc
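In this older reduction-context style, the wcs parameter can be a single WCS object or a dictionary of them. The sketch below uses astropy.wcs as a stand-in for the pywcs module assumed above; it builds a simple solution keyed by EXTVER and shows the reference-pixel values and pixel-to-sky conversion that updateWCS writes into the headers and OBJCAT.

from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [150.1, 2.2]              # degrees
w.wcs.crpix = [1024.5, 1024.5]
w.wcs.cdelt = [-4.0e-5, 4.0e-5]         # degrees per pixel (CD matrix omitted here)

wcs_solution = {1: w}                   # per-extension dictionary, keyed by EXTVER

# Values the primitive would write into CRVAL*/CRPIX* of each image extension
print(w.wcs.crval, w.wcs.crpix)

# OBJCAT-style recomputation of RA/Dec from pixel coordinates (origin=1)
ra, dec = w.wcs_pix2world([[512.0, 512.0]], 1)[0]
print(ra, dec)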
Example #53
    def correctWCSToReferenceFrame(self, rc):
        """ 
        This primitive registers images to a reference image by correcting
        the relative error in their world coordinate systems. The function
        uses points of reference common to the reference image and the
        input images to fit the input WCS to the reference one. The fit
        is done by a least-squares minimization of the difference between
        the reference points in the input image pixel coordinate system.
        This function is intended to be followed by the
        align_to_reference_image function, which applies the relative
        transformation encoded in the WCS to transform input images into the
        reference image pixel coordinate system.
        
        The primary registration method is intended to be by direct mapping
        of sources in the image frame to correlated sources in the reference
        frame. This method fails when there are no correlated sources in the
        field, or when the WCSs are very far off to begin with. As a back-up
        method, the user can try correcting the WCS by the shifts indicated
        in the POFFSET and QOFFSET header keywords (option fallback='header').
        By default, only the direct method is attempted, as it is expected
        that the relative WCS will generally be more correct than the
        indirect method. If the user prefers not to attempt direct mapping
        at all, they may set method to 'header'.
        
        In order to use the direct mapping method, sources must have been
        detected in the frame and attached to the AstroData instance in an 
        OBJCAT extension. This can be accomplished via the detectSources
        primitive. Running time is optimal, and sometimes the solution is 
        more robust, when there are not too many sources in the OBJCAT. Try
        running detectSources with threshold=20. The solution may also be
        more robust if sub-optimal sources are rejected from the set of 
        correlated sources (use option cull_sources=True). This option may
        substantially increase the running time if there are many sources in
        the OBJCAT.
        
        It is expected that the relative difference between the WCSs of 
        images to be combined should be quite small, so it may not be necessary
        to allow rotation and scaling degrees of freedom when fitting the image
        WCS to the reference WCS. However, if it is desired, the options 
        rotate and scale can be used to allow these degrees of freedom. Note
        that these options refer to rotation/scaling of the WCS itself, not the
        images. Significant rotation and scaling of the images themselves 
        will generally already be encoded in the WCS, and will be corrected for
        when the images are aligned.
        
        The WCS keywords in the headers of the output images are updated
        to contain the optimal registration solution.
        
        :param method: method to use to generate reference points. Options
                       are 'sources' to directly map sources from the input
                       image to the reference image,
                       or 'header' to generate reference points from the 
                       POFFSET and QOFFSET keywords in the image headers.
        :type method: string, either 'sources' or 'header'
        
        :param fallback: back-up method for generating reference points
                         if the primary method fails. The 'sources' option
                         cannot be used as the fallback.
        :type fallback: string, either 'header' or None.
        
        :param cull_sources: flag to indicate whether sub-optimal sources 
                             should be rejected before attempting a direct
                             mapping. If True, sources that are saturated,
                             or otherwise unlikely to be point sources
                             will be eliminated from the list of reference
                             points.
        :type cull_sources: bool
        
        :param rotate: flag to indicate whether the input image WCSs should
                       be allowed to rotate with respect to the reference image
                       WCS
        :type rotate: bool
        
        :param scale: flag to indicate whether the input image WCSs should
                      be allowed to scale with respect to the reference image
                      WCS. The same scale factor is applied to all dimensions.
        :type scale: bool
        """

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"], logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "correctWCSToReferenceFrame", "starting"))

        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["correctWCSToReferenceFrame"]

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Check whether two or more input AstroData objects were provided
        adinput = rc.get_inputs_as_astrodata()
        correcting = True
        if len(adinput) <= 1:
            log.warning(
                "No correction will be performed, since at least "
                "two input AstroData objects are required for "
                "correctWCSToReferenceFrame"
            )
            # Set the input AstroData object list equal to the output AstroData
            # objects list without further processing
            adoutput_list = adinput
            correcting = False

        # Check that method is sensibly defined
        if correcting:

            # Get the necessary parameters from the RC
            method = rc["method"]
            fallback = rc["fallback"]
            cull_sources = rc["cull_sources"]
            rotate = rc["rotate"]
            scale = rc["scale"]

            if method == "None":
                method = None
            if fallback == "None":
                fallback = None

            if method is None:
                if fallback is None:
                    log.warning("No correction will be performed, since both " "method and fallback are None")
                    adoutput_list = adinput
                    correcting = False
                else:
                    method = fallback

        # Check that images have one SCI extension, and if necessary,
        # sources defined in an OBJCAT extension
        if correcting:
            n_test = []
            for ad in adinput:

                # Make sure all images have one science extension
                if len(ad["SCI"]) != 1:
                    raise Errors.InputError("Input images must have only one " "SCI extension.")

                # Get number of objects from OBJCAT
                objcat = ad["OBJCAT"]
                if objcat is None:
                    num_cat = 0
                else:
                    num_cat = len(objcat)
                if num_cat == 0:
                    n_obj = 0
                elif num_cat > 1:
                    raise Errors.InputError("Input images must have only one " + "OBJCAT extension.")
                else:
                    n_obj = len(objcat.data)

                n_test.append(n_obj)

            if n_test[0] == 0 and method == "sources":
                log.warning("No objects found in reference image.")
                if fallback is not None:
                    log.warning("Only attempting indirect WCS alignment, " + "via " + fallback + " mapping")
                    method = fallback

                else:
                    log.warning(
                        "WCS can only be corrected indirectly "
                        + "and fallback method is set to None. Not "
                        + "attempting WCS correction."
                    )
                    adoutput_list = adinput
                    correcting = False

        # If input passed all checks, apply the selected method
        if correcting:

            # Reference image is first one supplied
            # (won't be modified)
            reference = adinput[0]
            adoutput_list.append(reference)
            log.stdinfo("Reference image: " + reference.filename)

            # If no OBJCAT/no sources in reference image, or user choice,
            # use indirect alignment for all images at once
            if method == "header":
                reg_ad = _header_align(reference, adinput[1:])
                adoutput_list.extend(reg_ad)
            elif method != "sources":
                raise Errors.InputError("Did not recognize method " + method)

            # otherwise try to do direct alignment for each image by correlating
            # sources in the reference and input images
            else:

                for i in range(1, len(adinput)):

                    ad = adinput[i]

                    if n_test[i] == 0:
                        log.warning("No objects found in " + ad.filename)
                        if fallback is not None:
                            log.warning("Only attempting indirect WCS alignment, " + "via " + fallback + " mapping")
                            if fallback == "header":
                                adoutput = _header_align(reference, ad)
                            else:
                                raise Errors.InputError("Did not recognize fallback method " + fallback)

                        else:
                            log.warning(
                                "WCS can only be corrected indirectly "
                                + "and fallback=None. Not attempting WCS "
                                + "correction for "
                                + ad.filename
                            )
                            adoutput_list.append(ad)
                            continue
                    else:
                        log.fullinfo("Number of objects in image %s: %d" % (ad.filename, n_test[i]))

                        log.fullinfo("Cross-correlating sources in %s, %s" % (reference.filename, ad.filename))
                        obj_list = _correlate_sources(reference, ad, cull_sources=cull_sources)

                        n_corr = len(obj_list[0])

                        if n_corr == 0:
                            log.warning("No correlated sources found.")
                            if fallback is not None:
                                log.warning("Only attempting indirect WCS " + "alignment, via " + fallback + " mapping")

                                if fallback == "header":
                                    adoutput = _header_align(reference, ad)
                                else:
                                    raise Errors.InputError("Did not recognize " + "fallback method " + fallback)

                            else:
                                log.warning(
                                    "WCS can only be corrected indirectly "
                                    + "and fallback=None. Not attempting "
                                    + "WCS correction for "
                                    + ad.filename
                                )
                                adoutput_list.append(ad)
                                continue
                        else:
                            log.fullinfo("Number of correlated sources: %d" % n_corr)

                            # Check the fit geometry depending on the
                            # number of objects
                            if n_corr == 1:
                                log.warning("Too few objects. Setting " + "rotate=False, " + "scale=False")
                                rotate = False
                                scale = False

                            log.fullinfo("\nSources used to align frames:")
                            log.fullinfo(
                                "  %7s %7s %7s %7s\n%s" % (" Ref. x", "Ref. y", "Img. x", "Img. y", "  " + "-" * 31)
                            )
                            output_obj = zip(obj_list[0], obj_list[1])
                            for obj in output_obj:
                                obj_string = "  %7.2f %7.2f %7.2f %7.2f" % (obj[0][0], obj[0][1], obj[1][0], obj[1][1])
                                log.fullinfo(obj_string)
                            log.fullinfo("")

                            adoutput = _align_wcs(reference, ad, [obj_list], rotate=rotate, scale=scale)

                    adoutput_list.extend(adoutput)

            # Change the filenames and add the appropriate timestamps
            for ad in adoutput_list:
                gt.mark_history(adinput=ad, keyword=timestamp_key)

                ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"], strip=True)

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
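For a shift-only model, the least-squares fit described in the correctWCSToReferenceFrame docstring reduces to the mean offset between matched source positions. A toy sketch with invented coordinates (not the actual _align_wcs implementation, which can also fit rotation and scale):

import numpy as np

# Matched (x, y) positions of the same sources in the reference and input frames
ref_xy = np.array([[100.0, 200.0], [300.0, 250.0], [150.0, 400.0]])
img_xy = ref_xy + np.array([2.5, -1.2])   # input frame offset by a constant shift

# The least-squares solution for a pure shift is the mean residual
shift = np.mean(ref_xy - img_xy, axis=0)
print(shift)                              # approximately [-2.5, 1.2]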
Example #54
 def addVAR(self, rc):
     """
     This primitive calculates the variance of each science extension in the
     input AstroData object and adds the variance as an additional
     extension. This primitive will determine the units of the pixel data in
     the input science extension and calculate the variance in the same
     units. The two main components of the variance can be calculated and
     added separately, if desired, using the following formula:
     
     variance(read_noise) [electrons] = (read_noise [electrons])^2
     variance(read_noise) [ADU] = ((read_noise [electrons]) / gain)^2

     variance(poisson_noise) [electrons] =
         (number of electrons in that pixel)
     variance(poisson_noise) [ADU] =
         ((number of electrons in that pixel) / gain^2)
     
     The pixel data in the variance extensions will be the same size as the
     pixel data in the science extension.
     
     The read noise component of the variance can be calculated and added to
     the variance extension at any time, but should be done before
     performing operations with other datasets.
     
     The Poisson noise component of the variance can be calculated and added
     to the variance extension only after any bias levels have been
     subtracted from the pixel data in the science extension. 
     
     The variance of a raw bias frame contains only a read noise component
     (which represents the uncertainty in the bias level of each pixel),
     since the Poisson noise component of a bias frame is meaningless.
     
     :param read_noise: set to True to add the read noise component of the
                        variance to the variance extension
     :type read_noise: Python boolean
     
     :param poisson_noise: set to True to add the Poisson noise component
                           of the variance to the variance extension
     :type poisson_noise: Python boolean
     
     """
     # Instantiate the log
     log = logutils.get_logger(__name__)
     
     # Log the standard "starting primitive" debug message
     log.debug(gt.log_message("primitive", "addVAR", "starting"))
     
     # Define the keyword to be used for the time stamp for this primitive
     timestamp_key = self.timestamp_keys["addVAR"]
     
     # Initialize the list of output AstroData objects
     adoutput_list = []
     
     # Check to see what component of variance will be added and whether it
     # is sensible to do so
     read_noise = rc["read_noise"]
     poisson_noise = rc["poisson_noise"]
     
     if read_noise and poisson_noise:
         log.stdinfo("Adding the read noise component and the poisson "
                     "noise component of the variance")
     if read_noise and not poisson_noise:
         log.stdinfo("Adding the read noise component of the variance")
     if not read_noise and poisson_noise:
         log.stdinfo("Adding the poisson noise component of the variance")
     if not read_noise and not poisson_noise:
         log.warning("Cannot add a variance extension since no variance "
                     "component has been selected")
     
     # Loop over each input AstroData object in the input list
     for ad in rc.get_inputs_as_astrodata():
         
         if poisson_noise and "BIAS" in ad.types:
             log.warning("It is not recommended to add a poisson noise "
                         "component to the variance of a bias frame")
         if (poisson_noise and "GMOS" in ad.types and not
             ad.phu_get_key_value(self.timestamp_keys["subtractBias"])):
             
             log.warning("It is not recommended to calculate a poisson "
                         "noise component of the variance using data that "
                         "still contains a bias level")
         
         # Call the _calculate_var helper function to calculate and add the
         # variance extension to the input AstroData object
         ad = self._calculate_var(adinput=ad, add_read_noise=read_noise,
                                  add_poisson_noise=poisson_noise)
         
         # Add the appropriate time stamps to the PHU
         gt.mark_history(adinput=ad, keyword=timestamp_key)
         
         # Change the filename
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Append the output AstroData object to the list of output
         # AstroData objects
         adoutput_list.append(ad)
     
     # Report the list of output AstroData objects to the reduction context
     rc.report_output(adoutput_list)
     
     yield rc
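A standalone numpy sketch of the two variance components described in the addVAR docstring, evaluated in ADU with invented gain and read-noise values:

import numpy as np

gain = 4.5            # electrons per ADU
read_noise = 7.0      # electrons
data_adu = np.array([[1000.0, 1500.0], [800.0, 0.0]])    # bias-subtracted counts

var_read = np.full_like(data_adu, (read_noise / gain) ** 2)    # ADU^2
var_poisson = np.where(data_adu > 0, data_adu, 0) / gain       # N_e / gain^2, in ADU^2

print(var_read + var_poisson)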
Example #55
    def addReferenceCatalog(self, adinputs=None, **params):
        """
        This primitive calls the gemini_catalog_client module to query a
        catalog server and construct a fits table containing the catalog data

        That module will query either gemini catalog servers or vizier.
        Currently, the sdss9 and 2mass point source catalogs are supported.

        For example, with sdss9, the FITS table has the following columns:

        - 'Id'       : Unique ID. Simple running number
        - 'Cat-id'   : SDSS catalog source name
        - 'RAJ2000'  : RA as J2000 decimal degrees
        - 'DEJ2000'  : Dec as J2000 decimal degrees
        - 'umag'     : SDSS u band magnitude
        - 'e_umag'   : SDSS u band magnitude error estimate
        - 'gmag'     : SDSS g band magnitude
        - 'e_gmag'   : SDSS g band magnitude error estimate
        - 'rmag'     : SDSS r band magnitude
        - 'e_rmag'   : SDSS r band magnitude error estimate
        - 'imag'     : SDSS i band magnitude
        - 'e_imag'   : SDSS i band magnitude error estimate
        - 'zmag'     : SDSS z band magnitude
        - 'e_zmag'   : SDSS z band magnitude error estimate

        With 2mass, the first 4 columns are the same, but the photometry
        columns reflect the J, H, and K bands.

        This primitive then adds the FITS table catalog to the AstroData
        object as 'REFCAT'.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        radius: float
            search radius (in degrees)
        source: str
            identifier for server to be used for catalog search
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        source = params["source"]
        radius = params["radius"]

        for ad in adinputs:
            try:
                ra = ad.wcs_ra()
                dec = ad.wcs_dec()
                if type(ra) is not float:
                    raise ValueError("wcs_ra descriptor did not return a float.")
                if type(dec) is not float:
                    raise ValueError("wcs_dec descriptor did not return a float.")
            except:
                if "qa" in self.mode:
                    log.warning("No RA/Dec in header of {}; cannot find "
                                "reference sources".format(ad.filename))
                    continue
                else:
                    raise

            log.fullinfo("Querying {} for reference catalog".format(source))
            import warnings
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                refcat = get_fits_table(source, ra, dec, radius)

            if refcat is None:
                log.stdinfo("No reference catalog sources found for {}".
                            format(ad.filename))
            else:
                log.stdinfo("Found {} reference catalog sources for {}".
                            format(len(refcat), ad.filename))
                filter_name = ad.filter_name(pretty=True)
                colterm_dict = color_corrections.colorTerms
                try:
                    formulae = colterm_dict[filter_name]
                except KeyError:
                    log.warning("Filter {} not in catalogs - unable to flux "
                                "calibrate".format(filter_name))
                    formulae = []
                ad.REFCAT = _calculate_magnitudes(refcat, formulae)

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
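The REFCAT attached above is an ordinary FITS-compatible table. A sketch of building one with astropy.table, using a few of the sdss9-style columns listed in the docstring (all values invented):

from astropy.table import Table

refcat = Table({
    "Id": [1, 2],
    "Cat-id": ["SDSS J123456.78+005612.3", "SDSS J123501.22+005640.9"],
    "RAJ2000": [188.512, 188.530],
    "DEJ2000": [0.941, 0.947],
    "rmag": [18.2, 19.7],
    "e_rmag": [0.02, 0.05],
})
print(refcat)
# ad.REFCAT = refcat    # attaching to an AstroData object, as the primitive does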
Example #56
    def detectSources(self, adinputs=None, **params):
        """
        Find x,y positions of all the objects in the input image. Append 
        a FITS table extension with position information plus columns for
        standard objects to be updated with position from addReferenceCatalog
        (if any are found for the field).

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        mask: bool
            apply DQ plane as a mask before detection?
        replace_flags: int
            DQ | replace_flags has to be non-zero to be masked
        set_saturation: bool
            set the saturation level of the data for SExtractor?
        detect_minarea: int
            minimum area of detection (pixels)
        detect_thresh: float
            detection threshold (standard deviations)
        analysis_thresh: float
            analysis threshold (standard deviations)
        deblend_mincont: float
            minimum deblending contrast
        phot_min_radius: float
            minimum photometry radius (arcseconds)
        back_size : int
            background mesh size (pixels)
        back_filter_size: int
            background filtering scale
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        sfx = params["suffix"]
        set_saturation = params["set_saturation"]
        # Setting mask_bits=0 is the same as not replacing bad pixels
        mask_bits = params["replace_flags"] if params["mask"] else 0

        # Will raise an Exception if SExtractor is too old or missing
        SExtractorETI(primitives_class=self).check_version()

        # Delete primitive-specific keywords from params so we only have
        # the ones for SExtractor
        for key in ("suffix", "set_saturation", "replace_flags", "mask"):
            del params[key]

        adoutputs = []
        for ad in adinputs:
            # Get a seeing estimate from the header, if available
            seeing_estimate = ad.phu.get('MEANFWHM')

            # Get the appropriate SExtractor input files
            dqtype = 'no_dq' if any(ext.mask is None for ext in ad) else 'dq'
            sexpars = {'config': self.sx_dict[dqtype, 'sex'],
                      'PARAMETERS_NAME': self.sx_dict[dqtype, 'param'],
                      'FILTER_NAME': self.sx_dict[dqtype, 'conv'],
                      'STARNNW_NAME': self.sx_dict[dqtype, 'nnw']}

            # In general, we want the passed parameters to have the same names
            # as the SExtractor params (but in lowercase). PHOT_AUTOPARAMS
            # takes two arguments, and it's only the second we're exposing.
            for key, value in params.items():
                if value is True:
                    value = 'Y'
                elif value is False:
                    value = 'N'
                if key == 'phot_min_radius':
                    sexpars.update({"PHOT_AUTOPARAMS":
                                    "2.5,{}".format(value)})
                else:
                    sexpars.update({key.upper(): value})

            for ext in ad:
                # saturation_level() descriptor always returns level in ADU,
                # so need to multiply by gain if image is not in ADU
                if set_saturation:
                    sat_level = ext.saturation_level()
                    if not ext.is_in_adu():
                        sat_level *= ext.gain()
                    sexpars.update({'SATUR_LEVEL': sat_level})

                # If we don't have a seeing estimate, try to get one
                if seeing_estimate is None:
                    log.debug("Running SExtractor to obtain seeing estimate")
                    sex_task = SExtractorETI(primitives_class=self, inputs=[ext],
                            params=sexpars, mask_dq_bits=mask_bits, getmask=True)
                    sex_task.run()
                    # An OBJCAT is *always* attached, even if no sources found
                    seeing_estimate = _estimate_seeing(ext.OBJCAT)

                # Re-run with seeing estimate (no point re-running if we
                # didn't get an estimate), and get a new estimate
                if seeing_estimate is not None:
                    log.debug("Running SExtractor with seeing estimate "
                              "{:.3f}".format(seeing_estimate))
                    sexpars.update({'SEEING_FWHM': '{:.3f}'.
                                   format(seeing_estimate)})
                    sex_task = SExtractorETI(primitives_class=self, inputs=[ext],
                            params=sexpars, mask_dq_bits=mask_bits, getmask=True)
                    sex_task.run()
                    # We don't want to replace an actual value with "None"
                    temp_seeing_estimate = _estimate_seeing(ext.OBJCAT)
                    if temp_seeing_estimate is not None:
                        seeing_estimate = temp_seeing_estimate

                # Although the OBJCAT has been added to the extension, it
                # needs to be massaged into the necessary format
                # We're deleting the OBJCAT first simply to suppress the
                # "replacing" message in gt.add_objcat, which would otherwise
                # be a bit confusing
                _cull_objcat(ext)
                objcat = ext.OBJCAT
                del ext.OBJCAT
                ad = gt.add_objcat(ad, extver=ext.hdr['EXTVER'], replace=False,
                                   table=objcat, sx_dict=self.sx_dict)
                log.stdinfo("Found {} sources in {}:{}".format(len(ext.OBJCAT),
                                            ad.filename, ext.hdr['EXTVER']))
                # The presence of an OBJCAT demands objects (philosophical)
                if len(ext.OBJCAT) == 0:
                    del ext.OBJCAT

            # Run some profiling code on the best sources to produce a
            # more IRAF-like FWHM number, adding two columns to the OBJCAT
            # (PROFILE_FWHM, PROFILE_EE50)
            ad = _profile_sources(ad, seeing_estimate)

            # We've added a new OBJMASK. It's possible the AD already had an
            # OBJMASK that was dilated. Need to remove this keyword from PHU
            kw = self.timestamp_keys["dilateObjectMask"]
            if kw in ad.phu:
                del ad.phu[kw]

            # Timestamp and update filename, and append to output list
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)
            adoutputs.append(ad)
        return adoutputs
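detectSources upper-cases the remaining reduce parameters into SExtractor configuration keys, converting booleans to Y/N and folding phot_min_radius into the second field of PHOT_AUTOPARAMS. A standalone sketch of that mapping with invented parameter values:

params = {"detect_minarea": 8, "detect_thresh": 2.0,
          "deblend_mincont": 0.005, "phot_min_radius": 3.5,
          "back_size": 32}

sexpars = {}
for key, value in params.items():
    if value is True:          # booleans become SExtractor's Y/N strings
        value = "Y"
    elif value is False:
        value = "N"
    if key == "phot_min_radius":
        sexpars["PHOT_AUTOPARAMS"] = "2.5,{}".format(value)
    else:
        sexpars[key.upper()] = value

print(sexpars)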
Example #57
 def addMDF(self, rc):
     """
     This primitive is used to add an MDF extension to the input AstroData
     object. If only one MDF is provided, that MDF will be added to all input
     AstroData object(s). If more than one MDF is provided, the number of
     MDF AstroData objects must match the number of input AstroData objects.
     If no MDF is provided, the primitive will attempt to determine an
     appropriate MDF.
     
     :param mdf: The file name of the MDF(s) to be added to the input(s)
     :type mdf: string
     
     """
     # Instantiate the log
     log = logutils.get_logger(__name__)
     
     # Log the standard "starting primitive" debug message
     log.debug(gt.log_message("primitive", "addMDF", "starting"))
     
     # Define the keyword to be used for the time stamp for this primitive
     timestamp_key = self.timestamp_keys["addMDF"]
     
     # Initialize the list of output AstroData objects
     adoutput_list = []
     
     # Get the input AstroData objects
     adinput = rc.get_inputs_as_astrodata()
     
     # Loop over each input AstroData object in the input list
     for ad in adinput:
         
         # Check whether the addMDF primitive has been run previously
         if ad.phu_get_key_value(timestamp_key):
             log.warning("No changes will be made to %s, since it has "
                         "already been processed by addMDF" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check whether the input is spectroscopic data
         if "SPECT" not in ad.types:
             log.stdinfo("%s is not spectroscopic data, so no MDF will be "
                         "added" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check whether an MDF extension already exists in the input
         # AstroData object
         if ad["MDF"]:
             log.warning("An MDF extension already exists in %s, so no MDF "
                         "will be added" % ad.filename)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Parameters specified on the command line to reduce are converted
         # to strings, including None
         if rc["mdf"] and rc["mdf"] != "None":
             # The user supplied an input to the mdf parameter
             mdf = rc["mdf"]
         else:
             # The user did not supply an input to the mdf parameter, so try
             # to find an appropriate one. Get the dictionary containing the
             # list of MDFs for all instruments and modes.
             all_mdf_dict = Lookups.get_lookup_table("Gemini/MDFDict",
                                                     "mdf_dict")
             
             # The MDFs are keyed by the instrument and the MASKNAME. Get
             # the instrument and the MASKNAME values using the appropriate
             # descriptors 
             instrument = ad.instrument()
             mask_name = ad.phu_get_key_value("MASKNAME")
             
             # Create the key for the lookup table
             if instrument is None or mask_name is None:
                 log.warning("Unable to create the key for the lookup "
                             "table (%s), so no MDF will be added"
                             % ad.exception_info)
                 
                 # Append the input AstroData object to the list of output
                 # AstroData objects without further processing
                 adoutput_list.append(ad)
                 continue
             
             key = "%s_%s" % (instrument, mask_name)
             
             # Get the appropriate MDF from the look up table
             if key in all_mdf_dict:
                 mdf = lookup_path(all_mdf_dict[key])
             else:
                  # The MASKNAME keyword defines the actual name of an MDF
                  if not mask_name.endswith(".fits"):
                      mdf = "%s.fits" % mask_name
                  else:
                      mdf = str(mask_name)

                  # Check if the MDF exists in the current working directory
                  if not os.path.exists(mdf):
                      log.warning("The MDF %s was not found in the current "
                                  "working directory, so no MDF will be "
                                  "added" % mdf)

                      # Append the input AstroData object to the list of output
                      # AstroData objects without further processing
                      adoutput_list.append(ad)
                      continue
         
          # Ensure that the MDF is an AstroData object
          if not isinstance(mdf, AstroData):
              mdf_ad = AstroData(mdf)
          else:
              mdf_ad = mdf
         
         if mdf_ad is None:
             log.warning("Cannot convert %s into an AstroData object, so "
                         "no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check if the MDF is a single extension fits file
         if len(mdf_ad) > 1:
             log.warning("The MDF %s is not a single extension fits file, "
                         "so no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
             
         # Name the extension appropriately
         mdf_ad.rename_ext("MDF", 1)
         
         # Append the MDF AstroData object to the input AstroData object
         log.fullinfo("Adding the MDF %s to the input AstroData object "
                      "%s" % (mdf_ad.filename, ad.filename))
         ad.append(moredata=mdf_ad)
         
         # Add the appropriate time stamps to the PHU
         gt.mark_history(adinput=ad, keyword=timestamp_key)
         
         # Change the filename
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Append the output AstroData object to the list of output
         # AstroData objects
         adoutput_list.append(ad)
     
     # Report the list of output AstroData objects to the reduction context
     rc.report_output(adoutput_list)
     
     yield rc
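addMDF builds its lookup key from the instrument name and MASKNAME, then falls back to treating MASKNAME itself as a FITS filename in the current directory. A simplified sketch of that resolution logic; the helper name and dictionary contents are invented:

import os

def resolve_mdf(instrument, mask_name, mdf_dict):
    """Return an MDF filename for this instrument/mask, or None if unresolvable."""
    if instrument is None or mask_name is None:
        return None
    key = "%s_%s" % (instrument, mask_name)
    if key in mdf_dict:
        return mdf_dict[key]
    # MASKNAME may itself name the MDF file
    mdf = mask_name if mask_name.endswith(".fits") else "%s.fits" % mask_name
    return mdf if os.path.exists(mdf) else None

print(resolve_mdf("GMOS-N", "1.0arcsec", {"GMOS-N_1.0arcsec": "gmos_ls_1.0arcsec_mdf.fits"}))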