Example 1
 def test_clip_auxiliary_data(self):
     ad = astrodata.open(os.path.join(TESTDATAPATH, 'NIRI',
                                       'N20160620S0035.fits'))
     bpm_ad = astrodata.open('geminidr/niri/lookups/BPM/NIRI_bpm.fits')
     ret = gt.clip_auxiliary_data(ad, bpm_ad, 'bpm', np.int16)
     assert ret[0].data.shape == ad[0].data.shape
     assert np.all(ret[0].data == bpm_ad[0].data[256:768,256:768])
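
The assertions above capture the behaviour being tested: clip_auxiliary_data trims a full-frame auxiliary array (here a NIRI BPM) down to the section covered by the science frame. Below is a minimal numpy sketch of that central-section clipping, assuming a 1024x1024 BPM and a centred 512x512 science array (matching the 256:768 slices in the test); the names are stand-ins, not the DRAGONS API.

import numpy as np

full_bpm = np.random.randint(0, 2, (1024, 1024)).astype(np.int16)  # stand-in for bpm_ad[0].data
sci_shape = (512, 512)                                             # stand-in for ad[0].data.shape

# Offsets of a centred subarray within the full frame
y0 = (full_bpm.shape[0] - sci_shape[0]) // 2   # 256
x0 = (full_bpm.shape[1] - sci_shape[1]) // 2   # 256
clipped = full_bpm[y0:y0 + sci_shape[0], x0:x0 + sci_shape[1]]

assert clipped.shape == sci_shape
assert np.all(clipped == full_bpm[256:768, 256:768])
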
Example 2
    def addLatencyToDQ(self, adinputs=None, **params):
        """
        Flags pixels in the DQ plane of an image based on whether the same
        pixel has been flagged as saturated in a previous image.
        
        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        non_linear : bool
            flag non-linear pixels (as well as saturated ones)?
        time: float
            time (in seconds) for which latency is an issue 
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        flags = DQ.saturated | (DQ.non_linear if params["non_linear"] else 0)
        # Create a timedelta object using the value of the "time" parameter
        seconds = datetime.timedelta(seconds=params["time"])

        # Avoids n^2 calls to the descriptor
        times = [ad.ut_datetime() for ad in adinputs]
        for i, ad in enumerate(adinputs):
            # Find which frames have their bright pixels propagated
            propagated = [
                x for x in zip(adinputs, times)
                if (x[1] < times[i] and times[i] - x[1] < seconds)
            ]
            if propagated:
                log.stdinfo('{} affected by {}'.format(
                    ad.filename,
                    ','.join([x[0].filename for x in propagated])))

                for ad_latent in list(zip(*propagated))[0]:
                    # AD extensions might not be in the same order
                    # Set aux_type to 'bpm' which means hot pixels in a subarray
                    # can still be propagated to a subsequent full-array image
                    ad_latent = gt.clip_auxiliary_data(ad,
                                                       aux=ad_latent,
                                                       aux_type='bpm')
                    for ext, ext_latent in zip(ad, ad_latent):
                        if ext_latent.mask is not None:
                            latency = np.where(ext_latent.mask & flags,
                                               DQ.cosmic_ray,
                                               0).astype(DQ.datatype)
                            ext.mask = latency if ext.mask is None \
                                else ext.mask | latency
            else:
                log.stdinfo('{} is not affected by latency'.format(
                    ad.filename))

            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
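
The heart of this primitive is the time-window comparison that builds `propagated`: a frame is affected by latency if another frame was taken earlier, but within `time` seconds of it. A standalone sketch of that selection (hypothetical helper and data, plain datetime):

import datetime

def frames_within_window(times, i, seconds):
    """Indices of frames taken before frame i but within `seconds` of it."""
    window = datetime.timedelta(seconds=seconds)
    return [j for j, t in enumerate(times)
            if t < times[i] and times[i] - t < window]

obs = [datetime.datetime(2020, 1, 1, 0, 0, s) for s in (0, 30, 50)]
print(frames_within_window(obs, 2, 40))   # [1]: frame 0 is 50s earlier, outside the window
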
Example 3
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        if illum_mask is None:
            illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            if illum is None:
                # So it can be zipped with the AD
                final_illum = [None] * len(ad)
            else:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask = iext if ext.mask is None else ext.mask | iext

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
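
The inner loop reduces to thresholding the mask and bitwise-ORing it into the DQ plane. A minimal numpy sketch, with assumed stand-ins for DQ.unilluminated (64 here) and DQ.datatype:

import numpy as np

DQ_UNILLUMINATED = 64          # assumed bit value, standing in for DQ.unilluminated
DQ_DTYPE = np.uint16           # standing in for DQ.datatype

illum_data = np.array([[0, 5], [0, 0]])
existing_mask = None           # or an existing DQ array of the same shape

iext = np.where(illum_data > 0, DQ_UNILLUMINATED, 0).astype(DQ_DTYPE)
mask = iext if existing_mask is None else existing_mask | iext
print(mask)                    # [[ 0 64]
                               #  [ 0  0]]
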
Example 4
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        if illum_mask is None:
            illum_mask = [self._get_illum_mask_filename(ad) for ad in adinputs]

        for ad, illum in zip(*gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addIllumMaskToDQ'.format(
                                ad.filename))
                continue

            if illum is None:
                # So it can be zipped with the AD
                final_illum = [None] * len(ad)
            else:
                log.fullinfo("Using {} as illumination mask".format(illum.filename))
                final_illum = gt.clip_auxiliary_data(ad, aux=illum, aux_type='bpm',
                                                     return_dtype=DQ.datatype)

            for ext, illum_ext in zip(ad, final_illum):
                if illum_ext is not None:
                    # Ensure we're only adding the unilluminated bit
                    iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                    0).astype(DQ.datatype)
                    ext.mask = iext if ext.mask is None else ext.mask | iext

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
Example 5
    def addLatencyToDQ(self, adinputs=None, **params):
        """
        Flags pixels in the DQ plane of an image based on whether the same
        pixel has been flagged as saturated in a previous image.
        
        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        non_linear : bool
            flag non-linear pixels (as well as saturated ones)?
        time: float
            time (in seconds) for which latency is an issue 
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        flags = DQ.saturated | (DQ.non_linear if params["non_linear"] else 0)
        # Create a timedelta object using the value of the "time" parameter
        seconds = datetime.timedelta(seconds=params["time"])

        # Avoids n^2 calls to the descriptor
        times = [ad.ut_datetime() for ad in adinputs]
        for i, ad in enumerate(adinputs):
            # Find which frames have their bright pixels propagated
            propagated = [x for x in zip(adinputs, times)
                          if x[1] < times[i] and times[i] - x[1] < seconds]
            if propagated:
                log.stdinfo('{} affected by {}'.format(
                    ad.filename,
                    ','.join([x[0].filename for x in propagated])))

                for ad_latent in list(zip(*propagated))[0]:
                    # AD extensions might not be in the same order
                    # Set aux_type to 'bpm' which means hot pixels in a subarray
                    # can still be propagated to a subsequent full-array image
                    ad_latent = gt.clip_auxiliary_data(ad, aux=ad_latent,
                                                       aux_type='bpm')
                    for ext, ext_latent in zip(ad, ad_latent):
                        if ext_latent.mask is not None:
                            latency = np.where(ext_latent.mask & flags,
                                               DQ.cosmic_ray,
                                               0).astype(DQ.datatype)
                            ext.mask = latency if ext.mask is None \
                                else ext.mask | latency
            else:
                log.stdinfo('{} is not affected by latency'.format(ad.filename))

            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
Example 6
    def fringeCorrect(self, adinputs=None, **params):
        """
        Correct science frames for the effects of fringing, using a fringe
        frame. The fringe frame is obtained either from a specified parameter,
        or the "fringe" stream, or the calibration database. This is basically
        a bookkeeping wrapper for subtractFringe(), which does all the work.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        fringe: list/str/AstroData/None
            fringe frame(s) to subtract
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Exit now if nothing needs a correction, to avoid an error when the
        # calibration search fails. If images with different exposure times
        # are used, some frames may not require a correction (but the
        # calibration search will succeed), so we still need to check
        # individual inputs later.
        if not any(self._needs_fringe_correction(ad) for ad in adinputs):
            log.stdinfo("No input images require a fringe correction.")
            return adinputs

        fringe = params["fringe"]
        scale = params["scale"]
        if fringe is None:
            try:
                fringe_list = self.streams['fringe']
                assert len(fringe_list) == 1
                scale = False
                log.stdinfo("Using fringe frame in 'fringe' stream. "
                            "Setting scale=False")
            except (KeyError, AssertionError):
                self.getProcessedFringe(adinputs)
                fringe_list = self._get_cal(adinputs, "processed_fringe")
        else:
            fringe_list = fringe

        # Usual stuff to ensure that we have an iterable of the correct length
        # for the scale factors regardless of what the input is
        scale_factor = params["scale_factor"]
        try:
            factors = iter(scale_factor)
        except TypeError:
            factors = iter([scale_factor] * len(adinputs))
        else:
            # In case a single-element list was passed
            if len(scale_factor) == 1:
                factors = iter(scale_factor * len(adinputs))

        # Get a fringe AD object for every science frame
        for ad, fringe in zip(*gt.make_lists(adinputs, fringe_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by subtractFringe".
                            format(ad.filename))
                continue

            # Check the inputs have matching filters, binning, and shapes
            try:
                gt.check_inputs_match(ad, fringe)
            except ValueError:
                fringe = gt.clip_auxiliary_data(adinput=ad, aux=fringe,
                                                aux_type="cal")
                gt.check_inputs_match(ad, fringe)

            if scale:
                factor = next(factors)
                if factor is None:
                    factor = self._calculate_fringe_scaling(ad, fringe)
                log.stdinfo("Scaling fringe frame by factor {:.3f} before "
                            "subtracting from {}".format(factor, ad.filename))
                # Since all elements of fringe_list might be references to the
                # same AD, need to make a copy before multiplying
                fringe_copy = deepcopy(fringe)
                fringe_copy.multiply(factor)
                ad.subtract(fringe_copy)
            else:
                ad.subtract(fringe)

            # Timestamp and update header and filename
            ad.phu.set("FRINGEIM", fringe.filename, self.keyword_comments["FRINGEIM"])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
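
The scale-factor block above normalises whatever the caller passed (a scalar, a single-element list, or a full-length sequence) into one factor per input; the same idiom reappears in Example 15. A standalone sketch, with a hypothetical helper name:

def as_factor_iter(scale_factor, n):
    """Yield one scale factor per input, whether a scalar, a length-1
    sequence, or a full-length sequence was supplied."""
    try:
        factors = list(scale_factor)
    except TypeError:            # a scalar (None included)
        factors = [scale_factor] * n
    else:
        if len(factors) == 1:    # single-element sequence: broadcast it
            factors = factors * n
    return iter(factors)

print(list(as_factor_iter(2.0, 3)))        # [2.0, 2.0, 2.0]
print(list(as_factor_iter([1.5], 3)))      # [1.5, 1.5, 1.5]
print(list(as_factor_iter([1, 2, 3], 3)))  # [1, 2, 3]
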
Example 7
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, getProcessedBias will be called
        to ensure a bias exists for every adinput.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_bias: bool
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if not do_bias:
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            self.getProcessedBias(adinputs, refresh=False)
            bias_list = self._get_cal(adinputs, 'processed_bias')
        else:
            bias_list = bias

        # Provide a bias AD object for every science frame
        for ad, bias in zip(*gt.make_lists(adinputs, bias_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by biasCorrect".
                            format(ad.filename))
                continue

            if bias is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    raise IOError('No processed bias listed for {}'.
                                  format(ad.filename))

            try:
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)

            log.fullinfo('Subtracting this bias from {}:\n{}'.
                         format(ad.filename, bias.filename))
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename, self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
        return adinputs
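
Each science frame is paired with a bias via gt.make_lists, which evidently returns equal-length lists so that a single calibration can serve several inputs. A rough sketch of that pairing behaviour (an assumption about make_lists, not the DRAGONS implementation):

def match_lists(inputs, cals):
    """Broadcast a single calibration to all inputs; otherwise require
    one calibration per input (sketch only)."""
    if not isinstance(cals, (list, tuple)):
        cals = [cals]
    if len(cals) == 1:
        cals = list(cals) * len(inputs)
    assert len(cals) == len(inputs)
    return inputs, cals

sci = ['S1.fits', 'S2.fits', 'S3.fits']
for ad, bias in zip(*match_lists(sci, 'bias.fits')):
    print(ad, '<-', bias)   # every frame paired with the same bias
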
Example 8
def pointing_in_field(pos, refpos, frac_FOV=1.0, frac_slit=1.0):

    """
    See gemini_tools.pointing_in_field() for the API. This is an
    instrument-specific back end that you shouldn't be calling directly.

    No inputs are validated at this level; that's the responsibility of the
    calling function, for reasons of efficiency.
    
    The GNIRS FOV is determined by whether the calculated center point of
    the image (taken from the center of mass of the illumination mask)
    falls within the illumination mask of the reference image.
    
    :param pos: AstroData instance to be checked for whether it belongs
                in the same sky grouping as refpos
    :type pos: AstroData instance
    
    :param refpos: This is the POFFSET and QOFFSET of the reference image
    :type refpos: tuple of floats
    
    :param frac_FOV: For use with spectroscopy data
    :type frac_FOV: float
    
    :param frac_slit: For use with spectroscopy data
    :type frac_slit: float
    """
    # Since this function gets looked up and evaluated, we have to do any
    # essential imports in-line (but Python caches them)
    import math
    
    # Extract pointing info in terms of the x and y offsets
    xshift = refpos[1] - pos.phu['QOFFSET']
    yshift = refpos[0] - pos.phu['POFFSET']
    ad = pos

    # Imaging:
    if 'IMAGE' in pos.tags:
        illum = get_illum_mask_filename(ad)
        if illum:
            illum_ad = gt.clip_auxiliary_data(adinput=pos,
                            aux=astrodata.open(illum), aux_type="bpm")
            illum_data = illum_ad[0].data
        else:
            raise OSError("Cannot find illumination mask for {}".
                          format(ad.filename))

        # Finding the center of the illumination mask
        center_illum = (illum_ad.phu['CENMASSX'], illum_ad.phu['CENMASSY'])
        checkpos = (int(center_illum[0] + xshift),
                    int(center_illum[1] + yshift))
        
        # If the position to check is going to fall outside the illumination
        # mask, return straight away to avoid an error
        if ((abs(xshift) >= abs(center_illum[0])) or 
            (abs(yshift) >= abs(center_illum[1]))):
            return False

        # Note that numpy data arrays are reversed in x and y    
        return illum_data[checkpos[1], checkpos[0]] == 0 

    # Spectroscopy:
    elif 'SPECT' in ad.tags:
        raise NotImplementedError("FOV lookup not yet supported for GNIRS "
                                  "Spectroscopy")

    # Some engineering observation or bad mask value etc.:
    else:
        raise ValueError("Can't determine FOV for unrecognized GNIRS config "
          "({}, {})".format(ad.focal_plane_mask(), ad.disperser()))
Example 9
    def addIllumMaskToDQ(self, adinputs=None, **params):
        """
        This primitive combines the illumination mask from the lookup directory
        into the DQ plane

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))

        # Since this primitive needs a reference, it must no-op when there are none
        if not adinputs:
            return adinputs

        # Get the list of inputs and identify a suitable reference frame.
        # In most cases it will be the first image, but for lamp-on/lamp-off
        # flats, one wants the reference frame to be a lamp-on, since there
        # is next to no signal in the lamp-off.
        #
        # BEWARE: See the note on the GCAL_IR_OFF-only case below this block.
        lampons = self.selectFromInputs(adinputs, 'GCAL_IR_ON')
        reference = lampons[0] if lampons else adinputs[0]
                   
        # When only a GCAL_IR_OFF is available:
        # To cover the one-at-a-time mode, check for a compatible list;
        # if a list is found, try to find a lamp-on in there to use as the
        # reference for the mask and the shifts.
        # The mask's name and the shifts should be stored in the headers
        # of the reference to simplify this and speed things up.
        #
        # NOT NEEDED NOW because we are calling addIllumMask after the
        # lamp-offs have been subtracted. But the idea is kept here in
        # case it is needed.
        
        # Fetching a corrected illumination mask with a keyhole that aligns 
        # with the science data
        illum = self._get_illum_mask_filename(reference)
        if illum is None:
            log.warning("No illumination mask found for {}, no mask can "
                        "be added to the DQ planes of the inputs".
                        format(reference.filename))
            return adinputs

        illum_ad = astrodata.open(illum)
        corr_illum_ad = _position_illum_mask(reference, illum_ad, log)

        for ad in adinputs:
            final_illum = gt.clip_auxiliary_data(ad, aux=corr_illum_ad,
                                                 aux_type="bpm")

            # Bitwise-OR the illumination mask into the DQ plane, or create
            # a DQ plane from it.
            if ad[0].mask is None:
                ad[0].mask = final_illum[0].data
            else:
                ad[0].mask |= final_illum[0].data

            # Update the filename
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
Example 10
    def addDQ(self, adinputs=None, **params):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        static_bpm: str
            Name of bad pixel mask ("default" -> use default from look-up table)
            If set to None, no static_bpm will be added.
        user_bpm: str
            Name of the bad pixel mask created by the user from flats and
            darks.  It is an optional BPM that can be added to the static one.
        illum_mask: bool
            add illumination mask?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["addDQ"]
        sfx = params["suffix"]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        static_bpm_list = params['static_bpm']
        user_bpm_list = params['user_bpm']

        if static_bpm_list == "default":
            static_bpm_list = [self._get_bpm_filename(ad) for ad in adinputs]

        for ad, static, user in zip(*gt.make_lists(
                adinputs, static_bpm_list, user_bpm_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                            'already been processed by addDQ'.format(
                                ad.filename))
                continue

            if static is None:
                # So it can be zipped with the AD
                final_static = [None] * len(ad)
            else:
                log.fullinfo("Using {} as static BPM".format(static.filename))
                final_static = gt.clip_auxiliary_data(ad,
                                                      aux=static,
                                                      aux_type='bpm',
                                                      return_dtype=DQ.datatype)

            if user is None:
                final_user = [None] * len(ad)
            else:
                log.fullinfo("Using {} as user BPM".format(user.filename))
                final_user = gt.clip_auxiliary_data(ad,
                                                    aux=user,
                                                    aux_type='bpm',
                                                    return_dtype=DQ.datatype)

            for ext, static_ext, user_ext in zip(ad, final_static, final_user):
                extver = ext.hdr['EXTVER']
                if ext.mask is not None:
                    log.warning(
                        'A mask already exists in extver {}'.format(extver))
                    continue

                non_linear_level = ext.non_linear_level()
                saturation_level = ext.saturation_level()

                # Need to create the array first for 3D raw F2 data, with 2D BPM
                ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)
                if static_ext is not None:
                    ext.mask |= static_ext.data
                if user_ext is not None:
                    ext.mask |= user_ext.data

                if saturation_level:
                    log.fullinfo('Flagging saturated pixels in {}:{} '
                                 'above level {:.2f}'.format(
                                     ad.filename, extver, saturation_level))
                    ext.mask |= np.where(ext.data >= saturation_level,
                                         DQ.saturated, 0).astype(DQ.datatype)

                if non_linear_level:
                    if saturation_level:
                        if saturation_level > non_linear_level:
                            log.fullinfo('Flagging non-linear pixels in {}:{} '
                                         'above level {:.2f}'.format(
                                             ad.filename, extver,
                                             non_linear_level))
                            ext.mask |= np.where(
                                (ext.data >= non_linear_level) &
                                (ext.data < saturation_level), DQ.non_linear,
                                0).astype(DQ.datatype)
                            # Readout modes of IR detectors can result in
                            # saturated pixels having values below the
                            # saturation level. Flag those. Assume we have an
                            # IR detector here because both non-linear and
                            # saturation levels are defined and nonlin<sat
                            regions, nregions = measurements.label(
                                ext.data < non_linear_level)
                            # In all my tests, region 1 has been the majority
                            # of the image; however, I cannot guarantee that
                            # this is always the case and therefore we should
                            # check the size of each region
                            region_sizes = measurements.labeled_comprehension(
                                ext.data, regions, np.arange(1, nregions + 1),
                                len, int, 0)
                            # First, assume all regions are saturated, and
                            # remove any very large ones. This is much faster
                            # than progressively adding each region to DQ
                            hidden_saturation_array = np.where(
                                regions > 0, 4, 0).astype(DQ.datatype)
                            for region in range(1, nregions + 1):
                                # Limit of 10000 pixels for a hole is a bit arbitrary
                                if region_sizes[region - 1] > 10000:
                                    hidden_saturation_array[regions ==
                                                            region] = 0
                            ext.mask |= hidden_saturation_array

                        elif saturation_level < non_linear_level:
                            log.warning('{}:{} has saturation level less than '
                                        'non-linear level'.format(
                                            ad.filename, extver))
                        else:
                            log.fullinfo('Saturation and non-linear levels '
                                         'are the same for {}:{}. Only '
                                         'flagging saturated pixels'.format(
                                             ad.filename, extver))
                    else:
                        log.fullinfo('Flagging non-linear pixels in {}:{} '
                                     'above level {:.2f}'.format(
                                         ad.filename, extver,
                                         non_linear_level))
                        ext.mask |= np.where(ext.data >= non_linear_level,
                                             DQ.non_linear,
                                             0).astype(DQ.datatype)

        # Handle latency if requested
        if params.get("latency", False):
            try:
                adinputs = self.addLatencyToDQ(adinputs,
                                               time=params["time"],
                                               non_linear=params["non_linear"])
            except AttributeError:
                log.warning(
                    "addLatencyToDQ() not defined in primitivesClass " +
                    self.__class__.__name__)

        # Add the illumination mask if requested
        if params['add_illum_mask']:
            adinputs = self.addIllumMaskToDQ(adinputs,
                                             illum_mask=params["illum_mask"])

        # Timestamp and update filenames
        for ad in adinputs:
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
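
The docstring enumerates the DQ convention used throughout (1 = bad pixel, 2 = non-linear, 4 = saturated), so flags combine with bitwise OR and are tested with bitwise AND. A minimal sketch of that arithmetic, with plain ints standing in for the DQ module's constants:

import numpy as np

BAD, NON_LINEAR, SATURATED = 1, 2, 4   # assumed stand-ins for DQ.* constants

mask = np.zeros(4, dtype=np.uint16)
mask[0] |= BAD
mask[1] |= NON_LINEAR
mask[1] |= SATURATED                   # a pixel can carry several flags at once

print(mask)                            # [1 6 0 0]
print((mask & SATURATED) > 0)          # [False  True False False]
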
Example 11
    def divideByFlat(self, rc):
        """
        This primitive will divide each SCI extension of the inputs by those
        of the corresponding flat. If the inputs contain VAR or DQ frames,
        those will also be updated accordingly due to the division on the data.
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "divideByFlat", "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["divideByFlat"]

        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Check for a user-supplied flat
        adinput = rc.get_inputs_as_astrodata()
        flat_param = rc["flat"]
        flat_dict = None
        if flat_param is not None:
            # The user supplied an input to the flat parameter
            if not isinstance(flat_param, list):
                flat_list = [flat_param]
            else:
                flat_list = flat_param

            # Convert filenames to AD instances if necessary
            tmp_list = []
            for flat in flat_list:
                if type(flat) is not AstroData:
                    flat = AstroData(flat)
                tmp_list.append(flat)
            flat_list = tmp_list
            
            flat_dict = gt.make_dict(key_list=adinput, value_list=flat_list)

        # Loop over each input AstroData object in the input list
        for ad in adinput:
            
            # Check whether the divideByFlat primitive has been run previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has " \
                            "already been processed by divideByFlat" \
                            % (ad.filename))
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Retrieve the appropriate flat
            if flat_dict is not None:
                flat = flat_dict[ad]
            else:
                flat = rc.get_cal(ad, "processed_flat")
            
                # If there is no appropriate flat, there is no need to divide by
                # the flat in QA context; in SQ context, raise an error
                if flat is None:
                    if "qa" in rc.context:
                        log.warning("No changes will be made to %s, since no " \
                                    "appropriate flat could be retrieved" \
                                    % (ad.filename))
                        # Append the input AstroData object to the list of output
                        # AstroData objects without further processing
                        adoutput_list.append(ad)
                        continue
                    else:
                        raise Errors.PrimitiveError("No processed flat found "\
                                                    "for %s" % ad.filename)
                else:
                    flat = AstroData(flat)
            
            # Check the inputs have matching filters, binning, and SCI shapes.
            try:
                gt.check_inputs_match(ad1=ad, ad2=flat) 
            except Errors.ToolboxError:
                # If not, try to clip the flat frame to the size
                # of the science data
                # For a GMOS example, this allows a full frame flat to
                # be used for a CCD2-only science frame. 
                flat = gt.clip_auxiliary_data(
                    adinput=ad,aux=flat,aux_type="cal")[0]

                # Check again, but allow it to fail if they still don't match
                gt.check_inputs_match(ad1=ad, ad2=flat)


            # Divide the adinput by the flat
            log.fullinfo("Dividing the input AstroData object (%s) " \
                         "by this flat:\n%s" % (ad.filename,
                                                flat.filename))
            ad = ad.div(flat)
                        
            # Record the flat file used
            ad.phu_set_key_value("FLATIM", 
                                 os.path.basename(flat.filename),
                                 comment=self.keyword_comments["FLATIM"])

            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"], 
                                              strip=True)
            
            # Append the output AstroData object to the list 
            # of output AstroData objects
            adoutput_list.append(ad)
        
        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)
        
        yield rc
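
The docstring says the VAR extension is "updated accordingly due to the division". For q = a/b, first-order error propagation gives var_q = q**2 * (var_a/a**2 + var_b/b**2); a sketch of that update with a hypothetical helper, not the AstroData internals:

import numpy as np

def divide_with_variance(sci, var_sci, flat, var_flat):
    """First-order variance propagation for q = sci / flat."""
    q = sci / flat
    var_q = q**2 * (var_sci / sci**2 + var_flat / flat**2)
    return q, var_q

sci = np.array([100.0, 200.0])
q, var_q = divide_with_variance(sci, np.array([100.0, 200.0]),
                                np.array([2.0, 2.0]), np.array([0.01, 0.01]))
print(q)       # [ 50. 100.]
print(var_q)   # [31.25 75.  ]
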
Example 12
    def addDQ(self, rc):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).
        
        :param bpm: The file name, including the full path, of the BPM(s) to be
                    used to flag bad pixels in the DQ extension. If only one
                    BPM is provided, that BPM will be used to flag bad pixels
                    in the DQ extension for all input AstroData object(s). If
                    more than one BPM is provided, the number of BPMs must
                    match the number of input AstroData objects. If no BPM is
                    provided, the primitive will attempt to determine an
                    appropriate BPM.
        :type bpm: string or list of strings
        """
        # Instantiate the log
        log = logutils.get_logger(__name__)
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "addDQ", "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["addDQ"]
        
        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Set the data type of the data quality array
        # It can be uint8 for now; it will get converted up as we assign
        # higher bit values, so we shouldn't need to force it to 16bpp yet.
        dq_dtype = np.dtype(np.uint8)
        #dq_dtype = np.dtype(np.uint16)
        
        # Get the input AstroData objects
        adinput = rc.get_inputs_as_astrodata()
        
        # Loop over each input AstroData object in the input list
        for ad in adinput:
            
            # Check whether the addDQ primitive has been run previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has "
                            "already been processed by addDQ" % ad.filename)
                
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Parameters specified on the command line to reduce are converted
            # to strings, including None
            ##M What about if a user doesn't want to add a BPM at all?
            ##M Are None's not converted to Nonetype from the command line?
            if rc["bpm"] and rc["bpm"] != "None":
                # The user supplied an input to the bpm parameter
                bpm = rc["bpm"]
            else:
                # The user did not supply an input to the bpm parameter, so try
                # to find an appropriate one. Get the dictionary containing the
                # list of BPMs for all instruments and modes.
                all_bpm_dict = Lookups.get_lookup_table("Gemini/BPMDict",
                                                        "bpm_dict")
                
                # Call the _get_bpm_key helper function to get the key for the
                # lookup table 
                key = self._get_bpm_key(ad)
                
                # Get the appropriate BPM from the look up table
                if key in all_bpm_dict:
                    bpm = lookup_path(all_bpm_dict[key])
                else:
                    bpm = None
                    log.warning("No BPM found for %s, no BPM will be "
                                "included" % ad.filename)

            # Ensure that the BPMs are AstroData objects
            bpm_ad = None
            if bpm is not None:
                log.fullinfo("Using %s as BPM" % str(bpm))
                if isinstance(bpm, AstroData):
                    bpm_ad = bpm
                else:
                    bpm_ad = AstroData(bpm)
                    ##M Do we want to fail here depending on context?
                    if bpm_ad is None:
                        log.warning("Cannot convert %s into an AstroData "
                                    "object, no BPM will be added" % bpm)

            final_bpm = None
            if bpm_ad is not None:
                # Clip the BPM data to match the size of the input AstroData
                # object science and pad with overscan region, if necessary
                final_bpm = gt.clip_auxiliary_data(adinput=ad, aux=bpm_ad,
                                                   aux_type="bpm")[0]

            # Get the non-linear level and the saturation level using the
            # appropriate descriptors - Individual values get checked in the
            # next loop 
            non_linear_level_dv = ad.non_linear_level()
            saturation_level_dv = ad.saturation_level()

            # Loop over each science extension in each input AstroData object
            for ext in ad[SCI]:
                
                # Retrieve the extension number for this extension
                extver = ext.extver()
                
                # Check whether an extension with the same name as the DQ
                # AstroData object already exists in the input AstroData object
                if ad[DQ, extver]:
                    log.warning("A [%s,%d] extension already exists in %s"
                                % (DQ, extver, ad.filename))
                    continue
                
                # Get the non-linear level and the saturation level for this
                # extension
                non_linear_level = non_linear_level_dv.get_value(extver=extver)
                saturation_level = saturation_level_dv.get_value(extver=extver)

                # To store individual arrays created for each of the DQ bit
                # types
                dq_bit_arrays = []

                # Create an array that contains pixels that have a value of 2
                # when that pixel is in the non-linear regime in the input
                # science extension
                if non_linear_level is not None:
                    non_linear_array = None
                    if saturation_level is not None:
                        # Test the saturation level against non_linear level
                        # They can be the same or the saturation level can be
                        # greater than but not less than the non-linear level.
                        # If they are the same then only flag saturated pixels
                        # below. This just means not creating an unnecessary
                        # intermediate array.
                        if saturation_level > non_linear_level:
                            log.fullinfo("Flagging pixels in the DQ extension "
                                         "corresponding to non linear pixels "
                                         "in %s[%s,%d] using non linear "
                                         "level = %.2f" % (ad.filename, SCI,
                                                           extver,
                                                           non_linear_level))

                            non_linear_array = np.where(
                                ((ext.data >= non_linear_level) &
                                (ext.data < saturation_level)), 2, 0)
                            
                        elif saturation_level < non_linear_level:
                            log.warning("%s[%s,%d] saturation_level value is"
                                        "less than the non_linear_level not"
                                        "flagging non linear pixels" %
                                        (ad.filname, SCI, extver))
                        else:
                            log.fullinfo("Saturation and non-linear values "
                                         "for %s[%s,%d] are the same. Only "
                                         "flagging saturated pixels."
                                         % (ad.filename, SCI, extver))
                            
                    else:
                        log.fullinfo("Flagging pixels in the DQ extension "
                                     "corresponding to non linear pixels "
                                     "in %s[%s,%d] using non linear "
                                     "level = %.2f" % (ad.filename, SCI, extver,
                                                       non_linear_level))

                        non_linear_array = np.where(
                            (ext.data >= non_linear_level), 2, 0)
                    
                    dq_bit_arrays.append(non_linear_array)

                # Create an array that contains pixels that have a value of 4
                # when that pixel is saturated in the input science extension
                if saturation_level is not None:
                    saturation_array = None
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to saturated pixels in "
                                 "%s[%s,%d] using saturation level = %.2f" %
                                 (ad.filename, SCI, extver, saturation_level))
                    saturation_array = np.where(
                        ext.data >= saturation_level, 4, 0)
                    dq_bit_arrays.append(saturation_array)
                
                # BPMs have an EXTNAME equal to DQ
                bpmname = None
                if final_bpm is not None:
                    bpm_array = None
                    bpmname = os.path.basename(final_bpm.filename)
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to bad pixels in %s[%s,%d] "
                                 "using the BPM %s[%s,%d]" %
                                 (ad.filename, SCI, extver, bpmname, DQ, extver))
                    bpm_array = final_bpm[DQ, extver].data
                    dq_bit_arrays.append(bpm_array)
                
                # Create a single DQ extension from the three arrays (BPM,
                # non-linear and saturated)
                if not dq_bit_arrays:
                    # The BPM, non-linear and saturated arrays were not
                    # created. Create a single DQ array with all pixels set
                    # equal to 0 
                    log.fullinfo("The BPM, non-linear and saturated arrays "
                                 "were not created. Creating a single DQ "
                                 "array with all the pixels set equal to zero")
                    final_dq_array = np.zeros(ext.data.shape).astype(dq_dtype)

                else:
                    final_dq_array = self._bitwise_OR_list(dq_bit_arrays)
                    final_dq_array = final_dq_array.astype(dq_dtype)
                
                # Create a data quality AstroData object
                dq = AstroData(data=final_dq_array)
                dq.rename_ext(DQ, ver=extver)
                dq.filename = ad.filename
                
                # Call the _update_dq_header helper function to update the
                # header of the data quality extension with some useful
                # keywords
                dq = self._update_dq_header(sci=ext, dq=dq, bpmname=bpmname)
                
                # Append the DQ AstroData object to the input AstroData object
                log.fullinfo("Adding extension [%s,%d] to %s"
                             % (DQ, extver, ad.filename))
                ad.append(moredata=dq)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)
            
            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)
            
            # Append the output AstroData object to the list of output
            # AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)
        
        yield rc
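
The DQ plane here is assembled by self._bitwise_OR_list(dq_bit_arrays), whose implementation is not shown. One plausible shape for such a helper (an assumption, not the actual code) is a fold over np.bitwise_or:

import functools
import numpy as np

def bitwise_or_list(arrays):
    """Fold a list of DQ bit arrays into a single plane (sketch only)."""
    return functools.reduce(np.bitwise_or, arrays)

planes = [np.array([0, 1, 0, 0]),   # bad pixels
          np.array([0, 0, 2, 0]),   # non-linear
          np.array([4, 0, 4, 0])]   # saturated
print(bitwise_or_list(planes))      # [4 1 6 0]
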
Example 13
    def subtractFringe(self, rc):
        
        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "subtractFringe",
                                 "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["subtractFringe"]

        # Initialize the list of output AstroData objects
        adoutput_list = []
        
        # Check for a user-supplied fringe
        adinput = rc.get_inputs_as_astrodata()
        fringe_param = rc["fringe"]
        fringe_dict = None
        if fringe_param is not None:
            # The user supplied an input to the fringe parameter
            if not isinstance(fringe_param, list):
                fringe_list = [fringe_param]
            else:
                fringe_list = fringe_param

            # Convert filenames to AD instances if necessary
            tmp_list = []
            for fringe in fringe_list:
                if type(fringe) is not AstroData:
                    fringe = AstroData(fringe)
                tmp_list.append(fringe)
            fringe_list = tmp_list
            
            fringe_dict = gt.make_dict(key_list=adinput, value_list=fringe_list)
        

        # Loop over each input AstroData object in the input list
        for ad in adinput:
            
            # Check whether the subtractFringe primitive has been run
            # previously
            if ad.phu_get_key_value(timestamp_key):
                log.warning("No changes will be made to %s, since it has " \
                            "already been processed by subtractFringe" \
                            % (ad.filename))
                # Append the input AstroData object to the list of output
                # AstroData objects without further processing
                adoutput_list.append(ad)
                continue
            
            # Retrieve the appropriate fringe
            if fringe_dict is not None:
                fringe = fringe_dict[ad]
            else:
                fringe = rc.get_cal(ad, "processed_fringe")
            
                # Take care of the case where there was no fringe 
                if fringe is None:
                    log.warning("Could not find an appropriate fringe for %s" \
                                % (ad.filename))
                    # Append the input to the output without further processing
                    adoutput_list.append(ad)
                    continue
                else:
                    fringe = AstroData(fringe)

            # Check the inputs have matching filters, binning and SCI shapes.
            try:
                gt.check_inputs_match(ad1=ad, ad2=fringe)
            except Errors.ToolboxError:
                # If not, try to clip the fringe frame to the size of the
                # science data
                # For a GMOS example, this allows a full frame fringe to
                # be used for a CCD2-only science frame. 
                fringe = gt.clip_auxiliary_data(
                    adinput=ad, aux=fringe, aux_type="cal")[0]

                # Check again, but allow it to fail if they still don't match
                gt.check_inputs_match(ad1=ad, ad2=fringe)


            # Subtract the fringe from the science
            ad = ad.sub(fringe)
            
            # Record the fringe file used
            ad.phu_set_key_value("FRINGEIM", 
                                 os.path.basename(fringe.filename),
                                 comment=self.keyword_comments["FRINGEIM"])

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"], 
                                              strip=True)
            
            # Append the output AstroData object to the list 
            # of output AstroData objects
            adoutput_list.append(ad)
            
        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)
        yield rc
Example 14
    def slitIllumCorrect(self,
                         adinputs=None,
                         slit_illum=None,
                         do_illum=True,
                         suffix="_illumCorrected"):
        """
        This primitive will divide each SCI extension of the inputs by those
        of the corresponding slit illumination image. If the inputs contain
        VAR or DQ frames, those will also be updated accordingly due to the
        division on the data.

        Parameters
        ----------
        adinputs : list of AstroData
            Data to be corrected.
        slit_illum : str or AstroData
            Slit illumination path or AstroData object.
        do_illum: bool, optional
            Perform slit illumination correction? (Default: True)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        qecorr_key = self.timestamp_keys['QECorrect']

        if not do_illum:
            log.warning("Slit Illumination correction has been turned off.")
            return adinputs

        if slit_illum is None:
            raise NotImplementedError
        else:
            slit_illum_list = slit_illum

        # Provide a Slit Illum Ad object for every science frame
        ad_outputs = []
        for ad, slit_illum_ad in zip(
                *gt.make_lists(adinputs, slit_illum_list, force_ad=True)):

            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by flatCorrect".format(
                                ad.filename))
                continue

            if slit_illum_ad is None:
                if self.mode in ['sq']:
                    raise OSError(
                        "No processed slit illumination listed for {}".format(
                            ad.filename))
                else:
                    log.warning("No changes will be made to {}, since no slit "
                                "illumination has been specified".format(
                                    ad.filename))
                    continue

            gt.check_inputs_match(ad, slit_illum_ad, check_shape=False)

            if not all(
                [e1.shape == e2.shape for (e1, e2) in zip(ad, slit_illum_ad)]):
                slit_illum_ad = gt.clip_auxiliary_data(adinput=[ad],
                                                       aux=[slit_illum_ad])[0]

            log.info("Dividing the input AstroData object {} by this \n"
                     "slit illumination file:  \n{}".format(
                         ad.filename, slit_illum_ad.filename))

            ad_out = deepcopy(ad)
            ad_out.divide(slit_illum_ad)

            # Update the header and filename, copying QECORR keyword from flat
            ad_out.phu.set("SLTILLIM", slit_illum_ad.filename,
                           self.keyword_comments["SLTILLIM"])

            try:
                qecorr_value = slit_illum_ad.phu[qecorr_key]
            except KeyError:
                pass
            else:
                log.fullinfo(
                    "Copying {} keyword from slit illumination".format(
                        qecorr_key))
                ad_out.phu.set(qecorr_key, qecorr_value,
                               slit_illum_ad.phu.comments[qecorr_key])

            gt.mark_history(ad_out,
                            primname=self.myself(),
                            keyword=timestamp_key)
            ad_out.update_filename(suffix=suffix, strip=True)

            if slit_illum_ad.path:
                add_provenance(ad_out, slit_illum_ad.filename,
                               md5sum(slit_illum_ad.path) or "", self.myself())

            ad_outputs.append(ad_out)

        return ad_outputs
Example 15
    def fringeCorrect(self, adinputs=None, **params):
        """
        Correct science frames for the effects of fringing, using a fringe
        frame. The fringe frame is obtained either from a specified parameter,
        or the "fringe" stream, or the calibration database. This is basically
        a bookkeeping wrapper for subtractFringe(), which does all the work.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        fringe: list/str/AstroData/None
            fringe frame(s) to subtract
        do_fringe: bool/None
            apply fringe correction? (None => use pipeline default for data)
        scale: bool/None
            scale fringe frame? (None => False if fringe frame has same
            group_id() as data)
        scale_factor: float/sequence/None
            factor(s) to scale fringe
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]
        fringe = params["fringe"]
        scale = params["scale"]
        do_cal = params["do_cal"]

        # Exit now if nothing needs a correction, to avoid an error when the
        # calibration search fails. If images with different exposure times
        # are used, some frames may not require a correction (but the calibration
        # search will succeed), so still need to check individual inputs later.
        needs_correction = [self._needs_fringe_correction(ad) for ad in adinputs]
        if any(needs_correction):
            if do_cal == 'skip':
                log.warning("Fringe correction has been turned off but is "
                            "recommended.")
                return adinputs
        else:
            if do_cal == 'procmode' or do_cal == 'skip':
                log.stdinfo("No input images require a fringe correction.")
                return adinputs
            else:  # do_cal == 'force':
                log.warning("Fringe correction has been forced on but may not "
                            "be required.")


        if fringe is None:
            # This logic is for QAP
            try:
                fringe_list = self.streams['fringe']
                assert len(fringe_list) == 1
                scale = False
                log.stdinfo("Using fringe frame in 'fringe' stream. "
                            "Setting scale=False")
                fringe_list = (fringe_list[0], "stream")
            except (KeyError, AssertionError):
                fringe_list = self.caldb.get_processed_fringe(adinputs)
        else:
            fringe_list = (fringe, None)

        # Usual stuff to ensure that we have an iterable of the correct length
        # for the scale factors regardless of what the input is
        scale_factor = params["scale_factor"]
        try:
            factors = iter(scale_factor)
        except TypeError:
            factors = iter([scale_factor] * len(adinputs))
        else:
            # In case a single-element list was passed
            if len(scale_factor) == 1:
                factors = iter(scale_factor * len(adinputs))

        # Get a fringe AD object for every science frame
        for ad, fringe, origin, correct in zip(*gt.make_lists(
                adinputs, *fringe_list, needs_correction, force_ad=(1,))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by "
                            "fringeCorrect. Continuing.")
                continue

            # Logic to deal with different exposure times where only
            # some inputs might require fringe correction
            # KL: for now, I'm not allowing the "force" to do anything when
            #     the correction is not needed.
            if (do_cal == 'procmode' or do_cal == 'force') and not correct:
                log.stdinfo("{} does not require a fringe correction".
                            format(ad.filename))
                ad.update_filename(suffix=params["suffix"], strip=True)
                continue

            # At this point, we definitely want to do a fringe correction
            # so we'd better have a fringe frame!
            if fringe is None:
                if 'sq' not in self.mode and do_cal != 'force':
                    log.warning("No changes will be made to {}, since no "
                                "fringe frame has been specified".
                                format(ad.filename))
                    continue
                else:
                    raise OSError(f"{ad.filename}: no fringe frame was "
                                  "specified or found")

            # Check the inputs have matching filters, binning, and shapes
            try:
                gt.check_inputs_match(ad, fringe)
            except ValueError:
                fringe = gt.clip_auxiliary_data(adinput=ad, aux=fringe,
                                                aux_type="cal")
                gt.check_inputs_match(ad, fringe)

            origin_str = f" (obtained from {origin})" if origin else ""
            log.stdinfo(f"{ad.filename}: using the fringe frame "
                        f"{fringe.filename}{origin_str}")
            matched_groups = (ad.group_id() == fringe.group_id())
            if scale or (scale is None and not matched_groups):
                factor = next(factors)
                if factor is None:
                    factor = self._calculate_fringe_scaling(ad, fringe)
                log.stdinfo("Scaling fringe frame by factor {:.3f} before "
                            "subtracting from {}".format(factor, ad.filename))
                # Since all elements of fringe_list might be references to the
                # same AD, need to make a copy before multiplying
                fringe_copy = deepcopy(fringe)
                fringe_copy.multiply(factor)
                ad.subtract(fringe_copy)
            else:
                if scale is None:
                    log.stdinfo("Not scaling fringe frame with same group ID "
                                "as {}".format(ad.filename))
                ad.subtract(fringe)

            # Timestamp and update header and filename
            ad.phu.set("FRINGEIM", fringe.filename, self.keyword_comments["FRINGEIM"])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
            if fringe.path:
                add_provenance(ad, fringe.filename, md5sum(fringe.path) or "", self.myself())
        return adinputs
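Two details of fringeCorrect are easy to miss: scale_factor is normalized into an iterator whatever its type, and a shared fringe frame is copied before scaling so other inputs still see unscaled data. A minimal sketch with plain numpy arrays standing in for AstroData objects:

import numpy as np
from copy import deepcopy

def as_factor_iter(scale_factor, n):
    # None or a scalar -> repeat n times; a one-element sequence is
    # broadcast; a full-length sequence is used as-is
    try:
        factors = list(scale_factor)
    except TypeError:
        return iter([scale_factor] * n)
    return iter(factors * n if len(factors) == 1 else factors)

sci = np.full((4, 4), 100.0)
fringe = np.ones((4, 4))            # may be shared by several inputs
factor = next(as_factor_iter(2.5, 3))
fringe_scaled = deepcopy(fringe)    # copy before scaling so other inputs
fringe_scaled *= factor             # still see the unscaled frame
sci -= fringe_scaled
assert fringe[0, 0] == 1.0          # the shared frame is untouched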
Example n. 16
def pointing_in_field(ad, refpos, frac_FOV=1.0, frac_slit=1.0):
    """
    See gemini_tools.pointing_in_field() for the API. This is an
    instrument-specific back end that you shouldn't be calling directly.

    No inputs are validated at this level; that's the responsibility of the
    calling function, for reasons of efficiency.
    
    The GNIRS imaging FOV check shifts the illumination mask (the keyhole)
    by the offset between the two pointings and tests whether the shifted
    and reference keyholes still overlap.
    
    :param ad: AstroData instance to be checked for whether it belongs
               in the same sky grouping as refpos
    :type ad: AstroData instance
    
    :param refpos: This is the POFFSET and QOFFSET of the reference image
    :type refpos: tuple of floats
    
    :param frac_FOV: For use with spectroscopy data
    :type frac_FOV: float
    
    :param frac_slit: For use with spectroscopy data
    :type frac_slit: float
    """
    # Object location in AD = refpos + shift
    xshift = ad.detector_x_offset() - refpos[0]
    yshift = ad.detector_y_offset() - refpos[1]

    # We inverse-scale by frac_FOV
    fov_xshift = -int(xshift / frac_FOV)
    fov_yshift = -int(yshift / frac_FOV)
    # Imaging:
    if 'IMAGE' in ad.tags:
        if (abs(fov_yshift) >= ad[0].shape[0]
                or abs(fov_xshift) >= ad[0].shape[1]):
            return False

        illum = get_illum_mask_filename(ad)
        if illum:
            illum_ad = gt.clip_auxiliary_data(adinput=ad,
                                              aux=astrodata.open(illum),
                                              aux_type="bpm")
            illum_data = illum_ad[0].data
        else:
            raise OSError("Cannot find illumination mask for {}".format(
                ad.filename))

        # Shift the illumination mask and see if the shifted keyhole
        # overlaps with the original keyhole
        shifted_data = shift(illum_data, (fov_yshift, fov_xshift),
                             order=0,
                             cval=DQ.unilluminated)
        return ((illum_data | shifted_data) == 0).any()

    # Spectroscopy:
    elif 'SPECT' in ad.tags:
        raise NotImplementedError("FOV lookup not yet supported for GNIRS "
                                  "Spectroscopy")

    # Some engineering observation or bad mask value etc.:
    else:
        raise ValueError("Can't determine FOV for unrecognized GNIRS config "
                         "({}, {})".format(ad.focal_plane_mask(),
                                           ad.disperser()))
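The imaging branch reduces to a shift-and-overlap test on a binary keyhole mask. A self-contained sketch, with an assumed bit value standing in for DQ.unilluminated:

import numpy as np
from scipy.ndimage import shift

UNILLUM = 64                            # assumed stand-in for DQ.unilluminated
mask = np.full((64, 64), UNILLUM, dtype=np.uint16)
mask[24:40, 24:40] = 0                  # illuminated "keyhole"

def fields_overlap(mask, dx, dy):
    # order=0 keeps the mask values intact; cval marks pixels shifted in
    # from outside as unilluminated, so zeros can only come from overlap
    shifted = shift(mask, (dy, dx), order=0, cval=UNILLUM)
    return ((mask | shifted) == 0).any()

print(fields_overlap(mask, 8, 0))       # True: keyholes still overlap
print(fields_overlap(mask, 40, 0))      # False: shifted clear of the keyhole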
Example n. 17
    def processSlits(self, adinputs=None, **params):
        """
        Compute and record the mean exposure epoch for a slit viewer image

        The 'slit viewer image' for each observation will almost certainly
        be a sequence of short exposures of the slit viewer camera,
        collected together for convenience. However, it cannot be guaranteed
        that slit viewer exposures will be taken throughout an entire
        science exposure; therefore, it is necessary to be able to compute
        the mean exposure epoch (i.e. the effective time that the combined
        slit viewer exposures were taken at). This allows a single science
        observation to be calibrated using multiple packets of slit viewer
        exposures, with appropriate weighting for the time delay between them.

        ``processSlits`` effectively computes a weighted average of the
        exposure epoch of all constituent slit viewer exposures, taking into
        account:

        - Length of each exposure;
        - Whether there is any overlap between the start/end of the
          exposure and the start/end of the overall 'image';
        - Time of each exposure, relative to the start of the 'image'.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        flat: str/None
            name of the processed slitflat to use (if None, use the
            calibration system)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        flat_list = params["flat"]
        if flat_list is None:
            self.getProcessedSlitFlat(adinputs)
            flat_list = [
                self._get_cal(ad, 'processed_slitflat') for ad in adinputs
            ]

        for ad, slitflat in zip(
                *gt.make_lists(adinputs, flat_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by processSlits".format(
                                ad.filename))
                continue

            if slitflat is None:
                log.warning("Unable to find slitflat calibration for {}; "
                            "skipping".format(ad.filename))
                continue
            else:
                sv_flat = slitflat[0].data

            # accumulators for computing the mean epoch
            sum_of_weights = 0.0
            accum_weighted_time = 0.0

            # Check the inputs have matching binning and SCI shapes.
            try:
                gt.check_inputs_match(adinput1=ad,
                                      adinput2=slitflat,
                                      check_filter=False)
            except ValueError:
                # This is most likely because the science frame has multiple
                # extensions and the slitflat needs to be copied
                slitflat = gt.clip_auxiliary_data(ad, slitflat, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, slitflat, check_filter=False)

            # get science start/end times
            sc_start = parse_timestr(ad.phu['UTSTART'])
            sc_end = parse_timestr(ad.phu['UTEND'])

            res = ad.res_mode()
            for ext in ad:
                sv_start = parse_timestr(ext.hdr['EXPUTST'])
                sv_end = parse_timestr(ext.hdr['EXPUTEND'])

                # compute overlap percentage and slit view image duration
                latest_start = max(sc_start, sv_start)
                earliest_end = min(sc_end, sv_end)
                # .total_seconds() goes negative when there is no overlap;
                # .seconds alone never does, so it could not be clipped
                overlap = max(
                    0.0, (earliest_end - latest_start).total_seconds())
                sv_duration = (sv_end - sv_start).total_seconds()
                overlap /= sv_duration  # convert to a fraction of the exposure

                # compute the offset (the value to be weighted), in seconds,
                # from the start of the science exposure
                offset = 42.0  # placeholder; one of the branches below
                               # always overrides it
                if sc_start <= sv_start and sv_end <= sc_end:
                    offset = ((sv_start - sc_start).total_seconds()
                              + sv_duration / 2.0)
                elif sv_start < sc_start:
                    offset = overlap * sv_duration / 2.0
                elif sv_end > sc_end:
                    offset = overlap * sv_duration / 2.0
                    offset += (sv_start - sc_start).total_seconds()

                # add flux-weighted offset (plus weight itself) to accumulators
                flux = _total_obj_flux(res, ext.data, sv_flat)
                weight = flux * overlap
                sum_of_weights += weight
                accum_weighted_time += weight * offset

            # final mean exposure epoch computation
            if sum_of_weights > 0.0:
                mean_offset = accum_weighted_time / sum_of_weights
                mean_offset = timedelta(seconds=mean_offset)
                # write the mean exposure epoch into the PHU
                sc_start = parse_timestr(ad.phu['UTSTART'])
                mean_epoch = sc_start + mean_offset
                ad.phu['AVGEPOCH'] = (mean_epoch.strftime("%H:%M:%S.%f")[:-3],
                                      'Mean Exposure Epoch')

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=params["suffix"], strip=True)
        return adinputs
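All three offset branches above reduce to the same quantity: the midpoint, relative to the science start, of the part of the sub-exposure that overlaps the science exposure. A simplified runnable sketch of the flux-weighted mean epoch using that single rule (the times and fluxes are made up):

from datetime import datetime, timedelta

# (start, end, flux) of each slit-viewer sub-exposure, in seconds from the
# science exposure start; the science exposure runs 0..80 s
subexps = [(0.0, 30.0, 1000.0), (30.0, 60.0, 900.0), (60.0, 90.0, 1100.0)]
sc_start_s, sc_end_s = 0.0, 80.0

wsum = tsum = 0.0
for start, end, flux in subexps:
    used = max(0.0, min(end, sc_end_s) - max(start, sc_start_s))
    frac = used / (end - start)                   # fraction of sub-exposure used
    offset = max(start, sc_start_s) + used / 2.0  # midpoint of the used part
    weight = flux * frac
    wsum += weight
    tsum += weight * offset

sc_start = datetime(2024, 1, 1, 12, 0, 0)
print(sc_start + timedelta(seconds=tsum / wsum))  # mean exposure epoch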
Example n. 18
    def addIllumMaskToDQ(self,
                         adinputs=None,
                         suffix=None,
                         illum_mask=None,
                         shift=None,
                         max_shift=20):
        """
        Adds an illumination mask to each AD object. This is only done for
        full-frame (not Central Spectrum) GMOS spectra, and is calculated by
        making a model illumination pattern from the attached MDF and cross-
        correlating it with the spatial profile of the data.

        Parameters
        ----------
        suffix : str
            suffix to be added to output files
        illum_mask : str/None
            name of illumination mask (None -> use default)
        shift : int/None
            user-defined shift to apply to illumination mask
        max_shift : int
            maximum shift (in unbinned pixels) allowable for the cross-
            correlation
        """
        offset_dict = {
            ("GMOS-N", "Hamamatsu-N"): 1.5,
            ("GMOS-N", "e2vDD"): -0.2,
            ("GMOS-N", "EEV"): 0.7,
            ("GMOS-S", "Hamamatsu-S"): 5.5,
            ("GMOS-S", "EEV"): 3.8
        }
        edges = 50  # try to eliminate issues at the very edges

        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        # Do this now for memory management reasons. We'll be creating large
        # arrays temporarily and don't want the permanent mask arrays to
        # fragment the free memory.
        for ad in adinputs:
            for ext in ad:
                if ext.mask is None:
                    ext.mask = np.zeros_like(ext.data).astype(DQ.datatype)

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            ybin = ad.detector_y_bin()
            ad_detsec = ad.detector_section()
            no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                             for detsec in ad_detsec)
            has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec)
                          and 'Hamamatsu' in ad.detector_name(pretty=True))

            if illum:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

                for ext, illum_ext in zip(ad, final_illum):
                    if illum_ext is not None:
                        # Ensure we're only adding the unilluminated bit
                        iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                        0).astype(DQ.datatype)
                        ext.mask |= iext
            elif not no_bridges:  # i.e. there are bridges.
                try:
                    mdf = ad.MDF
                except AttributeError:
                    log.warning(f"MDF not found for {ad.filename} - cannot "
                                "add illumination mask.")
                    continue

                # Default operation for GMOS full-frame LS
                # Sadly, we cannot do this reliably without concatenating the
                # arrays and using a big chunk of memory.
                row_medians = np.percentile(np.concatenate(
                    [ext.data for ext in ad], axis=1),
                                            95,
                                            axis=1)
                row_medians -= at.boxcar(row_medians, size=50 // ybin)

                # Construct a model of the slit illumination from the MDF
                # coefficients are from G-IRAF except c0, approx. from data
                model = np.zeros_like(row_medians, dtype=int)
                for ypos, ysize in mdf['slitpos_my', 'slitsize_my']:
                    y = ypos + np.array([-0.5, 0.5]) * ysize
                    c0 = offset_dict[ad.instrument(),
                                     ad.detector_name(pretty=True)]
                    if ad.instrument() == "GMOS-S":
                        c1, c2, c3 = (0.99911, -1.7465e-5, 3.0494e-7)
                    else:
                        c1, c2, c3 = (0.99591859227, 5.3042211333437e-8,
                                      1.7447902551997e-7)
                    yccd = ((c0 + y *
                             (c1 + y *
                              (c2 + y * c3))) * 1.611444 / ad.pixel_scale() +
                            0.5 * model.size).astype(int)
                    model[yccd[0]:yccd[1] + 1] = 1
                    log.stdinfo("Expected slit location from pixels "
                                f"{yccd[0]+1} to {yccd[1]+1}")

                if shift is None:
                    mshift = max_shift // ybin + 2
                    mshift2 = mshift + edges
                    # model[] indexing avoids reduction in signal as slit
                    # is shifted off the top of the image
                    cntr = model.size - edges - mshift2 - 1
                    xcorr = correlate(row_medians[edges:-edges],
                                      model[mshift2:-mshift2],
                                      mode='full')[cntr - mshift:cntr + mshift]
                    # This line avoids numerical errors in the spline fit
                    xcorr -= np.median(xcorr)
                    # This calculates the offsets of each point from the
                    # straight line between its neighbours
                    std = (xcorr[1:-1] - 0.5 *
                           (xcorr + np.roll(xcorr, 2))[2:]).std()
                    xspline = fit_1D(xcorr,
                                     function="spline3",
                                     order=None,
                                     weights=np.full(len(xcorr),
                                                     1. / std)).evaluate()
                    yshift = xspline.argmax() - mshift
                    maxima = xspline[1:-1][np.logical_and(
                        np.diff(xspline[:-1]) > 0,
                        np.diff(xspline[1:]) < 0)]
                    significant_maxima = (maxima >
                                          xspline.max() - 3 * std).sum()
                    if significant_maxima > 1 or abs(
                            yshift // ybin) > max_shift:
                        log.warning(
                            f"{ad.filename}: cross-correlation peak is"
                            " untrustworthy so not adding illumination "
                            "mask. Please re-run with a specified shift.")
                        yshift = None
                else:
                    yshift = shift

                if yshift is not None:
                    log.stdinfo(
                        f"{ad.filename}: Shifting mask by {yshift} pixels")
                    row_mask = np.ones_like(model, dtype=int)
                    if yshift < 0:
                        row_mask[:yshift] = 1 - model[-yshift:]
                    elif yshift > 0:
                        row_mask[yshift:] = 1 - model[:-yshift]
                    else:
                        row_mask[:] = 1 - model
                    for ext in ad:
                        ext.mask |= (row_mask * DQ.unilluminated).astype(
                            DQ.datatype)[:, np.newaxis]

            if has_48rows:
                actual_rows = 48 // ybin
                for ext in ad:
                    ext.mask[:actual_rows] |= DQ.unilluminated

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
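At its core the shift determination is a 1-D cross-correlation between the measured spatial profile and the binary slit model, with the peak lag giving the mask shift. A minimal sketch without the spline refinement or the trustworthiness checks:

import numpy as np
from scipy.signal import correlate

rng = np.random.default_rng(0)
model = np.zeros(200)
model[60:80] = 1.0                                # expected slit location
profile = np.roll(model, 7) + rng.normal(0, 0.05, 200)  # data shifted by +7

xcorr = correlate(profile, model, mode='full')
lag = xcorr.argmax() - (len(model) - 1)           # zero lag sits at index N-1
print(lag)                                        # 7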
Example n. 19
def pointing_in_field(pos, refpos, frac_FOV=1.0, frac_slit=1.0):

    """
    See gemini_tools.pointing_in_field() for the API. This is an
    instrument-specific back end that you shouldn't be calling directly.

    No inputs are validated at this level; that's the responsibility of the
    calling function, for reasons of efficiency.
    
    The GNIRS FOV is determined by whether the calculated center point 
    (according to the center of mass of the illumination mask) of the
    image falls within the illumination mask of the reference image.
    
    :param pos: AstroData instance to be checked for whether it belongs
                in the same sky grouping as refpos
    :type pos: AstroData instance
    
    :param refpos: This is the POFFSET and QOFFSET of the reference image
    :type refpos: tuple of floats
    
    :param frac_FOV: For use with spectroscopy data
    :type frac_FOV: float
    
    :param frac_slit: For use with spectroscopy data
    :type frac_slit: float
    """
    # Since this function gets looked up and evaluated, we have to do any
    # essential imports in-line (but Python caches them)
    
    # Extract pointing info in terms of the x and y offsets
    xshift = refpos[1] - pos.phu['QOFFSET']
    yshift = refpos[0] - pos.phu['POFFSET']
    ad = pos

    # Imaging:
    if 'IMAGE' in pos.tags:
        illum = get_illum_mask_filename(ad)
        if illum:
            illum_ad = gt.clip_auxiliary_data(adinput=pos,
                            aux=astrodata.open(illum), aux_type="bpm")
            illum_data = illum_ad[0].data
        else:
            raise OSError("Cannot find illumination mask for {}".
                          format(ad.filename))

        # Finding the center of the illumination mask
        center_illum = (illum_ad.phu['CENMASSX'], illum_ad.phu['CENMASSY'])
        checkpos = (int(center_illum[0] + xshift),
                    int(center_illum[1] + yshift))
        
        # If the position to check is going to fall outside the illumination
        # mask, return straight away to avoid an error
        if ((abs(xshift) >= abs(center_illum[0])) or 
            (abs(yshift) >= abs(center_illum[1]))):
            return False

        # Note that numpy data arrays are reversed in x and y    
        return illum_data[checkpos[1], checkpos[0]] == 0 

    # Spectroscopy:
    elif 'SPECT' in ad.tags:
        raise NotImplementedError("FOV lookup not yet supported for GNIRS "
                                  "Spectroscopy")

    # Some engineering observation or bad mask value etc.:
    else:
        raise ValueError("Can't determine FOV for unrecognized GNIRS config "
          "({}, {})".format(ad.focal_plane_mask(), ad.disperser()))
Example n. 20
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_cal=None):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, the calibration database(s) will
        be queried.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_cal: str
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if do_cal == 'skip':
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            bias_list = self.caldb.get_processed_bias(adinputs)
        else:
            bias_list = (bias, None)

        # Provide a bias AD object for every science frame, and an origin
        for ad, bias, origin in zip(*gt.make_lists(adinputs, *bias_list,
                                    force_ad=(1,))):
            if ad.phu.get(timestamp_key):
                log.warning(f"{ad.filename}: already processed by "
                            "biasCorrect. Continuing.")
                continue

            if bias is None:
                if 'sq' not in self.mode and do_cal != 'force':
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    log.warning(f"{ad.filename}: no bias was specified. "
                                "Continuing.")
                    continue

            try:
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad, bias, check_filter=False,
                                      check_units=True)

            origin_str = f" (obtained from {origin})" if origin else ""
            log.stdinfo(f"{ad.filename}: subtracting the bias "
                         f"{bias.filename}{origin_str}")
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename, self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
            if bias.path:
                add_provenance(ad, bias.filename, md5sum(bias.path) or "", self.myself())

        return adinputs
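The try/clip/re-check pattern around gt.check_inputs_match recurs throughout these primitives. This toy version only illustrates the flow with a central cut; the real clip_auxiliary_data works from detector-section keywords, not array centers:

import numpy as np

def match_or_clip(sci, cal):
    # On a shape mismatch, assume the calibration covers the science
    # footprint and take the central cut (illustration only)
    if cal.shape == sci.shape:
        return cal
    y0 = (cal.shape[0] - sci.shape[0]) // 2
    x0 = (cal.shape[1] - sci.shape[1]) // 2
    if y0 < 0 or x0 < 0:
        raise ValueError("calibration is smaller than the science frame")
    return cal[y0:y0 + sci.shape[0], x0:x0 + sci.shape[1]]

sci = np.zeros((512, 512))
bias = np.ones((1024, 1024))
print(match_or_clip(sci, bias).shape)   # (512, 512)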
Example n. 21
    def addIllumMaskToDQ(self, adinputs=None, suffix=None, illum_mask=None):
        """
        Adds an illumination mask to each AD object

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        illum_mask: str/None
            name of illumination mask (None -> use default)
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        for ad, illum in zip(
                *gt.make_lists(adinputs, illum_mask, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning(
                    'No changes will be made to {}, since it has '
                    'already been processed by addIllumMaskToDQ'.format(
                        ad.filename))
                continue

            ad_detsec = ad.detector_section()
            no_bridges = all(detsec.y1 > 1600 and detsec.y2 < 2900
                             for detsec in ad_detsec)
            has_48rows = (all(detsec.y2 == 4224 for detsec in ad_detsec)
                          and 'Hamamatsu' in ad.detector_name(pretty=True))

            if illum:
                log.fullinfo("Using {} as illumination mask".format(
                    illum.filename))
                final_illum = gt.clip_auxiliary_data(ad,
                                                     aux=illum,
                                                     aux_type='bpm',
                                                     return_dtype=DQ.datatype)

                for ext, illum_ext in zip(ad, final_illum):
                    if illum_ext is not None:
                        # Ensure we're only adding the unilluminated bit
                        iext = np.where(illum_ext.data > 0, DQ.unilluminated,
                                        0).astype(DQ.datatype)
                        ext.mask = iext if ext.mask is None else ext.mask | iext
            elif not no_bridges:  # i.e. there are bridges.
                # Default operation for GMOS full-frame LS
                # The 95% cut should ensure that we're sampling something
                # bright (even for an arc)
                # The max is intended to handle R150 data, where many of
                # the extensions are unilluminated

                row_medians = np.max(
                    [np.percentile(ext.data, 95, axis=1) for ext in ad],
                    axis=0)
                rows = np.arange(len(row_medians))
                m_init = models.Polynomial1D(degree=3)
                fit_it = fitting.FittingWithOutlierRemoval(
                    fitting.LinearLSQFitter(),
                    outlier_func=sigma_clip,
                    sigma_upper=1,
                    sigma_lower=3)
                m_final, _ = fit_it(m_init, rows, row_medians)
                model_fit = m_final(rows)
                # Find points which are significantly below the smooth illumination fit
                # First ensure we don't worry about single rows
                row_mask = at.boxcar(model_fit - row_medians > 0.1 * model_fit,
                                     operation=np.logical_and,
                                     size=1)
                row_mask = at.boxcar(row_mask, operation=np.logical_or, size=3)
                for ext in ad:
                    ext.mask |= (row_mask * DQ.unilluminated).astype(
                        DQ.datatype)[:, np.newaxis]

            if has_48rows:
                actual_rows = 48 // ad.detector_y_bin()
                for ext in ad:
                    ext.mask[:actual_rows] |= DQ.unilluminated

            # Timestamp and update filename
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)

        return adinputs
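The bridge-detection branch fits a smooth illumination model with iterative sigma clipping and then flags rows that fall well below the fit. A self-contained sketch of that astropy fitting pattern on synthetic data:

import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip

rows = np.arange(100)
profile = 1000.0 - 0.5 * rows          # smooth illumination gradient
profile[40:45] = 100.0                 # dark rows where a bridge sits

fit_it = fitting.FittingWithOutlierRemoval(
    fitting.LinearLSQFitter(), outlier_func=sigma_clip,
    sigma_upper=1, sigma_lower=3)
m_final, _ = fit_it(models.Polynomial1D(degree=3), rows, profile)
model_fit = m_final(rows)

row_mask = model_fit - profile > 0.1 * model_fit   # >10% below the fit
print(np.where(row_mask)[0])                       # [40 41 42 43 44]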
Example n. 22
    def addDQ(self, adinputs=None, **params):
        """
        This primitive is used to add a DQ extension to the input AstroData
        object. The value of a pixel in the DQ extension will be the sum of the
        following: (0=good, 1=bad pixel (found in bad pixel mask), 2=pixel is
        in the non-linear regime, 4=pixel is saturated). This primitive will
        trim the BPM to match the input AstroData object(s).

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        static_bpm: str
            Name of bad pixel mask ("default" -> use default from look-up table)
            If set to None, no static_bpm will be added.
        user_bpm: str
            Name of the bad pixel mask created by the user from flats and
            darks.  It is an optional BPM that can be added to the static one.
        add_illum_mask: bool
            add illumination mask?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["addDQ"]
        sfx = params["suffix"]

        # Getting all the filenames first prevents reopening the same file
        # for each science AD
        static_bpm_list = params['static_bpm']
        user_bpm_list = params['user_bpm']

        if static_bpm_list == "default":
            static_bpm_list = [self._get_bpm_filename(ad) for ad in adinputs]

        for ad, static, user in zip(*gt.make_lists(adinputs, static_bpm_list,
                                                   user_bpm_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning('No changes will be made to {}, since it has '
                    'already been processed by addDQ'.format(ad.filename))
                continue

            if static is None:
                # So it can be zipped with the AD
                final_static = [None] * len(ad)
            else:
                log.fullinfo("Using {} as static BPM".format(static.filename))
                final_static = gt.clip_auxiliary_data(ad, aux=static,
                                        aux_type='bpm', return_dtype=DQ.datatype)

            if user is None:
                final_user = [None] * len(ad)
            else:
                log.fullinfo("Using {} as user BPM".format(user.filename))
                final_user = gt.clip_auxiliary_data(ad, aux=user,
                                        aux_type='bpm', return_dtype=DQ.datatype)

            for ext, static_ext, user_ext in zip(ad, final_static, final_user):
                extver = ext.hdr['EXTVER']
                if ext.mask is not None:
                    log.warning('A mask already exists in extver {}'.
                                format(extver))
                    continue

                non_linear_level = ext.non_linear_level()
                saturation_level = ext.saturation_level()

                # Need to create the array first for 3D raw F2 data, with 2D BPM
                ext.mask = np.zeros_like(ext.data, dtype=DQ.datatype)
                if static_ext is not None:
                    ext.mask |= static_ext.data
                if user_ext is not None:
                    ext.mask |= user_ext.data

                if saturation_level:
                    log.fullinfo('Flagging saturated pixels in {}:{} '
                                 'above level {:.2f}'.
                                 format(ad.filename, extver, saturation_level))
                    ext.mask |= np.where(ext.data >= saturation_level,
                                         DQ.saturated, 0).astype(DQ.datatype)

                if non_linear_level:
                    if saturation_level:
                        if saturation_level > non_linear_level:
                            log.fullinfo('Flagging non-linear pixels in {}:{} '
                                         'above level {:.2f}'.
                                         format(ad.filename, extver,
                                                non_linear_level))
                            ext.mask |= np.where((ext.data >= non_linear_level) &
                                                 (ext.data < saturation_level),
                                                 DQ.non_linear, 0).astype(DQ.datatype)
                            # Readout modes of IR detectors can result in
                            # saturated pixels having values below the
                            # saturation level. Flag those. Assume we have an
                            # IR detector here because both non-linear and
                            # saturation levels are defined and nonlin<sat
                            regions, nregions = measurements.label(
                                                ext.data < non_linear_level)
                            # In all my tests, region 1 has been the majority
                            # of the image; however, I cannot guarantee that
                            # this is always the case and therefore we should
                            # check the size of each region
                            region_sizes = measurements.labeled_comprehension(
                                ext.data, regions, np.arange(1, nregions+1),
                                len, int, 0)
                            # First, assume all regions are saturated, and
                            # remove any very large ones. This is much faster
                            # than progressively adding each region to DQ
                            hidden_saturation_array = np.where(regions > 0,
                                DQ.saturated, 0).astype(DQ.datatype)
                            for region in range(1, nregions+1):
                                # Limit of 10000 pixels for a hole is a bit arbitrary
                                if region_sizes[region-1] > 10000:
                                    hidden_saturation_array[regions==region] = 0
                            ext.mask |= hidden_saturation_array

                        elif saturation_level < non_linear_level:
                            log.warning('{}:{} has saturation level less than '
                                'non-linear level'.format(ad.filename, extver))
                        else:
                            log.fullinfo('Saturation and non-linear levels '
                                         'are the same for {}:{}. Only '
                                         'flagging saturated pixels'.
                                format(ad.filename, extver))
                    else:
                        log.fullinfo('Flagging non-linear pixels in {}:{} '
                                     'above level {:.2f}'.
                                     format(ad.filename, extver,
                                            non_linear_level))
                        ext.mask |= np.where(ext.data >= non_linear_level,
                                             DQ.non_linear, 0).astype(DQ.datatype)

        # Handle latency if requested
        if params.get("latency", False):
            try:
                adinputs = self.addLatencyToDQ(adinputs, time=params["time"],
                                               non_linear=params["non_linear"])
            except AttributeError:
                log.warning("addLatencyToDQ() not defined in primitivesClass "
                            + self.__class__.__name__)

        # Add the illumination mask if requested
        if params['add_illum_mask']:
            adinputs = self.addIllumMaskToDQ(adinputs, illum_mask=params["illum_mask"])

        # Timestamp and update filenames
        for ad in adinputs:
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=sfx, strip=True)

        return adinputs
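The saturation and non-linearity flagging boils down to two threshold masks OR-ed into DQ. A minimal numpy sketch using the bit values quoted in the docstring:

import numpy as np

NON_LINEAR, SATURATED = 2, 4           # bit values from the docstring above
data = np.array([100.0, 45000.0, 60000.0, 30000.0])
non_linear_level, saturation_level = 40000.0, 55000.0

mask = np.zeros_like(data, dtype=np.uint16)
mask |= np.where(data >= saturation_level, SATURATED, 0).astype(np.uint16)
mask |= np.where((data >= non_linear_level) & (data < saturation_level),
                 NON_LINEAR, 0).astype(np.uint16)
print(mask)                            # [0 2 4 0]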
Example n. 23
    def biasCorrect(self, adinputs=None, suffix=None, bias=None, do_bias=True):
        """
        The biasCorrect primitive will subtract the science extension of the
        input bias frames from the science extension of the input science
        frames. The variance and data quality extension will be updated, if
        they exist. If no bias is provided, getProcessedBias will be called
        to ensure a bias exists for every adinput.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        bias: str/list of str
            bias(es) to subtract
        do_bias: bool
            perform bias subtraction?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys[self.myself()]

        if not do_bias:
            log.warning("Bias correction has been turned off.")
            return adinputs

        if bias is None:
            self.getProcessedBias(adinputs, refresh=False)
            bias_list = self._get_cal(adinputs, 'processed_bias')
        else:
            bias_list = bias

        # Provide a bias AD object for every science frame
        for ad, bias in zip(
                *gt.make_lists(adinputs, bias_list, force_ad=True)):
            if ad.phu.get(timestamp_key):
                log.warning("No changes will be made to {}, since it has "
                            "already been processed by biasCorrect".format(
                                ad.filename))
                continue

            if bias is None:
                if 'qa' in self.mode:
                    log.warning("No changes will be made to {}, since no "
                                "bias was specified".format(ad.filename))
                    continue
                else:
                    raise OSError('No processed bias listed for {}'.format(
                        ad.filename))

            try:
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)
            except ValueError:
                bias = gt.clip_auxiliary_data(ad, aux=bias, aux_type='cal')
                # An Error will be raised if they don't match now
                gt.check_inputs_match(ad,
                                      bias,
                                      check_filter=False,
                                      check_units=True)

            log.fullinfo('Subtracting this bias from {}:\n{}'.format(
                ad.filename, bias.filename))
            ad.subtract(bias)

            # Record bias used, timestamp, and update filename
            ad.phu.set('BIASIM', bias.filename,
                       self.keyword_comments['BIASIM'])
            gt.mark_history(ad, primname=self.myself(), keyword=timestamp_key)
            ad.update_filename(suffix=suffix, strip=True)
            if bias.path:
                add_provenance(ad, bias.filename,
                               md5sum(bias.path) or "", self.myself())

        return adinputs
Example n. 24
    def scaleFringeToScience(self, rc):
        """
        This primitive will scale the fringes to their matching science data
        The fringes should be in the stream this primitive is called on,
        and the reference science frames should be loaded into the RC,
        as, eg. rc["science"] = adinput.
        
        There are two ways to find the value to scale fringes by:
        1. If stats_scale is set to True, the equation:
        (letting science data = b (or B), and fringe = a (or A))
    
        arrayB = SCIb[(SCIb < SCIb.median + 2.5*SCIb.std) &
                      (SCIb > SCIb.median - 3*SCIb.std)]
        scale = arrayB.std / SCIa.std
    
        The section of the SCI arrays to use for calculating these statistics
        is the CCD2 SCI data excluding the outer 5% pixels on all 4 sides.
        Future enhancement: allow user to choose section
    
        2. If stats_scale=False, then scale will be calculated using:
        exposure time of science / exposure time of fringe

        :param stats_scale: Use statistics to calculate the scale values,
                            rather than exposure time
        :type stats_scale: Python boolean (True/False)
        """
        
        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])
        
        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "scaleFringeToScience",
                                 "starting"))
        
        # Define the keyword to be used for the time stamp for this primitive
        timestamp_key = self.timestamp_keys["scaleFringeToScience"]

        # Check for user-supplied science frames
        fringe = rc.get_inputs_as_astrodata()
        science_param = rc["science"]
        fringe_dict = None
        if science_param is not None:
            # The user supplied an input to the science parameter
            if not isinstance(science_param, list):
                science_list = [science_param]
            else:
                science_list = science_param

            # If there is one fringe and multiple science frames,
            # the fringe must be deepcopied to allow it to be
            # scaled separately for each frame
            if len(fringe)==1 and len(science_list)>1:
                fringe = [deepcopy(fringe[0]) for img in science_list]

            # Convert filenames to AD instances if necessary
            tmp_list = []
            for science in science_list:
                if type(science) is not AstroData:
                    science = AstroData(science)
                tmp_list.append(science)
            science_list = tmp_list
            
            fringe_dict = gt.make_dict(key_list=science_list, 
                                       value_list=fringe)
            fringe_output = []
        else:
            log.warning("No science frames specified; no scaling will be done")
            science_list = []
            fringe_output = fringe

        # Loop over each AstroData object in the science list
        for ad in science_list:
            
            # Retrieve the appropriate fringe
            fringe = fringe_dict[ad]

            # Check the inputs have matching filters, binning and SCI shapes.
            try:
                gt.check_inputs_match(ad1=ad, ad2=fringe)
            except Errors.ToolboxError:
                # If not, try to clip the fringe frame to the size of the
                # science data
                # For a GMOS example, this allows a full frame fringe to
                # be used for a CCD2-only science frame. 
                fringe = gt.clip_auxiliary_data(
                    adinput=ad, aux=fringe, aux_type="cal")[0]

                # Check again, but allow it to fail if they still don't match
                gt.check_inputs_match(ad1=ad, ad2=fringe)

            # Check whether statistics should be used
            stats_scale = rc["stats_scale"]

            # Calculate the scale value
            scale = 1.0
            if not stats_scale:
                # Use the exposure times to calculate the scale
                log.fullinfo("Using exposure times to calculate the scaling"+
                             " factor")
                try:
                    scale = ad.exposure_time() / fringe.exposure_time()
                except Exception:
                    raise Errors.InputError("Could not get exposure times "
                                            "for %s, %s. Try stats_scale=True" %
                                            (ad.filename, fringe.filename))
            else:

                # Use statistics to calculate the scaling factor
                log.fullinfo("Using statistics to calculate the " +
                             "scaling factor")

                # Deepcopy the input so it can be manipulated without
                # affecting the original
                statsad = deepcopy(ad)
                statsfringe = deepcopy(fringe)

                # Trim off any overscan region still present
                statsad,statsfringe = gt.trim_to_data_section([statsad,
                                                               statsfringe])

                # Check the number of science extensions; if more than
                # one, use CCD2 data only
                nsciext = statsad.count_exts("SCI")
                if nsciext>1:

                    # Get the CCD numbers and ordering information
                    # corresponding to each extension
                    log.fullinfo("Trimming data to data section to remove "\
                                 "overscan region")
                    sci_info,frng_info = gt.array_information([statsad,
                                                               statsfringe])

                    # Pull out CCD2 data
                    scidata = []
                    frngdata = []
                    dqdata = []
                    for i in range(nsciext):

                        # Get the next extension in physical order
                        sciext = statsad["SCI",sci_info["amps_order"][i]]
                        frngext = statsfringe["SCI",frng_info["amps_order"][i]]

                        # Check to see if it is on CCD2; if so, keep it
                        if sci_info[
                            "array_number"][("SCI",sciext.extver())]==2:

                            scidata.append(sciext.data)

                            dqext = statsad["DQ",sci_info["amps_order"][i]]
                            maskext = statsad["OBJMASK",
                                              sci_info["amps_order"][i]]
                            if dqext is not None and maskext is not None:
                                dqdata.append(dqext.data | maskext.data)
                            elif dqext is not None:
                                dqdata.append(dqext.data)
                            elif maskext is not None:
                                dqdata.append(maskext.data)

                        if frng_info[
                            "array_number"][("SCI",frngext.extver())]==2:
                            frngdata.append(frngext.data)
                        
                    # Stack data if necessary
                    if len(scidata)>1:
                        scidata = np.hstack(scidata)
                        frngdata = np.hstack(frngdata)
                    else:
                        scidata = scidata[0]
                        frngdata = frngdata[0]
                    if len(dqdata)>0:
                        if len(dqdata)>1:
                            dqdata = np.hstack(dqdata)
                        else:
                            dqdata = dqdata[0]
                    else:
                        dqdata = None
                else:
                    scidata = statsad["SCI"].data
                    frngdata = statsfringe["SCI"].data

                    dqext = statsad["DQ"]
                    maskext = statsad["OBJMASK"]
                    if dqext is not None and maskext is not None:
                        dqdata = dqext.data | maskext.data
                    elif dqext is not None:
                        dqdata = dqext.data
                    elif maskext is not None:
                        dqdata = maskext.data
                    else:
                        dqdata = None

                if dqdata is not None:
                    # Replace any DQ-flagged data with the median value
                    smed = np.median(scidata[dqdata==0])
                    scidata = np.where(dqdata!=0,smed,scidata)

                # Calculate the maximum and minimum in a box centered on 
                # each data point.  The local depth of the fringe is
                # max - min.  The overall fringe strength is the median
                # of the local fringe depths.

                # Width of the box is binning and
                # filter dependent, determined by experimentation
                # Results don't seem to depend heavily on the box size
                if ad.filter_name(pretty=True).as_pytype() == "i":
                    size = 20
                else:
                    size = 40
                size //= ad.detector_x_bin().as_pytype()
                
                # Use ndimage maximum_filter and minimum_filter to
                # get the local maxima and minima
                import scipy.ndimage as ndimage
                sci_max = ndimage.maximum_filter(scidata, size)
                sci_min = ndimage.minimum_filter(scidata, size)

                # Take off 5% of the width as a border
                xborder = int(0.05 * scidata.shape[1])
                yborder = int(0.05 * scidata.shape[0])
                if xborder<20:
                    xborder = 20
                if yborder<20:
                    yborder = 20
                sci_max = sci_max[yborder:-yborder,xborder:-xborder]
                sci_min = sci_min[yborder:-yborder,xborder:-xborder]

                # Take the median difference
                sci_df = np.median(sci_max - sci_min)

                # Do the same for the fringe
                frn_max = ndimage.maximum_filter(frngdata, size)
                frn_min = ndimage.minimum_filter(frngdata, size)
                frn_max = frn_max[yborder:-yborder,xborder:-xborder]
                frn_min = frn_min[yborder:-yborder,xborder:-xborder]
                frn_df = np.median(frn_max - frn_min)

                # Scale factor
                # This tends to overestimate the factor, but it is
                # at least in the right ballpark, unlike the estimation
                # used in girmfringe (masked_sci.std/fringe.std)
                scale = sci_df / frn_df

            log.fullinfo("Scale factor found = "+str(scale))
                
            # Use mult from the arith toolbox to perform the scaling of 
            # the fringe frame
            scaled_fringe = fringe.mult(scale)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=scaled_fringe, keyword=timestamp_key)

            # Change the filename
            scaled_fringe.filename = gt.filename_updater(
                adinput=ad, suffix=rc["suffix"], strip=True)
            
            fringe_output.append(scaled_fringe)
            
        # Report the list of output AstroData objects to the reduction context
        rc.report_output(fringe_output)
        yield rc
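The statistics-based scale is the ratio of median local peak-to-valley fringe amplitudes, measured with maximum/minimum filters inside a trimmed border. A runnable sketch on synthetic data, following the filter size and border logic above:

import numpy as np
from scipy.ndimage import maximum_filter, minimum_filter

def fringe_amplitude(data, size=20, border_frac=0.05):
    # median local peak-to-valley depth, ignoring a border of at least
    # 20 pixels (or 5% of each axis, whichever is larger)
    hi = maximum_filter(data, size)
    lo = minimum_filter(data, size)
    yb = max(20, int(border_frac * data.shape[0]))
    xb = max(20, int(border_frac * data.shape[1]))
    return np.median((hi - lo)[yb:-yb, xb:-xb])

rng = np.random.default_rng(1)
_, x = np.mgrid[0:400, 0:400]
fringe = np.sin(x / 5.0)                         # unit-amplitude pattern
sci = 3.0 * fringe + rng.normal(0.0, 0.1, fringe.shape)
print(fringe_amplitude(sci) / fringe_amplitude(fringe))  # close to 3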