Example #1
    def test_measure_bg_from_image(self):
        ad = astrodata.open(os.path.join(
            TESTDATAPATH, 'GSAOI', 'S20150110S0208_sourcesDetected.fits'))
        ret = gt.measure_bg_from_image(ad, sampling=1000)
        correct = [(4769.078849397978, 136.30732335464836, 4051),
                   (4756.7707845272907, 138.45054591959072, 4141),
                   (4797.0736783339098, 143.2131578397852, 4130),
                   (4762.1949923200627, 136.64564601477898, 4134)]
        for rv, cv in zip(ret, correct):
            for a, b in zip(rv, cv):
                assert abs(a - b) < 0.01, 'Problem with gaussfit=True'
        ret = gt.measure_bg_from_image(ad, sampling=100, gaussfit=False)
        correct = [(4766.5586, 118.92503, 38514), (4750.9131, 124.56567, 39535),
                   (4794.6167, 128.12645, 39309), (4757.0063, 121.23917, 39388)]
        for rv, cv in zip(ret, correct):
            for a, b in zip(rv, cv):
                assert abs(a - b) < 0.01, 'Problem with gaussfit=False'
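For orientation, a minimal usage sketch: measure_bg_from_image returns one (mean, stddev, npixels) tuple per extension. The import paths are assumed from the DRAGONS layout, and the filename is the test file used above:

    import astrodata
    from gempy.gemini import gemini_tools as gt

    ad = astrodata.open('S20150110S0208_sourcesDetected.fits')
    for mean, sigma, npix in gt.measure_bg_from_image(ad, sampling=100,
                                                      gaussfit=False):
        print('background = {:.1f} +/- {:.1f} ({} pixels)'.format(
            mean, sigma, npix))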
Example #2
    def stackFrames(self, adinputs=None, **params):
        """
        This primitive will stack each science extension in the input dataset.
        New variance extensions are created from the stacked science extensions
        and the data quality extensions are propagated through to the final
        file.

        Parameters
        ----------
        adinputs : list of :class:`~astrodata.AstroData`
            Any set of 2D images.

        suffix : str
            Suffix to be added to output files.

        apply_dq : bool
            Apply DQ mask to data before combining?

        nlow, nhigh : int
            Number of low and high pixels to reject, for the 'minmax' method.
            The behaviour is inherited from IRAF: nlow and nhigh specify the
            number of low and high pixels to reject when data from all the
            input images are used. If pixels have been rejected by
            offsetting, masking, or thresholding, then a matching fraction
            of the remaining pixels, truncated to an integer, is used.
            Thus::

                nl = n * nlow/nimages + 0.001
                nh = n * nhigh/nimages + 0.001

            where n is the number of pixels surviving offsetting, masking,
            and thresholding, nimages is the number of input images, nlow
            and nhigh are the task parameters, and nl and nh are the final
            numbers of low and high pixels rejected by the algorithm. The
            factor of 0.001 adjusts for rounding of the ratio.

        operation : str
            Combine method.

        reject_method : str
            Pixel rejection method (none, minmax, sigclip, varclip).

        zero : bool
            Apply zero-level offset to match background levels?

        scale : bool
            Scale images to the same intensity?

        memory : float or None
            Available memory (in GB) for stacking calculations.

        statsec : str
            Section for statistics.

        separate_ext : bool
            Handle extensions separately?

        save_rejection_map : bool
            Save a map of how many input pixels were rejected at each
            output pixel?

        Returns
        -------
        list of :class:`~astrodata.AstroData`
            Sky stacked image. This list contains only one element. The list
            format is maintained so this primitive is consistent with all the
            others.

        Raises
        ------
        OSError
            If the number of extensions in any of the `AstroData` objects is
            different.

        OSError
            If the shape of any extension in any `AstroData` object is
            different.

        AssertionError
            If any of the `.gain()` descriptors is None.

        AssertionError
            If any of the `.read_noise()` descriptors is None.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["stackFrames"]
        sfx = params["suffix"]
        memory = params["memory"]
        if memory is not None:
            memory = int(memory * 1000000000)  # convert GB to bytes

        zero = params["zero"]
        scale = params["scale"]
        apply_dq = params["apply_dq"]
        separate_ext = params["separate_ext"]
        statsec = params["statsec"]
        reject_method = params["reject_method"]
        save_rejection_map = params["save_rejection_map"]

        if statsec:
            # Convert a 1-indexed FITS-style section '[x1:x2,y1:y2]' into a
            # tuple of 0-indexed Python slices in (y, x) order
            statsec = tuple([
                slice(int(start) - 1, int(end))
                for x in reversed(statsec.strip('[]').split(','))
                for start, end in [x.split(':')]
            ])

        if len(adinputs) <= 1:
            log.stdinfo("No stacking will be performed, since at least two "
                        "input AstroData objects are required for stackFrames")
            return adinputs

        if (reject_method == "minmax" and self.mode == "qa"
                and params["nlow"] + params["nhigh"] >= len(adinputs)):
            log.warning(
                "Trying to reject too many images. Setting nlow=nhigh=0.")
            params["nlow"] = 0
            params["nhigh"] = 0

        if len({len(ad) for ad in adinputs}) > 1:
            raise OSError("Not all inputs have the same number of extensions")
        if len({ext.nddata.shape for ad in adinputs for ext in ad}) > 1:
            raise OSError("Not all inputs images have the same shape")

        # We will determine the average gain from the input AstroData
        # objects and add in quadrature the read noise
        gains = [ad.gain() for ad in adinputs]
        read_noises = [ad.read_noise() for ad in adinputs]

        # Determine whether we can construct these averages
        process_gain = all(g is not None for gain in gains for g in gain)
        process_rn = all(rn is not None for read_noise in read_noises
                         for rn in read_noise)

        # Compute gain and read noise of final stacked images
        num_img = len(adinputs)
        num_ext = len(adinputs[0])
        if process_gain:
            gain_list = [
                np.mean([gain[i] for gain in gains]) for i in range(num_ext)
            ]
        if process_rn:
            read_noise_list = [
                np.sqrt(np.sum([rn[i] * rn[i]
                                for rn in read_noises])) / num_img
                for i in range(num_ext)
            ]

        zero_offsets = np.zeros((num_ext, num_img), dtype=np.float32)
        scale_factors = np.ones_like(zero_offsets)

        # Try to determine how much memory we're going to need to stack and
        # whether it's necessary to flush pixel data to disk first
        # Also determine kernel size from offered memory and bytes per pixel
        bytes_per_ext = []
        for ext in adinputs[0]:
            bytes = 0
            # Count _data twice to handle temporary arrays
            bytes += 2 * ext.data.dtype.itemsize
            if ext.variance is not None:
                bytes += ext.variance.dtype.itemsize

            bytes += 2  # mask always created
            bytes_per_ext.append(bytes * np.prod(ext.shape))

        if memory is not None and (num_img * max(bytes_per_ext) > memory):
            adinputs = self.flushPixels(adinputs)

        # Compute the scale and offset values by accessing the memmapped data
        # so we can pass those to the stacking function
        # TODO: Should probably be done better to consider only the overlap
        # regions between frames
        if scale or zero:
            levels = np.empty((num_img, num_ext), dtype=np.float32)
            for i, ad in enumerate(adinputs):
                for index in range(num_ext):
                    nddata = (ad[index].nddata.window[:] if statsec is None
                              else ad[index].nddata.window[statsec])
                    levels[i, index] = gt.measure_bg_from_image(
                        nddata, value_only=True)
            if scale and zero:
                log.warning(
                    "Both scale and zero are set. Setting scale=False.")
                scale = False
            if separate_ext:
                # Target value is corresponding extension of first image
                if scale:
                    scale_factors = (levels[0] / levels).T
                else:  # zero=True
                    zero_offsets = (levels[0] - levels).T
            else:
                # Target value is mean of all extensions of first image
                target = np.mean(levels[0])
                if scale:
                    scale_factors = np.tile(target / np.mean(levels, axis=1),
                                            num_ext).reshape(num_ext, num_img)
                else:  # zero=True
                    zero_offsets = np.tile(target - np.mean(levels, axis=1),
                                           num_ext).reshape(num_ext, num_img)
            if scale and np.min(scale_factors) < 0:
                log.warning("Some scale factors are negative. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isinf(scale_factors)):
                log.warning("Some scale factors are infinite. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isnan(scale_factors)):
                log.warning("Some scale factors are undefined. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False

        if reject_method == "varclip" and any(ext.variance is None
                                              for ad in adinputs
                                              for ext in ad):
            log.warning("Rejection method 'varclip' has been chosen but some"
                        " extensions have no variance. 'sigclip' will be used"
                        " instead.")
            reject_method = "sigclip"

        log.stdinfo("Combining {} inputs with {} and {} rejection".format(
            num_img, params["operation"], reject_method))

        stack_function = NDStacker(combine=params["operation"],
                                   reject=reject_method,
                                   log=self.log,
                                   **params)

        # NDStacker uses DQ if it exists; if we don't want that, delete the DQs!
        if not apply_dq:
            for ad in adinputs:
                for ext in ad:
                    ext.mask = None

        ad_out = astrodata.create(adinputs[0].phu)
        for index, (ext, sfactors, zfactors) in enumerate(
                zip(adinputs[0], scale_factors, zero_offsets)):
            status = (f"Combining extension {ext.id}."
                      if num_ext > 1 else "Combining images.")
            if scale:
                status += " Applying scale factors."
                numbers = sfactors
            elif zero:
                status += " Applying offsets."
                numbers = zfactors
            log.stdinfo(status)
            if ((scale or zero) and (index == 0 or separate_ext)):
                for ad, value in zip(adinputs, numbers):
                    log.stdinfo("{:40s}{:10.3f}".format(ad.filename, value))

            shape = adinputs[0][index].nddata.shape
            if memory is None:
                kernel = shape
            else:
                # Chop the image horizontally into equal-sized chunks to process
                # This uses the minimum number of steps and uses minimum memory
                # per step.
                oversubscription = (bytes_per_ext[index] *
                                    num_img) // memory + 1
                kernel = ((shape[0] + oversubscription - 1) //
                          oversubscription, ) + shape[1:]

            with_uncertainty = True  # Since all stacking methods return variance
            with_mask = apply_dq and not any(
                ad[index].nddata.window[:].mask is None for ad in adinputs)
            result = windowedOp(stack_function,
                                [ad[index].nddata for ad in adinputs],
                                scale=sfactors,
                                zero=zfactors,
                                kernel=kernel,
                                dtype=np.float32,
                                with_uncertainty=with_uncertainty,
                                with_mask=with_mask,
                                save_rejection_map=save_rejection_map)
            ad_out.append(result)
            log.stdinfo("")

        # Propagate REFCAT as the union of all input REFCATs
        refcats = [ad.REFCAT for ad in adinputs if hasattr(ad, 'REFCAT')]
        if refcats:
            try:
                out_refcat = table.unique(
                    table.vstack(refcats, metadata_conflicts='silent'),
                    keys=('RAJ2000', 'DEJ2000'))
            except KeyError:
                pass
            else:
                out_refcat['Id'] = list(range(1, len(out_refcat) + 1))
                ad_out.REFCAT = out_refcat

        # Set AIRMASS to be the mean of the input values
        try:
            airmass_kw = ad_out._keyword_for('airmass')
            mean_airmass = np.mean([ad.airmass() for ad in adinputs])
        except Exception:  # generic implementation failure (probably non-Gemini)
            pass
        else:
            ad_out.phu.set(airmass_kw, mean_airmass,
                           "Mean airmass for the exposure")

        # Set GAIN to the average of input gains. Set the RDNOISE to the
        # sum in quadrature of the input read noises.
        if process_gain:
            for ext, gain in zip(ad_out, gain_list):
                ext.hdr.set('GAIN', gain, self.keyword_comments['GAIN'])
            ad_out.phu.set('GAIN', gain_list[0], self.keyword_comments['GAIN'])

        if process_rn:
            for ext, rn in zip(ad_out, read_noise_list):
                ext.hdr.set('RDNOISE', rn, self.keyword_comments['RDNOISE'])
            ad_out.phu.set('RDNOISE', read_noise_list[0],
                           self.keyword_comments['RDNOISE'])

        # Add suffix to datalabel to distinguish from the reference frame
        if sfx[0] == '_':
            extension = sfx.replace('_', '-', 1).upper()
        else:
            extension = '-' + sfx.upper()
        ad_out.phu.set('DATALAB', "{}{}".format(ad_out.data_label(),
                                                extension),
                       self.keyword_comments['DATALAB'])

        # Add other keywords to the PHU about the stacking inputs
        ad_out.orig_filename = ad_out.phu.get('ORIGNAME')
        ad_out.phu.set('NCOMBINE', len(adinputs),
                       self.keyword_comments['NCOMBINE'])
        for i, ad in enumerate(adinputs, start=1):
            ad_out.phu.set('IMCMB{:03d}'.format(i),
                           ad.phu.get('ORIGNAME', ad.filename))

        # Timestamp and update filename and prepare to return single output
        gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
        ad_out.update_filename(suffix=sfx, strip=True)

        return [ad_out]
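The IRAF-inherited minmax arithmetic quoted in the docstring above can be checked in isolation. A minimal sketch in plain Python (the helper name is illustrative, not part of DRAGONS):

    def minmax_rejected(n, nimages, nlow, nhigh):
        # Scale the requested rejections by the fraction of pixels that
        # survived offsetting/masking/thresholding, then truncate to int;
        # the 0.001 guards against rounding of the ratio.
        nl = int(n * nlow / nimages + 0.001)
        nh = int(n * nhigh / nimages + 0.001)
        return nl, nh

    print(minmax_rejected(10, 10, 1, 2))  # all pixels survive -> (1, 2)
    print(minmax_rejected(5, 10, 1, 2))   # half survive -> (0, 1)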
Example #3
    def addOIWFSToDQ(self, adinputs=None, **params):
        """
        Flags pixels affected by the OIWFS on a GMOS image. It uses the
        header information to determine the location of the guide star, and
        basically "flood-fills" low-value pixels around it to give a first
        estimate. This map is then grown pixel-by-pixel until the values of
        the new pixels it covers stop increasing (indicating it has reached
        the sky level). Extensions to the right of the one with the guide star
        are handled by taking a starting point near the left-hand edge of the
        extension, level with the location at which the probe met the right-
        hand edge of the previous extension.
        
        This code assumes that data_section extends over all rows.
        
        Parameters
        ----------
        border: int
            distance from edge to start flood fill
        convergence: float
            amount within which successive sky level measurements have to
            agree during dilation phase for this phase to finish
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        border = 5  # Pixels in from edge where sky level is reliable
        convergence = 2.0

        for ad in adinputs:
            wfs = ad.wavefront_sensor()
            if wfs is None or 'OIWFS' not in wfs:
                log.fullinfo('OIWFS not used for image {}.'.format(ad.filename))
                continue

            oira = ad.phu.get('OIARA')
            oidec = ad.phu.get('OIADEC')
            if oira is None or oidec is None:
                log.warning('Cannot determine location of OI probe for {}. '
                            'Continuing.'.format(ad.filename))
                continue

            # DQ planes must exist so the unilluminated region is flagged
            if np.any([ext.mask is None for ext in ad]):
                log.warning('No DQ plane for {}. Continuing.'.format(ad.filename))
                continue

            # OIWFS comes in from the right, so we need to have the extensions
            # sorted in order from left to right
            ampsorder = list(np.argsort([detsec.x1
                                         for detsec in ad.detector_section()]))
            datasec_list = ad.data_section()
            gs_index = -1
            for index in ampsorder:
                ext = ad[index]
                wcs = WCS(ext.hdr)
                x, y = wcs.all_world2pix([[oira, oidec]], 0)[0]
                if x < datasec_list[index].x2 + 0.5:
                    gs_index = index
                    log.fullinfo('Guide star location found at ({:.2f},{:.2f})'
                                 ' on EXTVER {}'.format(x, y, ext.hdr['EXTVER']))
                    break
            if gs_index == -1:
                log.warning('Could not find OI probe location on any extensions.')
                continue

            # The OIWFS extends to the left of the actual star location, which
            # might have it vignetting a part of an earlier extension. Also, it
            # may be in a chip gap, which has the same effect
            amp_index = ampsorder.index(gs_index)
            if x < 50:
                amp_index -= 1
                x = (datasec_list[ampsorder[amp_index]].x2 -
                     datasec_list[ampsorder[amp_index]].x1 - border)
            else:
                x -= datasec_list[ampsorder[amp_index]].x1

            dilator = ndimage.morphology.generate_binary_structure(2, 1)
            for index in ampsorder[amp_index:]:
                datasec = datasec_list[index]
                sky, skysig, _ = gt.measure_bg_from_image(ad[index])

                # To avoid hassle with whether the overscan region is present
                # or not and how adjacent extensions relate to each other,
                # just deal with the data sections
                data_region = ad[index].data[:, datasec.x1:datasec.x2]
                mask_region = ad[index].mask[:, datasec.x1:datasec.x2]
                x1 = max(int(x-border), border)
                x2 = max(min(int(x+border), datasec.x2-datasec.x1), x1+border)
                y1 = max(int(y-border), 0)
                y2 = max(min(int(y+border), datasec.y2-datasec.y1), y1+border)
                wfs_sky = np.median(data_region[y1:y2, x1:x2])
                if wfs_sky > sky-convergence:
                    log.warning('Cannot distinguish probe region from sky for '
                                '{}'.format(ad.filename))
                    break

                # Flood-fill region around guide-star with all pixels fainter
                # than this boundary value
                boundary = sky - 0.2 * (sky-wfs_sky)
                regions, nregions = ndimage.measurements.label(
                    np.logical_and(data_region < boundary, mask_region==0))
                wfs_region = regions[int(y+0.5), int(x+0.5)]
                blocked = ndimage.morphology.binary_fill_holes(
                    regions == wfs_region)
                this_mean_sky = wfs_sky
                condition_met = False
                while not condition_met:
                    last_mean_sky = this_mean_sky
                    new_blocked = ndimage.morphology.binary_dilation(blocked,
                                                                     structure=dilator)
                    this_mean_sky = np.median(data_region[new_blocked ^ blocked])
                    blocked = new_blocked
                    if index <= gs_index:
                        condition_met = (this_mean_sky - last_mean_sky < convergence)
                    else:
                        # Dilate until WFS width at left of image equals width at
                        # right of previous extension image
                        width = np.sum(blocked[:, 0])
                        # y_width is set at the end of the previous extension's
                        # pass, so it is defined before this branch runs
                        condition_met = (y_width - width < 2) or index > 9

                # Flag DQ pixels as unilluminated only if not flagged
                # (to avoid problems with the edge extensions and/or saturation)
                datasec_mask = ad[index].mask[:, datasec.x1:datasec.x2]
                datasec_mask |= np.where(
                    blocked, np.where(datasec_mask > 0, 0, DQ.unilluminated), 0)

                # Set up for next extension. If flood-fill hasn't reached
                # right-hand edge of detector, stop.
                column = blocked[:, -1]
                y_width = np.sum(column)
                if y_width == 0:
                    break
                y = np.mean(np.arange(datasec.y1, datasec.y2)[column])
                x = border

        return adinputs
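The flood-fill-and-dilate scheme described above can be reproduced on a synthetic frame. A minimal sketch (toy numbers; no GMOS data or DQ handling):

    import numpy as np
    from scipy import ndimage

    sky, wfs_sky = 100.0, 60.0
    data = np.full((50, 50), sky)
    yy, xx = np.mgrid[:50, :50]
    data[(yy - 25)**2 + (xx - 25)**2 < 64] = wfs_sky  # dark probe shadow

    boundary = sky - 0.2 * (sky - wfs_sky)       # same cut as the code above
    regions, nregions = ndimage.label(data < boundary)
    blocked = regions == regions[25, 25]         # region holding the seed pixel
    blocked = ndimage.binary_fill_holes(blocked)
    blocked = ndimage.binary_dilation(blocked)   # one growth step of the mask
    print(blocked.sum())                         # slightly larger than the shadow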
Example #4
    def flagCosmicRaysByStacking(self, adinputs=None, **params):
        """
        This primitive flags sky pixels that deviate from the median image of
        the input AD frames by some positive multiple of a random background
        noise estimate. Since a random noise model is liable to underestimate
        the variance between images in the presence of seeing variations, any
        pixels containing significant object flux are excluded from this
        masking, by running detectSources on the median image and applying the
        resulting OBJMASK array. Any strongly-outlying values in those pixels
        will have to be dealt with when stacking, where less aggressive
        rejection based on the empirical variance between images can be used.

        This is loosely based on the algorithm used by imcoadd in the Gemini
        IRAF package.

        Parameters
        ----------
        suffix: str
            Suffix to be added to output files.
        hsigma: float
            Difference from the median image, in units of the background noise
            estimate, above which pixels should be flagged as contaminated.
        dilation: int
            Dilation radius for expanding cosmic ray mask, in pixels.

        Returns
        -------
        list of AstroData
            The input AstroData instances with flagged pixels added to the
            `mask` array for each extension using the `DQ.cosmic_ray` bit.

        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        # timestamp_key = self.timestamp_keys[self.myself()]

        hsigma = params['hsigma']
        dilation = params['dilation']

        if len(adinputs) < 2:
            log.stdinfo("No cosmic rays will be flagged, since at least "
                        "two images are required for {}".format(self.myself()))
            return adinputs

        # This code is taken from dilateObjectMask; factor it out later.
        xgrid, ygrid = np.mgrid[-int(dilation):int(dilation + 1),
                                -int(dilation):int(dilation + 1)]
        structure = xgrid * xgrid + ygrid * ygrid <= dilation * dilation

        # All inputs should have an OBJMASK, to avoid flagging pixels within
        # objects. If not, we present a warning but continue anyway.
        if not all(hasattr(ext, 'OBJMASK') for ad in adinputs for ext in ad):
            log.warning("Not all input extensions have an OBJMASK. Results "
                        "may be dubious.")

        median_image = self.stackFrames(adinputs, operation='median',
                                        reject_method='none', zero=True)[0]

        median_image = self.detectSources([median_image],
                    **self._inherit_params(params, "detectSources"))[0]

        for ad in adinputs:

            diff = self.subtractSky([deepcopy(ad)], sky=median_image,
                                    offset_sky=True, scale_sky=False)[0]

            # Background will be close to zero, so we only really need this
            # if there's no VAR; however, the overhead is low and it saves
            # us from repeatedly checking if there is a VAR on each extension
            bg_list = gt.measure_bg_from_image(diff, separate_ext=True)

            # Don't flag pixels that are already bad (and may not be CRs;
            # except those that are just near saturation, unilluminated etc.).
            # Also exclude regions with no data, where the variance is 0. so
            # values are always around the threshold.
            bitmask = DQ.bad_pixel | DQ.no_data

            for ext, diff_ext, (bg, noise, npix) in zip(ad, diff, bg_list):
                # Limiting level for good pixels in the median-subtracted data
                # (bkg should be ~0 after subtracting median image with an offset)
                if ext.variance is not None:
                    noise = np.sqrt(ext.variance)
                threshold = bg + hsigma * noise

                # Accumulate CR detections into the DQ mask(s) of the input/output
                crmask = ((diff_ext.data > threshold) &
                          (diff_ext.mask & bitmask == 0))
                if hasattr(diff_ext, 'OBJMASK'):
                    crmask &= (diff_ext.OBJMASK == 0)
                crmask = binary_dilation(crmask, structure)
                ext.mask |= np.where(crmask, DQ.cosmic_ray, DQ.good)

            ad.update_filename(suffix=params["suffix"], strip=True)

        return adinputs
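The core of flagCosmicRaysByStacking can be reproduced on synthetic data. A minimal sketch (toy noise figures; no DQ, OBJMASK, or sky-offset handling):

    import numpy as np
    from scipy.ndimage import binary_dilation

    rng = np.random.default_rng(0)
    frames = rng.normal(1000.0, 10.0, size=(5, 64, 64)).astype(np.float32)
    frames[2, 30, 30] += 500.0          # inject a cosmic-ray hit in one frame

    median = np.median(frames, axis=0)  # stand-in for the stacked median image
    noise, hsigma = 10.0, 7.0           # background noise estimate and threshold
    for frame in frames:
        crmask = frame - median > hsigma * noise
        crmask = binary_dilation(crmask)  # grow the mask around each hit
        # the primitive then ORs DQ.cosmic_ray into the frame's mask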
Example #5
    def addOIWFSToDQ(self, adinputs=None, **params):
        """
        Flags pixels affected by the On-Instrument Wavefront Sensor (OIWFS) on a
        GMOS image.

        It uses the header information to determine the location of the
        guide star, and basically "flood-fills" low-value pixels around it to
        give a first estimate. This map is then grown pixel-by-pixel until
        the values of the new pixels it covers stop increasing (indicating it
        has reached the sky level).

        Extensions to the right of the one with the guide star are handled by
        taking a starting point near the left-hand edge of the extension, level
        with the location at which the probe met the right-hand edge of the
        previous extension.

        This code assumes that data_section extends over all rows. It is, of
        course, very GMOS-specific.

        Parameters
        ----------
        adinputs : list of :class:`~gemini_instruments.gmos.AstroDataGmos`
            Science data that contains the shadow of the OIWFS.

        contrast : float (range 0-1)
            Initial fractional decrease from sky level to minimum brightness
            where the OIWFS "edge" is defined.

        convergence : float
            Amount within which successive sky level measurements have to
            agree during dilation phase for this phase to finish.

        Returns
        -------
        list of :class:`~gemini_instruments.gmos.AstroDataGmos`
            Data with updated `.DQ` plane considering the shadow of the OIWFS.
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        border = 5  # Pixels in from edge where sky level is reliable
        boxsize = 5
        contrast = params["contrast"]
        convergence = params["convergence"]

        for ad in adinputs:
            wfs = ad.wavefront_sensor()
            if wfs is None or 'OIWFS' not in wfs:
                log.fullinfo('OIWFS not used for image {}.'.format(
                    ad.filename))
                continue

            oira = ad.phu.get('OIARA')
            oidec = ad.phu.get('OIADEC')
            if oira is None or oidec is None:
                log.warning('Cannot determine location of OI probe for {}. '
                            'Continuing.'.format(ad.filename))
                continue

            # DQ planes must exist so the unilluminated region is flagged
            if np.any([ext.mask is None for ext in ad]):
                log.warning('No DQ plane for {}. Continuing.'.format(
                    ad.filename))
                continue

            # OIWFS comes in from the right, so we need to have the extensions
            # sorted in order from left to right
            ampsorder = list(
                np.argsort([detsec.x1 for detsec in ad.detector_section()]))
            datasec_list = ad.data_section()
            gs_index = -1
            for index in ampsorder:
                ext = ad[index]
                wcs = WCS(ext.hdr)
                x, y = wcs.all_world2pix([[oira, oidec]], 0)[0]
                if x < datasec_list[index].x2 + 0.5:
                    gs_index = index
                    log.fullinfo('Guide star location found at ({:.2f},{:.2f})'
                                 ' on EXTVER {}'.format(
                                     x, y, ext.hdr['EXTVER']))
                    break
            if gs_index == -1:
                log.warning(
                    'Could not find OI probe location on any extensions.')
                continue

            # The OIWFS extends to the left of the actual star location, which
            # might have it vignetting a part of an earlier extension. Also, it
            # may be in a chip gap, which has the same effect
            amp_index = ampsorder.index(gs_index)
            if x < 50:
                amp_index -= 1
                x = (datasec_list[ampsorder[amp_index]].x2 -
                     datasec_list[ampsorder[amp_index]].x1 - border)
            else:
                x -= datasec_list[ampsorder[amp_index]].x1

            dilator = ndimage.morphology.generate_binary_structure(2, 1)
            for index in ampsorder[amp_index:]:
                datasec = datasec_list[index]
                sky, skysig, _ = gt.measure_bg_from_image(ad[index])

                # To avoid hassle with whether the overscan region is present
                # or not and how adjacent extensions relate to each other,
                # just deal with the data sections
                data_region = ad[index].data[:, datasec.x1:datasec.x2]
                mask_region = ad[index].mask[:, datasec.x1:datasec.x2]
                x1 = max(int(x - boxsize), border)
                x2 = max(min(int(x + boxsize), datasec.x2 - datasec.x1),
                         x1 + border)

                # Try to find the minimum closest to our estimate of the
                # probe location, by downhill method on a spline fit (to
                # smooth out the noise)
                data, mask, var = NDStacker.mean(
                    ad[index].data[:, x1:x2].T,
                    mask=ad[index].mask[:, x1:x2].T)

                good_rows = np.logical_and(mask == DQ.good, var > 0)

                if np.sum(good_rows) == 0:
                    log.warning("No good rows in {} extension {}".format(
                        ad.filename, index))
                    continue

                rows = np.arange(datasec.y2 - datasec.y1)
                spline = UnivariateSpline(rows[good_rows],
                                          data[good_rows],
                                          w=1. / np.sqrt(var[good_rows]))
                newy = int(
                    optimize.minimize(spline, y, method='CG').x[0] + 0.5)
                y1 = max(int(newy - boxsize), 0)
                y2 = max(min(int(newy + boxsize), len(rows)), y1 + border)
                wfs_sky = np.median(data_region[y1:y2, x1:x2])
                if wfs_sky > sky - convergence:
                    log.warning('Cannot distinguish probe region from sky for '
                                '{}'.format(ad.filename))
                    break

                # Flood-fill region around guide-star with all pixels fainter
                # than this boundary value
                boundary = sky - contrast * (sky - wfs_sky)
                regions, nregions = ndimage.measurements.label(
                    np.logical_and(data_region < boundary, mask_region == 0))
                wfs_region = regions[newy, int(x + 0.5)]
                blocked = ndimage.morphology.binary_fill_holes(
                    np.where(regions == wfs_region, True, False))
                this_mean_sky = wfs_sky
                condition_met = False
                while not condition_met:
                    last_mean_sky = this_mean_sky
                    new_blocked = ndimage.morphology.binary_dilation(
                        blocked, structure=dilator)
                    this_mean_sky = np.median(data_region[new_blocked ^ blocked])
                    blocked = new_blocked
                    if index <= gs_index or ad[index].array_section().x1 == 0:
                        # Stop when convergence is reached on either the first
                        # extension looked at, or the leftmost CCD3 extension
                        condition_met = (this_mean_sky - last_mean_sky <
                                         convergence)
                    else:
                        # Dilate until WFS width at left of image equals width at
                        # right of previous extension image
                        width = np.sum(blocked[:, 0])
                        # Note: this will not be called before y_width is defined
                        condition_met = (y_width - width < 2) or index > 9  # noqa

                # Flag DQ pixels as unilluminated only if not flagged
                # (to avoid problems with the edge extensions and/or saturation)
                datasec_mask = ad[index].mask[:, datasec.x1:datasec.x2]
                datasec_mask |= np.where(
                    blocked,
                    np.where(datasec_mask > 0, DQ.good, DQ.unilluminated),
                    DQ.good)

                # Set up for next extension. If flood-fill hasn't reached
                # right-hand edge of detector, stop.
                column = blocked[:, -1]
                y_width = np.sum(column)
                if y_width == 0:
                    break
                y = np.mean(np.arange(datasec.y1, datasec.y2)[column])
                x = border

            ad.update_filename(suffix=params["suffix"], strip=True)

        return adinputs
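The spline-based search for the probe minimum used above can be tried on a one-dimensional toy profile. A minimal sketch (synthetic dip; the lambda keeps the objective scalar for optimize.minimize):

    import numpy as np
    from scipy.interpolate import UnivariateSpline
    from scipy import optimize

    rows = np.arange(100, dtype=float)
    profile = 100.0 - 40.0 * np.exp(-0.5 * ((rows - 42.0) / 6.0) ** 2)
    profile += np.random.default_rng(1).normal(0.0, 1.0, rows.size)

    # Smooth the noisy profile, then walk downhill from an initial guess
    spline = UnivariateSpline(rows, profile)
    result = optimize.minimize(lambda r: float(spline(r)), x0=50.0, method='CG')
    print(result.x[0])  # close to the true minimum near row 42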
Example #6
    def stackFrames(self, adinputs=None, **params):
        """
        This primitive will stack each science extension in the input dataset.
        New variance extensions are created from the stacked science extensions
        and the data quality extensions are propagated through to the final
        file.

        Parameters
        ----------
        suffix: str
            suffix to be added to output files
        apply_dq: bool
            apply DQ mask to data before combining?
        nhigh: int
            number of high pixels to reject
        nlow: int
            number of low pixels to reject
        operation: str
            combine method
        reject_method: str
            type of pixel rejection (passed to gemcombine)
        zero: bool
            apply zero-level offset to match background levels?
        scale: bool
            scale images to the same intensity?
        memory: float or None
            available memory (in GB) for stacking calculations
        statsec: str
            section for statistics
        separate_ext: bool
            handle extensions separately?
        """
        log = self.log
        log.debug(gt.log_message("primitive", self.myself(), "starting"))
        timestamp_key = self.timestamp_keys["stackFrames"]
        sfx = params["suffix"]
        memory = params["memory"]
        if memory is not None:
            memory = int(memory * 1000000000)  # convert GB to bytes

        zero = params["zero"]
        scale = params["scale"]
        apply_dq = params["apply_dq"]
        separate_ext = params["separate_ext"]
        statsec = params["statsec"]
        reject_method = params["reject_method"]
        if statsec:
            # Convert a 1-indexed FITS-style section '[x1:x2,y1:y2]' into a
            # tuple of 0-indexed Python slices in (y, x) order
            statsec = tuple([
                slice(int(start) - 1, int(end))
                for x in reversed(statsec.strip('[]').split(','))
                for start, end in [x.split(':')]
            ])

        if len(adinputs) <= 1:
            log.stdinfo("No stacking will be performed, since at least two "
                        "input AstroData objects are required for stackFrames")
            return adinputs

        if (reject_method == "minmax" and self.mode == "qa"
                and params["nlow"] + params["nhigh"] >= len(adinputs)):
            log.warning(
                "Trying to reject too many images. Setting nlow=nhigh=0.")
            params["nlow"] = 0
            params["nhigh"] = 0

        # Perform various checks on inputs
        for ad in adinputs:
            if not "PREPARED" in ad.tags:
                raise IOError("{} must be prepared".format(ad.filename))

        if len(set(len(ad) for ad in adinputs)) > 1:
            raise IOError("Not all inputs have the same number of extensions")
        if len(set([ext.nddata.shape for ad in adinputs for ext in ad])) > 1:
            raise IOError("Not all inputs images have the same shape")

        # Determine the average gain from the input AstroData objects and
        # add in quadrature the read noise
        gains = [ad.gain() for ad in adinputs]
        read_noises = [ad.read_noise() for ad in adinputs]

        assert all(g is not None for gain in gains for g in gain), \
            "Gain problem"
        assert all(r is not None for rn in read_noises for r in rn), \
            "RN problem"

        # Compute gain and read noise of final stacked images
        nexts = len(gains[0])
        gain_list = [
            np.mean([gain[i] for gain in gains]) for i in range(nexts)
        ]
        read_noise_list = [
            np.sqrt(np.sum([rn[i] * rn[i] for rn in read_noises]))
            for i in range(nexts)
        ]

        num_img = len(adinputs)
        num_ext = len(adinputs[0])
        zero_offsets = np.zeros((num_ext, num_img), dtype=np.float32)
        scale_factors = np.ones_like(zero_offsets)

        # Try to determine how much memory we're going to need to stack and
        # whether it's necessary to flush pixel data to disk first
        # Also determine kernel size from offered memory and bytes per pixel
        bytes_per_ext = []
        for ext in adinputs[0]:
            bytes = 0
            # Count _data twice to handle temporary arrays
            for attr in ('_data', '_data', '_uncertainty'):
                item = getattr(ext.nddata, attr)
                if item is not None:
                    # A bit of numpy weirdness in the difference between normal
                    # python types ("float32") and numpy types ("np.uint16")
                    try:
                        bytes += item.dtype.itemsize
                    except TypeError:
                        bytes += item.dtype().itemsize
                    except AttributeError:  # For non-lazy VAR
                        bytes += item._array.dtype.itemsize
            bytes += 2  #  mask always created
            bytes_per_ext.append(bytes * np.multiply.reduce(ext.nddata.shape))

        if memory is not None and (num_img * max(bytes_per_ext) > memory):
            adinputs = self.flushPixels(adinputs)

        # Compute the scale and offset values by accessing the memmapped data
        # so we can pass those to the stacking function
        # TODO: Should probably be done better to consider only the overlap
        # regions between frames
        if scale or zero:
            levels = np.empty((num_img, num_ext), dtype=np.float32)
            for i, ad in enumerate(adinputs):
                for index in range(num_ext):
                    nddata = (ad[index].nddata.window[:] if statsec is None
                              else ad[index].nddata.window[statsec])
                    levels[i, index] = gt.measure_bg_from_image(
                        nddata, value_only=True)
            if scale and zero:
                log.warning(
                    "Both scale and zero are set. Setting scale=False.")
                scale = False
            if separate_ext:
                # Target value is corresponding extension of first image
                if scale:
                    scale_factors = (levels[0] / levels).T
                else:  # zero=True
                    zero_offsets = (levels[0] - levels).T
            else:
                # Target value is mean of all extensions of first image
                target = np.mean(levels[0])
                if scale:
                    scale_factors = np.tile(target / np.mean(levels, axis=1),
                                            num_ext).reshape(num_ext, num_img)
                else:  # zero=True
                    zero_offsets = np.tile(target - np.mean(levels, axis=1),
                                           num_ext).reshape(num_ext, num_img)
            if scale and np.min(scale_factors) < 0:
                log.warning("Some scale factors are negative. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isinf(scale_factors)):
                log.warning("Some scale factors are infinite. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False
            if scale and np.any(np.isnan(scale_factors)):
                log.warning("Some scale factors are undefined. Not scaling.")
                scale_factors = np.ones_like(scale_factors)
                scale = False

        if reject_method == "varclip" and any(ext.variance is None
                                              for ad in adinputs
                                              for ext in ad):
            log.warning("Rejection method 'varclip' has been chosen but some"
                        "extensions have no variance. 'sigclip' will be used"
                        "instead.")
            reject_method = "sigclip"

        stack_function = NDStacker(combine=params["operation"],
                                   reject=reject_method,
                                   log=self.log,
                                   **params)

        # NDStacker uses DQ if it exists; if we don't want that, delete the DQs!
        if not apply_dq:
            for ad in adinputs:
                for ext in ad:
                    ext.mask = None

        ad_out = astrodata.create(adinputs[0].phu)
        for index, (extver, sfactors, zfactors) in enumerate(
                zip(adinputs[0].hdr.get('EXTVER'), scale_factors,
                    zero_offsets)):
            status = ("Combining EXTVER {}.".format(extver)
                      if num_ext > 1 else "Combining images.")
            if scale:
                status += " Applying scale factors."
                numbers = sfactors
            elif zero:
                status += " Applying offsets."
                numbers = zfactors
            log.stdinfo(status)
            if ((scale or zero) and (index == 0 or separate_ext)):
                for ad, value in zip(adinputs, numbers):
                    log.stdinfo("{:40s}{:10.3f}".format(ad.filename, value))

            shape = adinputs[0][index].nddata.shape
            if memory is None:
                kernel = shape
            else:
                # Chop the image horizontally into equal-sized chunks to process
                # This uses the minimum number of steps and uses minimum memory
                # per step.
                oversubscription = (bytes_per_ext[index] *
                                    num_img) // memory + 1
                kernel = ((shape[0] + oversubscription - 1) //
                          oversubscription, ) + shape[1:]
            with_uncertainty = True  # Since all stacking methods return variance
            with_mask = apply_dq and not any(
                ad[index].nddata.window[:].mask is None for ad in adinputs)
            result = windowedOp(partial(stack_function,
                                        scale=sfactors,
                                        zero=zfactors),
                                [ad[index].nddata for ad in adinputs],
                                kernel=kernel,
                                dtype=np.float32,
                                with_uncertainty=with_uncertainty,
                                with_mask=with_mask)
            ad_out.append(result)
            log.stdinfo("")

        # Propagate REFCAT as the union of all input REFCATs
        refcats = [ad.REFCAT for ad in adinputs if hasattr(ad, 'REFCAT')]
        if refcats:
            out_refcat = table.unique(
                table.vstack(refcats, metadata_conflicts='silent'),
                keys='Cat_Id')
            out_refcat['Cat_Id'] = list(range(1, len(out_refcat) + 1))
            ad_out.REFCAT = out_refcat

        # Set AIRMASS to be the mean of the input values
        try:
            airmass_kw = ad_out._keyword_for('airmass')
            mean_airmass = np.mean([ad.airmass() for ad in adinputs])
        except Exception:  # generic implementation failure (probably non-Gemini)
            pass
        else:
            ad_out.phu.set(airmass_kw, mean_airmass,
                           "Mean airmass for the exposure")

        # Set GAIN to the average of input gains. Set the RDNOISE to the
        # sum in quadrature of the input read noises.
        for ext, gain, rn in zip(ad_out, gain_list, read_noise_list):
            ext.hdr.set('GAIN', gain, self.keyword_comments['GAIN'])
            ext.hdr.set('RDNOISE', rn, self.keyword_comments['RDNOISE'])
        # Stick the first extension's values in the PHU
        ad_out.phu.set('GAIN', gain_list[0], self.keyword_comments['GAIN'])
        ad_out.phu.set('RDNOISE', read_noise_list[0],
                       self.keyword_comments['RDNOISE'])

        # Add suffix to datalabel to distinguish from the reference frame
        ad_out.phu.set('DATALAB', "{}{}".format(ad_out.data_label(), sfx),
                       self.keyword_comments['DATALAB'])

        # Add other keywords to the PHU about the stacking inputs
        ad_out.orig_filename = ad_out.phu.get('ORIGNAME')
        ad_out.phu.set('NCOMBINE', len(adinputs),
                       self.keyword_comments['NCOMBINE'])
        for i, ad in enumerate(adinputs, start=1):
            ad_out.phu.set('IMCMB{:03d}'.format(i),
                           ad.phu.get('ORIGNAME', ad.filename))

        # Timestamp and update filename and prepare to return single output
        gt.mark_history(ad_out, primname=self.myself(), keyword=timestamp_key)
        ad_out.update_filename(suffix=sfx, strip=True)

        return [ad_out]
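The FITS-section parsing shared by both stackFrames variants can be exercised on its own. A minimal sketch (the helper name is illustrative):

    def parse_statsec(statsec):
        # Turn a 1-indexed, inclusive FITS section '[x1:x2,y1:y2]' into
        # 0-indexed numpy slices in (y, x) order, as stackFrames does
        return tuple(slice(int(start) - 1, int(end))
                     for x in reversed(statsec.strip('[]').split(','))
                     for start, end in [x.split(':')])

    print(parse_statsec('[100:200,50:150]'))
    # -> (slice(49, 150, None), slice(99, 200, None))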